diff --git a/CHANGELOG.md b/CHANGELOG.md index fa845266..3c8301cd 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,36 @@ +**Changed in Veilid 0.3.5** _WIP_ +- Dialinfo detection issues: + - Add a publish() as well as a commit() for routing domain editor + - Should only publish our peer info after we're sure we're done editing it (end of public address detection task) + - Publish should happen after relay selection as well + - Publish should happen if the relay's peerinfo has changed + - Publish should not do anything if the peerinfo hasn't changed + - PeerInfo -> Arc<PeerInfo> everywhere to minimize deep clones and ensure read-only PeerInfo + - Routing domain editing is now more atomic + - When a node selects a relay it now immediately protects its connections. + - Made dial info port (for port-restricted NAT) more resilient to changes, in case there are multiple mappings + - Relays that drop protected connections should be deprioritized for relay selection (table saturation detection) + - clear_network_callback in do_public_dial_info_check was a kludge, removed + - Raised the bar for dialinfo changes when it's just the port + - Pinging a node on the same network works again + - resolve_node() never returns a dead node even when we want to try to communicate with it again + - Removed 'bad public address' detection as it wasn't working anyway + - Added separate parallelism lanes for relay keepalive pings, distinct from peer liveness check pings, as they are higher priority + - Change send_data to always check the cache for a contact method first instead of going with filtered active flows first, which avoids choosing UDP when a preferable TCP connection could be made + - Nodes that are not relay capable should drop relayed packets + +- DHT issues: + - Make setvalue more likely to succeed by accepting a getvalue consensus if a full setvalue consensus is not reached. 
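A minimal standalone sketch of the relaxed consensus rule in the item above (illustrative only, not the Veilid implementation; `classify_set_value` and `SetValueOutcome` are hypothetical names): a write counts as complete at full set-value consensus, and as accepted-but-unsynchronized when it only reaches the smaller get-value consensus, in which case the subkey stays queued as an offline write.

```rust
/// Hypothetical outcome type, for illustration only.
#[derive(Debug, PartialEq, Eq)]
enum SetValueOutcome {
    /// Full set-value consensus reached; the offline subkey write can be cleared.
    Complete,
    /// Only get-value consensus reached; accept the write but keep the subkey
    /// marked as "not yet synchronized" so it is retried later.
    AcceptedPartial,
    /// Not enough nodes stored the value; keep it queued as an offline write.
    Failed,
}

/// Classify a set_value round by how many nodes reported storing the value.
fn classify_set_value(nodes_stored: usize, set_consensus: usize, get_consensus: usize) -> SetValueOutcome {
    if nodes_stored >= set_consensus {
        SetValueOutcome::Complete
    } else if nodes_stored >= get_consensus {
        SetValueOutcome::AcceptedPartial
    } else {
        SetValueOutcome::Failed
    }
}

fn main() {
    // With a set-value consensus of 4 and a get-value consensus of 2,
    // three storing nodes is no longer a failure, just "not yet synchronized".
    assert_eq!(classify_set_value(3, 4, 2), SetValueOutcome::AcceptedPartial);
    assert_eq!(classify_set_value(4, 4, 2), SetValueOutcome::Complete);
    assert_eq!(classify_set_value(1, 4, 2), SetValueOutcome::Failed);
}
```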
+ - Offline subkey writes are cleared too fast and should be thought of as 'subkeys not yet synchronized' + - If set_value is partial / in-flight, it should still be in offline_subkey_writes + - Make an inflight_subkey_writes list and probably some bit for 'written_while_inflight' so we don't clear the offline_subkey_writes until they're really written + + +- API Additions: + - VeilidConfigInner::new parameterization for easier config from Rust apps + - Remove veilid-server specific paths from veilid-core defaults + - Lots more stats about node performance in PeerStats + **Changed in Veilid 0.3.4** - Crates updates - Update crates to newer versions diff --git a/Cargo.lock b/Cargo.lock index 9c3e4261..482e7b32 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5955,9 +5955,9 @@ dependencies = [ [[package]] name = "veilid-hashlink" -version = "0.1.0" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a3dabbda02cfe176635dcaa18a021416ff2eb4d0b47a913e3fdc7f62049d7b1" +checksum = "2070d1d09dad90091d23e49743408f82f8874994dec5ae0a8d3689b061bba426" dependencies = [ "hashbrown 0.14.5", "serde", diff --git a/veilid-cli/src/command_processor.rs b/veilid-cli/src/command_processor.rs index f648eff4..98a6bf4b 100644 --- a/veilid-cli/src/command_processor.rs +++ b/veilid-cli/src/command_processor.rs @@ -143,7 +143,7 @@ impl CommandProcessor { disable [flag] unset a flag valid flags in include: app_messages -Server Debug Commands: +Core Debug Commands: {} "#, indent_all_by(4, out) diff --git a/veilid-core/Cargo.toml b/veilid-core/Cargo.toml index 68a61227..66ec953a 100644 --- a/veilid-core/Cargo.toml +++ b/veilid-core/Cargo.toml @@ -88,7 +88,7 @@ enumset = { version = "1.1.3", features = ["serde"] } keyvaluedb = "0.1.2" range-set-blaze = "0.1.16" weak-table = "0.3.2" -hashlink = { package = "veilid-hashlink", version = "0.1.0", features = [ +hashlink = { package = "veilid-hashlink", version = "0.1.1", features = [ "serde_impl", ] } diff --git a/veilid-core/src/lib.rs b/veilid-core/src/lib.rs index 0a019576..ed2211e4 100644 --- a/veilid-core/src/lib.rs +++ b/veilid-core/src/lib.rs @@ -97,6 +97,7 @@ use enumset::*; use eyre::{bail, eyre, Report as EyreReport, Result as EyreResult, WrapErr}; #[allow(unused_imports)] use futures_util::stream::{FuturesOrdered, FuturesUnordered}; +use indent::*; use parking_lot::*; use schemars::{schema_for, JsonSchema}; use serde::*; diff --git a/veilid-core/src/network_manager/address_check.rs b/veilid-core/src/network_manager/address_check.rs new file mode 100644 index 00000000..1053f8c2 --- /dev/null +++ b/veilid-core/src/network_manager/address_check.rs @@ -0,0 +1,313 @@ +/// Address checker - keep track of how other nodes are seeing our node's address on a per-protocol basis +/// Used to determine if our address has changed and if we should re-publish new PeerInfo +use super::*; + +/// Number of 'existing dialinfo inconsistent' results in the cache during inbound-capable to trigger detection +pub const ADDRESS_INCONSISTENCY_DETECTION_COUNT: usize = 5; + +/// Number of consistent results in the cache during outbound-only to trigger detection +pub const ADDRESS_CONSISTENCY_DETECTION_COUNT: usize = 5; + +/// Length of consistent/inconsistent result cache for detection +pub const ADDRESS_CHECK_CACHE_SIZE: usize = 10; + +/// Length of consistent/inconsistent result cache for detection +// pub const ADDRESS_CHECK_PEER_COUNT: usize = 256; +// /// Frequency of address checks +// pub const PUBLIC_ADDRESS_CHECK_TASK_INTERVAL_SECS: u32 = 60; +// /// 
Duration we leave nodes in the inconsistencies table +// pub const PUBLIC_ADDRESS_INCONSISTENCY_TIMEOUT_US: TimestampDuration = +// TimestampDuration::new(300_000_000u64); // 5 minutes +// /// How long we punish nodes for lying about our address +// pub const PUBLIC_ADDRESS_INCONSISTENCY_PUNISHMENT_TIMEOUT_US: TimestampDuration = +// TimestampDuration::new(3_600_000_000_u64); // 60 minutes + +/// Address checker config +pub(crate) struct AddressCheckConfig { + pub(crate) detect_address_changes: bool, + pub(crate) ip6_prefix_size: usize, +} + +#[derive(Copy, Clone, Debug, PartialEq, Eq, Ord, PartialOrd, Hash)] +struct AddressCheckCacheKey(RoutingDomain, ProtocolType, AddressType); + +/// Address checker - keep track of how other nodes are seeing our node's address on a per-protocol basis +/// Used to determine if our address has changed and if we should re-publish new PeerInfo +pub(crate) struct AddressCheck { + config: AddressCheckConfig, + net: Network, + current_network_class: BTreeMap<RoutingDomain, NetworkClass>, + current_addresses: BTreeMap<AddressCheckCacheKey, HashSet<SocketAddress>>, + // Used by InboundCapable to determine if we have changed our address or need to re-do our network class + address_inconsistency_table: BTreeMap<AddressCheckCacheKey, usize>, + // Used by OutboundOnly to determine if we should re-do our network class + address_consistency_table: BTreeMap<AddressCheckCacheKey, LruCache<IpAddr, SocketAddress>>, +} + +impl AddressCheck { + pub fn new(config: AddressCheckConfig, net: Network) -> Self { + Self { + config, + net, + current_network_class: BTreeMap::new(), + current_addresses: BTreeMap::new(), + address_inconsistency_table: BTreeMap::new(), + address_consistency_table: BTreeMap::new(), + } + } + + /// Accept a report of any peerinfo that has changed + pub fn report_peer_info_change(&mut self, peer_info: Arc<PeerInfo>) { + let routing_domain = peer_info.routing_domain(); + let network_class = peer_info.signed_node_info().node_info().network_class(); + + self.current_network_class + .insert(routing_domain, network_class); + for protocol_type in ProtocolTypeSet::all() { + for address_type in AddressTypeSet::all() { + let acck = AddressCheckCacheKey(routing_domain, protocol_type, address_type); + + // Clear our current addresses so we can rebuild them for this routing domain + self.current_addresses.remove(&acck); + + // Clear our history as well now so we start fresh when we get a new peer info + self.address_inconsistency_table.remove(&acck); + self.address_consistency_table.remove(&acck); + } + } + + for did in peer_info + .signed_node_info() + .node_info() + .dial_info_detail_list() + { + // Strip port from direct and mapped addresses + // as the incoming dialinfo may not match the outbound + // connections' NAT mapping. In this case we only check for IP address changes. 
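+ // For example, a Direct dial info of 203.0.113.5:5150 is recorded here as 203.0.113.5:0; + // matches_current_address() below then accepts either the exact address or the port-0 form as a match.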
+ let socket_address = + if did.class == DialInfoClass::Direct || did.class == DialInfoClass::Mapped { + did.dial_info.socket_address().with_port(0) + } else { + did.dial_info.socket_address() + }; + + let address_type = did.dial_info.address_type(); + let protocol_type = did.dial_info.protocol_type(); + let acck = AddressCheckCacheKey(routing_domain, protocol_type, address_type); + + self.current_addresses + .entry(acck) + .or_default() + .insert(socket_address); + } + } + + /// Accept a report of our address as seen by the other end of a flow, such + /// as the StatusA response from a StatusQ + pub fn report_socket_address_change( + &mut self, + routing_domain: RoutingDomain, // the routing domain used by this flow + socket_address: SocketAddress, // the socket address as seen by the remote peer + old_socket_address: Option, // the socket address previously for this peer + flow: Flow, // the flow used + reporting_peer: NodeRef, // the peer's noderef reporting the socket address + ) { + // Don't accept any reports if we're already in the middle of a public dial info check + if self.net.needs_public_dial_info_check() { + return; + } + + // Ignore the LocalNetwork routing domain because we know if our local addresses change + // from our interfaces + if matches!(routing_domain, RoutingDomain::LocalNetwork) { + return; + } + + // Ignore flows that do not start from our listening port (unbound connections etc), + // because a router is going to map these differently + let Some(pla) = self + .net + .get_preferred_local_address_by_key(flow.protocol_type(), flow.address_type()) + else { + return; + }; + let Some(local) = flow.local() else { + return; + }; + if local.port() != pla.port() { + log_network_result!(debug "ignoring address report because local port did not match listener: {} != {}", local.port(), pla.port()); + return; + } + + // Get the ip(block) this report is coming from + let reporting_ipblock = + ip_to_ipblock(self.config.ip6_prefix_size, flow.remote_address().ip_addr()); + + // Reject public address reports from nodes that we know are behind symmetric nat or + // nodes that must be using a relay for everything + let Some(reporting_node_info) = reporting_peer.node_info(routing_domain) else { + return; + }; + if reporting_node_info.network_class() != NetworkClass::InboundCapable { + return; + } + + // If the socket address reported is the same as the reporter, then this is coming through a relay + // or it should be ignored due to local proximity (nodes on the same network block should not be trusted as + // public ip address reporters, only disinterested parties) + if reporting_ipblock == ip_to_ipblock(self.config.ip6_prefix_size, socket_address.ip_addr()) + { + return; + } + + // Get current network class / dial info + // If we haven't gotten our own network class yet we're done for now + let Some(network_class) = self.current_network_class.get(&routing_domain) else { + return; + }; + + // Process the state of the address checker and see if we need to + // perform a full address check for this routing domain + let needs_address_detection = match network_class { + NetworkClass::InboundCapable => self.detect_for_inbound_capable( + routing_domain, + socket_address, + old_socket_address, + flow, + reporting_peer, + ), + NetworkClass::OutboundOnly => self.detect_for_outbound_only( + routing_domain, + socket_address, + flow, + reporting_ipblock, + ), + NetworkClass::WebApp | NetworkClass::Invalid => { + return; + } + }; + + if needs_address_detection { + if 
self.config.detect_address_changes { + // Reset the address check cache now so we can start detecting fresh + info!( + "{:?} address has changed, detecting dial info", + routing_domain + ); + + // Re-detect the public dialinfo + self.net.set_needs_public_dial_info_check(None); + } else { + warn!( + "{:?} address may have changed. Restarting the server may be required.", + routing_domain + ); + } + } + } + + fn matches_current_address( + &self, + acckey: AddressCheckCacheKey, + socket_address: SocketAddress, + ) -> bool { + self.current_addresses + .get(&acckey) + .map(|current_addresses| { + current_addresses.contains(&socket_address) + || current_addresses.contains(&socket_address.with_port(0)) + }) + .unwrap_or(false) + } + + // If we are inbound capable, but start to see places where our sender info used to match our dial info + // but no longer matches our dial info (count up the number of changes -away- from our dial info) + // then trigger a detection of dial info and network class + fn detect_for_inbound_capable( + &mut self, + routing_domain: RoutingDomain, // the routing domain used by this flow + socket_address: SocketAddress, // the socket address as seen by the remote peer + old_socket_address: Option, // the socket address previously for this peer + flow: Flow, // the flow used + reporting_peer: NodeRef, // the peer's noderef reporting the socket address + ) -> bool { + let acckey = + AddressCheckCacheKey(routing_domain, flow.protocol_type(), flow.address_type()); + + // Check the current socket address and see if it matches our current dial info + let new_matches_current = self.matches_current_address(acckey, socket_address); + + // If we have something that matches our current dial info at all, consider it a validation + if new_matches_current { + self.address_inconsistency_table + .entry(acckey) + .and_modify(|ait| { + if *ait != 0 { + log_net!(debug "Resetting address inconsistency for {:?} due to match on flow {:?} from {}", acckey, flow, reporting_peer); + } + *ait = 0; + }) + .or_insert(0); + return false; + } + + // See if we have a case of switching away from our dial info + let old_matches_current = old_socket_address + .map(|osa| self.matches_current_address(acckey, osa)) + .unwrap_or(false); + + if old_matches_current { + let val = *self + .address_inconsistency_table + .entry(acckey) + .and_modify(|ait| { + *ait += 1; + }) + .or_insert(1); + log_net!(debug "Adding address inconsistency ({}) for {:?} due to address {} on flow {:?} from {}", val, acckey, socket_address, flow, reporting_peer); + return val >= ADDRESS_INCONSISTENCY_DETECTION_COUNT; + } + + false + } + + // If we are currently outbound only, we don't have any public dial info + // but if we are starting to see consistent socket address from multiple reporting peers + // then we may be become inbound capable, so zap the network class so we can re-detect it and any public dial info + // lru the addresses we're seeing and if they all match (same ip only?) 
then trigger + fn detect_for_outbound_only( + &mut self, + routing_domain: RoutingDomain, // the routing domain used by this flow + socket_address: SocketAddress, // the socket address as seen by the remote peer + flow: Flow, // the flow used + reporting_ipblock: IpAddr, // the IP block this report came from + ) -> bool { + let acckey = + AddressCheckCacheKey(routing_domain, flow.protocol_type(), flow.address_type()); + + // Add the currently seen socket address into the consistency table + let cache = self + .address_consistency_table + .entry(acckey) + .and_modify(|act| { + act.insert(reporting_ipblock, socket_address); + }) + .or_insert_with(|| { + let mut lruc = LruCache::new(ADDRESS_CHECK_CACHE_SIZE); + lruc.insert(reporting_ipblock, socket_address); + lruc + }); + + // If we have at least N consistencies then trigger a detect + let mut consistencies = HashMap::::new(); + for (_k, v) in cache.iter() { + let count = *consistencies.entry(*v).and_modify(|e| *e += 1).or_insert(1); + if count >= ADDRESS_CONSISTENCY_DETECTION_COUNT { + log_net!(debug "Address consistency detected for {:?}: {}", acckey, v); + return true; + } + } + + false + } +} diff --git a/veilid-core/src/network_manager/address_filter.rs b/veilid-core/src/network_manager/address_filter.rs index 76772bad..fd3597ad 100644 --- a/veilid-core/src/network_manager/address_filter.rs +++ b/veilid-core/src/network_manager/address_filter.rs @@ -1,7 +1,6 @@ use super::*; use alloc::collections::btree_map::Entry; -// XXX: Move to config eventually? const PUNISHMENT_DURATION_MIN: usize = 60; const MAX_PUNISHMENTS_BY_NODE_ID: usize = 65536; const DIAL_INFO_FAILURE_DURATION_MIN: usize = 10; diff --git a/veilid-core/src/network_manager/connection_manager.rs b/veilid-core/src/network_manager/connection_manager.rs index 682dd898..cd8cc6c3 100644 --- a/veilid-core/src/network_manager/connection_manager.rs +++ b/veilid-core/src/network_manager/connection_manager.rs @@ -4,6 +4,9 @@ use connection_table::*; use network_connection::*; use stop_token::future::FutureExt; +const PROTECTED_CONNECTION_DROP_SPAN: TimestampDuration = TimestampDuration::new_secs(10); +const PROTECTED_CONNECTION_DROP_COUNT: usize = 3; + /////////////////////////////////////////////////////////// // Connection manager @@ -38,13 +41,21 @@ impl Drop for ConnectionRefScope { } } +#[derive(Debug)] +struct ProtectedAddress { + node_ref: NodeRef, + span_start_ts: Timestamp, + drops_in_span: usize, +} + #[derive(Debug)] struct ConnectionManagerInner { next_id: NetworkConnectionId, sender: flume::Sender, async_processor_jh: Option>, stop_source: Option, - protected_addresses: HashMap, + protected_addresses: HashMap, + reconnection_processor: DeferredStreamProcessor, } struct ConnectionManagerArc { @@ -74,6 +85,7 @@ impl ConnectionManager { stop_source: StopSource, sender: flume::Sender, async_processor_jh: MustJoinHandle<()>, + reconnection_processor: DeferredStreamProcessor, ) -> ConnectionManagerInner { ConnectionManagerInner { next_id: 0.into(), @@ -81,6 +93,7 @@ impl ConnectionManager { sender, async_processor_jh: Some(async_processor_jh), protected_addresses: HashMap::new(), + reconnection_processor, } } fn new_arc(network_manager: NetworkManager) -> ConnectionManagerArc { @@ -123,11 +136,6 @@ impl ConnectionManager { log_net!(debug "startup connection manager"); - let mut inner = self.arc.inner.lock(); - if inner.is_some() { - panic!("shouldn't start connection manager twice without shutting it down first"); - } - // Create channel for async_processor to receive 
notifications of networking events let (sender, receiver) = flume::unbounded(); @@ -140,8 +148,21 @@ impl ConnectionManager { self.clone().async_processor(stop_source.token(), receiver), ); + // Spawn the reconnection processor + let mut reconnection_processor = DeferredStreamProcessor::new(); + reconnection_processor.init().await; + // Store in the inner object - *inner = Some(Self::new_inner(stop_source, sender, async_processor)); + let mut inner = self.arc.inner.lock(); + if inner.is_some() { + panic!("shouldn't start connection manager twice without shutting it down first"); + } + *inner = Some(Self::new_inner( + stop_source, + sender, + async_processor, + reconnection_processor, + )); guard.success(); @@ -165,7 +186,9 @@ impl ConnectionManager { } } }; - + // Stop the reconnection processor + log_net!(debug "stopping reconnection processor task"); + inner.reconnection_processor.terminate().await; // Stop all the connections and the async processor log_net!(debug "stopping async processor task"); drop(inner.stop_source.take()); @@ -191,7 +214,7 @@ impl ConnectionManager { inner .protected_addresses .get(conn.flow().remote_address()) - .cloned() + .map(|x| x.node_ref.clone()) } // Update connection protections if things change, like a node becomes a relay @@ -205,8 +228,12 @@ impl ConnectionManager { return; }; - // Get addresses for relays in all routing domains - inner.protected_addresses.clear(); + // Protect addresses for relays in all routing domains + let mut dead_addresses = inner + .protected_addresses + .keys() + .cloned() + .collect::>(); for routing_domain in RoutingDomainSet::all() { let Some(relay_node) = self .network_manager() @@ -218,12 +245,28 @@ impl ConnectionManager { for did in relay_node.dial_info_details() { // SocketAddress are distinct per routing domain, so they should not collide // and two nodes should never have the same SocketAddress + let protected_address = did.dial_info.socket_address(); + + // Update the protection, note the protected address is not dead + dead_addresses.remove(&protected_address); inner .protected_addresses - .insert(did.dial_info.socket_address(), relay_node.unfiltered()); + .entry(protected_address) + .and_modify(|pa| pa.node_ref = relay_node.unfiltered()) + .or_insert_with(|| ProtectedAddress { + node_ref: relay_node.unfiltered(), + span_start_ts: Timestamp::now(), + drops_in_span: 0usize, + }); } } + // Remove protected addresses that were not still associated with a protected noderef + for dead_address in dead_addresses { + inner.protected_addresses.remove(&dead_address); + } + + // For all connections, register the protection self.arc .connection_table .with_all_connections_mut(|conn| { @@ -248,6 +291,7 @@ impl ConnectionManager { &self, inner: &mut ConnectionManagerInner, prot_conn: ProtocolNetworkConnection, + opt_dial_info: Option, ) -> EyreResult> { // Get next connection id to use let id = inner.next_id; @@ -264,7 +308,13 @@ impl ConnectionManager { None => bail!("not creating connection because we are stopping"), }; - let mut conn = NetworkConnection::from_protocol(self.clone(), stop_token, prot_conn, id); + let mut conn = NetworkConnection::from_protocol( + self.clone(), + stop_token, + prot_conn, + id, + opt_dial_info, + ); let handle = conn.get_handle(); // See if this should be a protected connection @@ -281,6 +331,7 @@ impl ConnectionManager { Ok(Some(conn)) => { // Connection added and a different one LRU'd out // Send it to be terminated + #[cfg(feature = "verbose-tracing")] log_net!(debug "== LRU kill connection due 
to limit: {:?}", conn.debug_print(Timestamp::now())); let _ = inner.sender.send(ConnectionManagerEvent::Dead(conn)); } @@ -438,7 +489,7 @@ impl ConnectionManager { } }; - self.on_new_protocol_network_connection(inner, prot_conn) + self.on_new_protocol_network_connection(inner, prot_conn, Some(dial_info)) } /////////////////////////////////////////////////////////////////////////////////////////////////////// @@ -472,7 +523,7 @@ impl ConnectionManager { // We don't care if this fails, since nobody here asked for the inbound connection. // If it does, we just drop the connection - let _ = self.on_new_protocol_network_connection(inner, prot_conn); + let _ = self.on_new_protocol_network_connection(inner, prot_conn, None); } None => { // If this somehow happens, we're shutting down @@ -559,14 +610,81 @@ impl ConnectionManager { // Inform the processor of the event if let Some(conn) = conn { // If the connection closed while it was protected, report it on the node the connection was established on - // In-use connections will already get reported because they will cause a 'question_lost' stat on the remote node + // In-use connections will already get reported because they will cause a 'lost_answer' stat on the remote node if let Some(protect_nr) = conn.protected_node_ref() { - protect_nr.report_protected_connection_dropped(); + // Find the protected address and increase our drop count + if let Some(inner) = self.arc.inner.lock().as_mut() { + for pa in inner.protected_addresses.values_mut() { + if pa.node_ref.same_entry(&protect_nr) { + // See if we've had more than the threshold number of drops in the last span + let cur_ts = Timestamp::now(); + let duration = cur_ts.saturating_sub(pa.span_start_ts); + + let mut reconnect = true; + + if duration < PROTECTED_CONNECTION_DROP_SPAN { + pa.drops_in_span += 1; + log_net!(debug "== Protected connection dropped (count={}): {} -> {} for node {}", pa.drops_in_span, conn.connection_id(), conn.debug_print(Timestamp::now()), protect_nr); + + if pa.drops_in_span >= PROTECTED_CONNECTION_DROP_COUNT { + // Consider this as a failure to send if we've dropped the connection too many times in a single timespan + protect_nr.report_protected_connection_dropped(); + reconnect = false; + + // Reset the drop counter + pa.drops_in_span = 0; + pa.span_start_ts = cur_ts; + } + } else { + // Otherwise, just reset the drop detection span + pa.drops_in_span = 1; + pa.span_start_ts = cur_ts; + + log_net!(debug "== Protected connection dropped (count={}): {} -> {} for node {}", pa.drops_in_span, conn.connection_id(), conn.debug_print(Timestamp::now()), protect_nr); + } + + // Reconnect the protected connection immediately + if reconnect { + if let Some(dial_info) = conn.dial_info() { + self.spawn_reconnector_inner(inner, dial_info); + } else { + log_net!(debug "Can't reconnect to accepted protected connection: {} -> {} for node {}", conn.connection_id(), conn.debug_print(Timestamp::now()), protect_nr); + } + } + + break; + } + } + } } let _ = sender.send_async(ConnectionManagerEvent::Dead(conn)).await; } } + fn spawn_reconnector_inner(&self, inner: &mut ConnectionManagerInner, dial_info: DialInfo) { + let this = self.clone(); + inner.reconnection_processor.add( + Box::pin(futures_util::stream::once(async { dial_info })), + move |dial_info| { + let this = this.clone(); + Box::pin(async move { + match this.get_or_create_connection(dial_info.clone()).await { + Ok(NetworkResult::Value(conn)) => { + log_net!(debug "Reconnection successful to {}: {:?}", dial_info,conn); + } + 
Ok(res) => { + log_net!(debug "Reconnection unsuccessful to {}: {:?}", dial_info, res); + } + Err(e) => { + log_net!(debug "Reconnection error to {}: {}", dial_info, e); + } + } + false + }) + }, + ); + } + pub async fn debug_print(&self) -> String { //let inner = self.arc.inner.lock(); format!( diff --git a/veilid-core/src/network_manager/direct_boot.rs b/veilid-core/src/network_manager/direct_boot.rs index 554a28f7..97ebfbda 100644 --- a/veilid-core/src/network_manager/direct_boot.rs +++ b/veilid-core/src/network_manager/direct_boot.rs @@ -47,9 +47,16 @@ impl NetworkManager { return Ok(Vec::new()); }); - let bootstrap_peerinfo: Vec = - deserialize_json(std::str::from_utf8(&out_data).wrap_err("bad utf8 in boot peerinfo")?) - .wrap_err("failed to deserialize boot peerinfo")?; + let bootstrap_peerinfo_str = + std::str::from_utf8(&out_data).wrap_err("bad utf8 in boot peerinfo")?; + + let bootstrap_peerinfo: Vec = match deserialize_json(bootstrap_peerinfo_str) { + Ok(v) => v, + Err(e) => { + error!("{}", e); + return Err(e).wrap_err("failed to deserialize peerinfo"); + } + }; Ok(bootstrap_peerinfo.into_iter().map(Arc::new).collect()) } diff --git a/veilid-core/src/network_manager/mod.rs b/veilid-core/src/network_manager/mod.rs index 4b1ffc11..44e8bb26 100644 --- a/veilid-core/src/network_manager/mod.rs +++ b/veilid-core/src/network_manager/mod.rs @@ -5,6 +5,7 @@ mod native; #[cfg(target_arch = "wasm32")] mod wasm; +mod address_check; mod address_filter; mod connection_handle; mod connection_manager; @@ -30,6 +31,7 @@ pub(crate) use stats::*; pub use types::*; //////////////////////////////////////////////////////////////////////////////////////// +use address_check::*; use address_filter::*; use connection_handle::*; use crypto::*; @@ -54,16 +56,9 @@ pub const IPADDR_TABLE_SIZE: usize = 1024; pub const IPADDR_MAX_INACTIVE_DURATION_US: TimestampDuration = TimestampDuration::new(300_000_000u64); // 5 minutes pub const NODE_CONTACT_METHOD_CACHE_SIZE: usize = 1024; -pub const PUBLIC_ADDRESS_CHANGE_CONSISTENCY_DETECTION_COUNT: usize = 3; // Number of consistent results in the cache during outbound-only to trigger detection -pub const PUBLIC_ADDRESS_CHANGE_INCONSISTENCY_DETECTION_COUNT: usize = 7; // Number of inconsistent results in the cache during inbound-capable to trigger detection -pub const PUBLIC_ADDRESS_CHECK_CACHE_SIZE: usize = 10; // Length of consistent/inconsistent result cache for detection -pub const PUBLIC_ADDRESS_CHECK_TASK_INTERVAL_SECS: u32 = 60; -pub const PUBLIC_ADDRESS_INCONSISTENCY_TIMEOUT_US: TimestampDuration = - TimestampDuration::new(300_000_000u64); // 5 minutes -pub const PUBLIC_ADDRESS_INCONSISTENCY_PUNISHMENT_TIMEOUT_US: TimestampDuration = - TimestampDuration::new(3_600_000_000_u64); // 60 minutes pub const ADDRESS_FILTER_TASK_INTERVAL_SECS: u32 = 60; pub const BOOT_MAGIC: &[u8; 4] = b"BOOT"; +pub const HOLE_PUNCH_DELAY_MS: u32 = 100; // Things we get when we start up and go away when we shut down // Routing table is not in here because we want it to survive a network shutdown/startup restart @@ -117,9 +112,6 @@ struct NodeContactMethodCacheKey { target_node_ref_sequencing: Sequencing, } -#[derive(Copy, Clone, Debug, PartialEq, Eq, Ord, PartialOrd, Hash)] -struct PublicAddressCheckCacheKey(ProtocolType, AddressType); - enum SendDataToExistingFlowResult { Sent(UniqueFlow), NotSent(Vec), @@ -137,10 +129,7 @@ struct NetworkManagerInner { stats: NetworkManagerStats, client_allowlist: LruCache, node_contact_method_cache: LruCache, - public_internet_address_check_cache: - 
BTreeMap>, - public_internet_address_inconsistencies_table: - BTreeMap>, + address_check: Option, } struct NetworkManagerUnlockedInner { @@ -158,7 +147,6 @@ struct NetworkManagerUnlockedInner { update_callback: RwLock>, // Background processes rolling_transfers_task: TickTask, - public_internet_address_check_task: TickTask, address_filter_task: TickTask, // Network Key network_key: Option, @@ -178,8 +166,7 @@ impl NetworkManager { stats: NetworkManagerStats::default(), client_allowlist: LruCache::new_unbounded(), node_contact_method_cache: LruCache::new(NODE_CONTACT_METHOD_CACHE_SIZE), - public_internet_address_check_cache: BTreeMap::new(), - public_internet_address_inconsistencies_table: BTreeMap::new(), + address_check: None, } } fn new_unlocked_inner( @@ -205,10 +192,6 @@ impl NetworkManager { "rolling_transfers_task", ROLLING_TRANSFERS_INTERVAL_SECS, ), - public_internet_address_check_task: TickTask::new( - "public_address_check_task", - PUBLIC_ADDRESS_CHECK_TASK_INTERVAL_SECS, - ), address_filter_task: TickTask::new( "address_filter_task", ADDRESS_FILTER_TASK_INTERVAL_SECS, @@ -437,6 +420,20 @@ impl NetworkManager { return Ok(StartupDisposition::BindRetry); } } + + let (detect_address_changes, ip6_prefix_size) = self.with_config(|c| { + ( + c.network.detect_address_changes, + c.network.max_connections_per_ip6_prefix_size as usize, + ) + }); + let address_check_config = AddressCheckConfig { + detect_address_changes, + ip6_prefix_size, + }; + let address_check = AddressCheck::new(address_check_config, net.clone()); + self.inner.lock().address_check = Some(address_check); + rpc_processor.startup().await?; receipt_manager.startup().await?; @@ -474,18 +471,22 @@ impl NetworkManager { // Cancel all tasks self.cancel_tasks().await; + // Shutdown address check + self.inner.lock().address_check = Option::::None; + // Shutdown network components if they started up log_net!(debug "shutting down network components"); - let components = self.unlocked_inner.components.read().clone(); - if let Some(components) = components { - components.net.shutdown().await; - components.rpc_processor.shutdown().await; - components.receipt_manager.shutdown().await; - components.connection_manager.shutdown().await; - - *self.unlocked_inner.components.write() = None; + { + let components = self.unlocked_inner.components.read().clone(); + if let Some(components) = components { + components.net.shutdown().await; + components.rpc_processor.shutdown().await; + components.receipt_manager.shutdown().await; + components.connection_manager.shutdown().await; + } } + *self.unlocked_inner.components.write() = None; // reset the state log_net!(debug "resetting network manager state"); @@ -835,7 +836,8 @@ impl NetworkManager { .await? ); - // XXX: do we need a delay here? or another hole punch packet? 
+ // Add small delay to encourage packets to be delivered in order + sleep(HOLE_PUNCH_DELAY_MS).await; // Set the hole punch as our 'last connection' to ensure we return the receipt over the direct hole punch peer_nr.set_last_flow(unique_flow.flow, Timestamp::now()); @@ -1074,19 +1076,25 @@ impl NetworkManager { let routing_table = self.routing_table(); let rpc = self.rpc_processor(); - // Peek at header and see if we need to relay this - // If the recipient id is not our node id, then it needs relaying + // See if this sender is punished, if so, ignore the packet let sender_id = envelope.get_sender_typed_id(); if self.address_filter().is_node_id_punished(sender_id) { return Ok(false); } + // Peek at header and see if we need to relay this + // If the recipient id is not our node id, then it needs relaying let recipient_id = envelope.get_recipient_typed_id(); if !routing_table.matches_own_node_id(&[recipient_id]) { // See if the source node is allowed to resolve nodes // This is a costly operation, so only outbound-relay permitted // nodes are allowed to do this, for example PWA users + // xxx: eventually allow recipient_id to be in allowlist? + // xxx: to enable cross-routing domain relaying? or rather + // xxx: that 'localnetwork' routing domain nodes could be allowed to + // xxx: full relay as well as client_allowlist nodes... + let some_relay_nr = if self.check_client_allowlist(sender_id) { // Full relay allowed, do a full resolve_node match rpc @@ -1095,7 +1103,7 @@ impl NetworkManager { { Ok(v) => v.map(|nr| nr.default_filtered()), Err(e) => { - log_net!(debug "failed to resolve recipient node for relay, dropping outbound relayed packet: {}" ,e); + log_net!(debug "failed to resolve recipient node for relay, dropping relayed envelope: {}" ,e); return Ok(false); } } @@ -1103,6 +1111,19 @@ impl NetworkManager { // If this is not a node in the client allowlist, only allow inbound relay // which only performs a lightweight lookup before passing the packet back out + // If our node has the relay capability disabled, we should not be asked to relay + if self.with_config(|c| c.capabilities.disable.contains(&CAP_RELAY)) { + log_net!(debug "node has relay capability disabled, dropping relayed envelope from {} to {}", sender_id, recipient_id); + return Ok(false); + } + + // If our own node requires a relay, we should not be asked to relay + // on behalf of other nodes, just drop relayed packets if we can't relay + if routing_table.relay_node(routing_domain).is_some() { + log_net!(debug "node requires a relay itself, dropping relayed envelope from {} to {}", sender_id, recipient_id); + return Ok(false); + } + // See if we have the node in our routing table // We should, because relays are chosen by nodes that have established connectivity and // should be mutually in each others routing tables. 
The node needing the relay will be @@ -1110,7 +1131,7 @@ impl NetworkManager { match routing_table.lookup_node_ref(recipient_id) { Ok(v) => v.map(|nr| nr.default_filtered()), Err(e) => { - log_net!(debug "failed to look up recipient node for relay, dropping outbound relayed packet: {}" ,e); + log_net!(debug "failed to look up recipient node for relay, dropping relayed envelope: {}" ,e); return Ok(false); } } @@ -1196,4 +1217,48 @@ impl NetworkManager { pub fn restart_network(&self) { self.net().restart_network(); } + + // If some other subsystem believes our dial info is no longer valid, this will trigger + // a re-check of the dial info and network class + pub fn set_needs_dial_info_check(&self, routing_domain: RoutingDomain) { + match routing_domain { + RoutingDomain::LocalNetwork => { + // nothing here yet + } + RoutingDomain::PublicInternet => { + self.net().set_needs_public_dial_info_check(None); + } + } + } + + // Report peer info changes + pub fn report_peer_info_change(&mut self, peer_info: Arc) { + let mut inner = self.inner.lock(); + if let Some(address_check) = inner.address_check.as_mut() { + address_check.report_peer_info_change(peer_info); + } + } + + // Determine if our IP address has changed + // this means we should recreate our public dial info if it is not static and rediscover it + // Wait until we have received confirmation from N different peers + pub fn report_socket_address_change( + &self, + routing_domain: RoutingDomain, // the routing domain this flow is over + socket_address: SocketAddress, // the socket address as seen by the remote peer + old_socket_address: Option, // the socket address previously for this peer + flow: Flow, // the flow used + reporting_peer: NodeRef, // the peer's noderef reporting the socket address + ) { + let mut inner = self.inner.lock(); + if let Some(address_check) = inner.address_check.as_mut() { + address_check.report_socket_address_change( + routing_domain, + socket_address, + old_socket_address, + flow, + reporting_peer, + ); + } + } } diff --git a/veilid-core/src/network_manager/native/discovery_context.rs b/veilid-core/src/network_manager/native/discovery_context.rs index de8bd572..33de850c 100644 --- a/veilid-core/src/network_manager/native/discovery_context.rs +++ b/veilid-core/src/network_manager/native/discovery_context.rs @@ -6,6 +6,7 @@ use futures_util::stream::FuturesUnordered; const PORT_MAP_VALIDATE_TRY_COUNT: usize = 3; const PORT_MAP_VALIDATE_DELAY_MS: u32 = 500; const PORT_MAP_TRY_COUNT: usize = 3; +const EXTERNAL_INFO_VALIDATIONS: usize = 5; // Detection result of dial info detection futures #[derive(Clone, Debug)] @@ -17,9 +18,16 @@ pub enum DetectedDialInfo { // Detection result of external address #[derive(Clone, Debug)] pub struct DetectionResult { + pub config: DiscoveryContextConfig, pub ddi: DetectedDialInfo, pub external_address_types: AddressTypeSet, - pub local_port: u16, +} + +#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)] +pub struct DiscoveryContextConfig { + pub protocol_type: ProtocolType, + pub address_type: AddressType, + pub port: u16, } // Result of checking external address @@ -31,23 +39,16 @@ struct ExternalInfo { } struct DiscoveryContextInner { - // first node contacted - external_1: Option, - // second node contacted - external_2: Option, + external_info: Vec, } struct DiscoveryContextUnlockedInner { routing_table: RoutingTable, net: Network, - clear_network_callback: ClearNetworkCallback, + config: DiscoveryContextConfig, // per-protocol intf_addrs: Vec, - 
existing_external_address: Option, - protocol_type: ProtocolType, - address_type: AddressType, - port: u16, } #[derive(Clone)] @@ -56,53 +57,23 @@ pub(super) struct DiscoveryContext { inner: Arc>, } -pub(super) type ClearNetworkCallback = Arc SendPinBoxFuture<()> + Send + Sync>; - impl DiscoveryContext { - pub fn new( - routing_table: RoutingTable, - net: Network, - protocol_type: ProtocolType, - address_type: AddressType, - port: u16, - clear_network_callback: ClearNetworkCallback, - ) -> Self { - let intf_addrs = - Self::get_local_addresses(routing_table.clone(), protocol_type, address_type); - - // Get the existing external address to check to see if it has changed - let existing_dial_info = routing_table.all_filtered_dial_info_details( - RoutingDomain::PublicInternet.into(), - &DialInfoFilter::default() - .with_address_type(address_type) - .with_protocol_type(protocol_type), + pub fn new(routing_table: RoutingTable, net: Network, config: DiscoveryContextConfig) -> Self { + let intf_addrs = Self::get_local_addresses( + routing_table.clone(), + config.protocol_type, + config.address_type, ); - let existing_external_address = if existing_dial_info.len() == 1 { - Some( - existing_dial_info - .first() - .unwrap() - .dial_info - .socket_address(), - ) - } else { - None - }; Self { unlocked_inner: Arc::new(DiscoveryContextUnlockedInner { routing_table, net, - clear_network_callback, + config, intf_addrs, - existing_external_address, - protocol_type, - address_type, - port, }), inner: Arc::new(Mutex::new(DiscoveryContextInner { - external_1: None, - external_2: None, + external_info: Vec::new(), })), } } @@ -153,12 +124,12 @@ impl DiscoveryContext { } ); - log_net!( + log_network_result!( debug "request_public_address {:?}: Value({:?})", node_ref, res.answer ); - res.answer.map(|si| si.socket_address) + res.answer.opt_sender_info.map(|si| si.socket_address) } // find fast peers with a particular address type, and ask them to tell us what our external address is @@ -171,8 +142,10 @@ impl DiscoveryContext { c.network.dht.max_find_node_count as usize }; let routing_domain = RoutingDomain::PublicInternet; - let protocol_type = self.unlocked_inner.protocol_type; - let address_type = self.unlocked_inner.address_type; + + let protocol_type = self.unlocked_inner.config.protocol_type; + let address_type = self.unlocked_inner.config.address_type; + let port = self.unlocked_inner.config.port; // Build an filter that matches our protocol and address type // and excludes relayed nodes so we can get an accurate external address @@ -228,7 +201,6 @@ impl DiscoveryContext { } // For each peer, ask them for our public address, filtering on desired dial info - let mut unord = FuturesUnordered::new(); let get_public_address_func = |node: NodeRef| { let this = self.clone(); @@ -242,7 +214,7 @@ impl DiscoveryContext { let dial_info = this .unlocked_inner .net - .make_dial_info(address, this.unlocked_inner.protocol_type); + .make_dial_info(address, protocol_type); return Some(ExternalInfo { dial_info, address, @@ -254,46 +226,65 @@ impl DiscoveryContext { }; let mut external_address_infos = Vec::new(); - - for node in nodes.iter().take(nodes.len() - 1).cloned() { + let mut unord = FuturesUnordered::new(); + for node in nodes.iter().cloned() { let gpa_future = get_public_address_func(node); unord.push(gpa_future); - // Always process two at a time so we get both addresses in parallel if possible - if unord.len() == 2 { + // Always process N at a time so we get all addresses in parallel if possible + if unord.len() == 
EXTERNAL_INFO_VALIDATIONS { // Process one if let Some(Some(ei)) = unord.next().in_current_span().await { external_address_infos.push(ei); - if external_address_infos.len() == 2 { + if external_address_infos.len() == EXTERNAL_INFO_VALIDATIONS { break; } } } } // Finish whatever is left if we need to - if external_address_infos.len() < 2 { + if external_address_infos.len() < EXTERNAL_INFO_VALIDATIONS { while let Some(res) = unord.next().in_current_span().await { if let Some(ei) = res { external_address_infos.push(ei); - if external_address_infos.len() == 2 { + if external_address_infos.len() == EXTERNAL_INFO_VALIDATIONS { break; } } } } - if external_address_infos.len() < 2 { + if external_address_infos.len() < EXTERNAL_INFO_VALIDATIONS { log_net!(debug "not enough peers responded with an external address for type {:?}:{:?}", protocol_type, address_type); return false; } + // Try to make preferential port come first + external_address_infos.sort_by(|a, b| { + let acmp = a.address.ip_addr().cmp(&b.address.ip_addr()); + if acmp != cmp::Ordering::Equal { + return acmp; + } + if a.address.port() == b.address.port() { + return cmp::Ordering::Equal; + } + if a.address.port() == port { + return cmp::Ordering::Less; + } + if b.address.port() == port { + return cmp::Ordering::Greater; + } + a.address.port().cmp(&b.address.port()) + }); + { let mut inner = self.inner.lock(); - inner.external_1 = Some(external_address_infos[0].clone()); - log_net!(debug "external_1: {:?}", inner.external_1); - inner.external_2 = Some(external_address_infos[1].clone()); - log_net!(debug "external_2: {:?}", inner.external_2); + inner.external_info = external_address_infos; + log_net!(debug "External Addresses: ({:?}:{:?})[{}]", + protocol_type, + address_type, + inner.external_info.iter().map(|x| format!("{} <- {}",x.address, x.node)).collect::>().join(", ")); } true @@ -323,11 +314,12 @@ impl DiscoveryContext { #[instrument(level = "trace", skip(self), ret)] async fn try_upnp_port_mapping(&self) -> Option { - let protocol_type = self.unlocked_inner.protocol_type; + let protocol_type = self.unlocked_inner.config.protocol_type; + let address_type = self.unlocked_inner.config.address_type; + let local_port = self.unlocked_inner.config.port; + let low_level_protocol_type = protocol_type.low_level_protocol_type(); - let address_type = self.unlocked_inner.address_type; - let local_port = self.unlocked_inner.port; - let external_1 = self.inner.lock().external_1.as_ref().unwrap().clone(); + let external_1 = self.inner.lock().external_info.first().unwrap().clone(); let igd_manager = self.unlocked_inner.net.unlocked_inner.igd_manager.clone(); let mut tries = 0; @@ -410,7 +402,7 @@ impl DiscoveryContext { &self, unord: &mut FuturesUnordered>>, ) { - let external_1 = self.inner.lock().external_1.as_ref().unwrap().clone(); + let external_1 = self.inner.lock().external_info.first().cloned().unwrap(); let this = self.clone(); let do_no_nat_fut: SendPinBoxFuture> = Box::pin(async move { @@ -421,22 +413,22 @@ impl DiscoveryContext { { // Add public dial info with Direct dialinfo class Some(DetectionResult { + config: this.unlocked_inner.config, ddi: DetectedDialInfo::Detected(DialInfoDetail { dial_info: external_1.dial_info.clone(), class: DialInfoClass::Direct, }), external_address_types: AddressTypeSet::only(external_1.address.address_type()), - local_port: this.unlocked_inner.port, }) } else { // Add public dial info with Blocked dialinfo class Some(DetectionResult { + config: this.unlocked_inner.config, ddi: 
DetectedDialInfo::Detected(DialInfoDetail { dial_info: external_1.dial_info.clone(), class: DialInfoClass::Blocked, }), external_address_types: AddressTypeSet::only(external_1.address.address_type()), - local_port: this.unlocked_inner.port, }) } }); @@ -449,28 +441,69 @@ impl DiscoveryContext { &self, unord: &mut FuturesUnordered>>, ) { - // Get the external dial info for our use here - let (external_1, external_2) = { + let external_info = { let inner = self.inner.lock(); - ( - inner.external_1.as_ref().unwrap().clone(), - inner.external_2.as_ref().unwrap().clone(), - ) + inner.external_info.clone() }; + let local_port = self.unlocked_inner.config.port; - // If we have two different external address/port combinations, then this is a symmetric NAT - if external_2.address != external_1.address { + // Get the external dial info histogram for our use here + let mut external_info_addr_port_hist = HashMap::::new(); + let mut external_info_addr_hist = HashMap::::new(); + for ei in &external_info { + external_info_addr_port_hist + .entry(ei.address) + .and_modify(|n| *n += 1) + .or_insert(1); + external_info_addr_hist + .entry(ei.address.address()) + .and_modify(|n| *n += 1) + .or_insert(1); + } + + // If we have two different external addresses, then this is a symmetric NAT + // If just the port differs, and one is the preferential port we still accept + // this as an inbound capable dialinfo for holepunch + let different_addresses = external_info_addr_hist.len() > 1; + let mut best_external_info = None; + let mut local_port_matching_external_info = None; + let mut external_address_types = AddressTypeSet::new(); + + // Get the most popular external port from our sampling + // There will always be a best external info + let mut best_ei_address = None; + let mut best_ei_cnt = 0; + for eiph in &external_info_addr_port_hist { + if *eiph.1 > best_ei_cnt { + best_ei_address = Some(*eiph.0); + best_ei_cnt = *eiph.1; + } + } + // In preference order, pick out the best external address and if we have one the one that + // matches our local port number (may be the same) + for ei in &external_info { + if ei.address.port() == local_port && local_port_matching_external_info.is_none() { + local_port_matching_external_info = Some(ei.clone()); + } + if best_ei_address.unwrap() == ei.address && best_external_info.is_none() { + best_external_info = Some(ei.clone()); + } + external_address_types |= ei.address.address_type(); + } + + // There is a popular port on the best external info (more than one external address sample with same port) + let same_address_has_popular_port = !different_addresses && best_ei_cnt > 1; + + // If we have different addresses in our samples, or no single address has a popular port + // then we consider this a symmetric NAT + if different_addresses || !same_address_has_popular_port { let this = self.clone(); let do_symmetric_nat_fut: SendPinBoxFuture> = Box::pin(async move { Some(DetectionResult { + config: this.unlocked_inner.config, ddi: DetectedDialInfo::SymmetricNAT, - external_address_types: AddressTypeSet::only( - external_1.address.address_type(), - ) | AddressTypeSet::only( - external_2.address.address_type(), - ), - local_port: this.unlocked_inner.port, + external_address_types, }) }); unord.push(do_symmetric_nat_fut); @@ -478,11 +511,12 @@ impl DiscoveryContext { } // Manual Mapping Detection + // If we have no external address that matches our local port, then lets try that port + // on our best external address and see if there's a port forward someone added manually 
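+ // (for example, a static port-forward rule on the router mapping that WAN port to this node's listener port)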
/////////// let this = self.clone(); - let local_port = self.unlocked_inner.port; - if external_1.dial_info.port() != local_port { - let c_external_1 = external_1.clone(); + if local_port_matching_external_info.is_none() && best_external_info.is_some() { + let c_external_1 = best_external_info.as_ref().unwrap().clone(); let c_this = this.clone(); let do_manual_map_fut: SendPinBoxFuture> = Box::pin(async move { @@ -501,6 +535,7 @@ impl DiscoveryContext { { // Add public dial info with Direct dialinfo class return Some(DetectionResult { + config: c_this.unlocked_inner.config, ddi: DetectedDialInfo::Detected(DialInfoDetail { dial_info: external_1_dial_info_with_local_port, class: DialInfoClass::Direct, @@ -508,7 +543,6 @@ impl DiscoveryContext { external_address_types: AddressTypeSet::only( c_external_1.address.address_type(), ), - local_port: c_this.unlocked_inner.port, }); } @@ -534,7 +568,7 @@ impl DiscoveryContext { let mut ord = FuturesOrdered::new(); let c_this = this.clone(); - let c_external_1 = external_1.clone(); + let c_external_1 = external_info.first().cloned().unwrap(); let do_full_cone_fut: SendPinBoxFuture> = Box::pin(async move { // Let's see what kind of NAT we have @@ -551,6 +585,7 @@ impl DiscoveryContext { // Add public dial info with full cone NAT network class return Some(DetectionResult { + config: c_this.unlocked_inner.config, ddi: DetectedDialInfo::Detected(DialInfoDetail { dial_info: c_external_1.dial_info, class: DialInfoClass::FullConeNAT, @@ -558,7 +593,6 @@ impl DiscoveryContext { external_address_types: AddressTypeSet::only( c_external_1.address.address_type(), ), - local_port: c_this.unlocked_inner.port, }); } None @@ -566,8 +600,8 @@ impl DiscoveryContext { ord.push_back(do_full_cone_fut); let c_this = this.clone(); - let c_external_1 = external_1.clone(); - let c_external_2 = external_2.clone(); + let c_external_1 = external_info.first().cloned().unwrap(); + let c_external_2 = external_info.get(1).cloned().unwrap(); let do_restricted_cone_fut: SendPinBoxFuture> = Box::pin(async move { // We are restricted, determine what kind of restriction @@ -586,6 +620,7 @@ impl DiscoveryContext { { // Got a reply from a non-default port, which means we're only address restricted return Some(DetectionResult { + config: c_this.unlocked_inner.config, ddi: DetectedDialInfo::Detected(DialInfoDetail { dial_info: c_external_1.dial_info.clone(), class: DialInfoClass::AddressRestrictedNAT, @@ -593,11 +628,11 @@ impl DiscoveryContext { external_address_types: AddressTypeSet::only( c_external_1.address.address_type(), ), - local_port: c_this.unlocked_inner.port, }); } // Didn't get a reply from a non-default port, which means we are also port restricted Some(DetectionResult { + config: c_this.unlocked_inner.config, ddi: DetectedDialInfo::Detected(DialInfoDetail { dial_info: c_external_1.dial_info.clone(), class: DialInfoClass::PortRestrictedNAT, @@ -605,7 +640,6 @@ impl DiscoveryContext { external_address_types: AddressTypeSet::only( c_external_1.address.address_type(), ), - local_port: c_this.unlocked_inner.port, }) }); ord.push_back(do_restricted_cone_fut); @@ -656,30 +690,6 @@ impl DiscoveryContext { return; } - // Did external address change from the last time we made dialinfo? 
- // Disregard port for this because we only need to know if the ip address has changed - // If the port has changed it will change only for this protocol and will be overwritten individually by each protocol discover() - let some_clear_network_callback = { - let inner = self.inner.lock(); - let ext_1 = inner.external_1.as_ref().unwrap().address.address(); - let ext_2 = inner.external_2.as_ref().unwrap().address.address(); - if (ext_1 != ext_2) - || Some(ext_1) - != self - .unlocked_inner - .existing_external_address - .map(|ea| ea.address()) - { - // External address was not found, or has changed, go ahead and clear the network so we can do better - Some(self.unlocked_inner.clear_network_callback.clone()) - } else { - None - } - }; - if let Some(clear_network_callback) = some_clear_network_callback { - clear_network_callback().in_current_span().await; - } - // UPNP Automatic Mapping /////////// if enable_upnp { @@ -691,6 +701,7 @@ impl DiscoveryContext { if let Some(external_mapped_dial_info) = this.try_upnp_port_mapping().await { // Got a port mapping, let's use it return Some(DetectionResult { + config: this.unlocked_inner.config, ddi: DetectedDialInfo::Detected(DialInfoDetail { dial_info: external_mapped_dial_info.clone(), class: DialInfoClass::Mapped, @@ -698,7 +709,6 @@ impl DiscoveryContext { external_address_types: AddressTypeSet::only( external_mapped_dial_info.address_type(), ), - local_port: this.unlocked_inner.port, }); } None @@ -710,9 +720,20 @@ impl DiscoveryContext { /////////// // If our local interface list contains external_1 then there is no NAT in place - let external_1 = self.inner.lock().external_1.as_ref().unwrap().clone(); + let local_address_in_external_info = self + .inner + .lock() + .external_info + .iter() + .find_map(|ei| { + self.unlocked_inner + .intf_addrs + .contains(&ei.address) + .then_some(true) + }) + .unwrap_or_default(); - if self.unlocked_inner.intf_addrs.contains(&external_1.address) { + if local_address_in_external_info { self.protocol_process_no_nat(unord).await; } else { self.protocol_process_nat(unord).await; diff --git a/veilid-core/src/network_manager/native/mod.rs b/veilid-core/src/network_manager/native/mod.rs index c71a4c88..a2b726d4 100644 --- a/veilid-core/src/network_manager/native/mod.rs +++ b/veilid-core/src/network_manager/native/mod.rs @@ -30,6 +30,10 @@ use std::path::{Path, PathBuf}; ///////////////////////////////////////////////////////////////// +pub const UPDATE_NETWORK_CLASS_TASK_TICK_PERIOD_SECS: u32 = 1; +pub const NETWORK_INTERFACES_TASK_TICK_PERIOD_SECS: u32 = 1; +pub const UPNP_TASK_TICK_PERIOD_SECS: u32 = 1; + pub const PEEK_DETECT_LEN: usize = 64; cfg_if! 
{ @@ -168,9 +172,15 @@ impl Network { routing_table, connection_manager, interfaces: NetworkInterfaces::new(), - update_network_class_task: TickTask::new("update_network_class_task", 1), - network_interfaces_task: TickTask::new("network_interfaces_task", 1), - upnp_task: TickTask::new("upnp_task", 1), + update_network_class_task: TickTask::new( + "update_network_class_task", + UPDATE_NETWORK_CLASS_TASK_TICK_PERIOD_SECS, + ), + network_interfaces_task: TickTask::new( + "network_interfaces_task", + NETWORK_INTERFACES_TASK_TICK_PERIOD_SECS, + ), + upnp_task: TickTask::new("upnp_task", UPNP_TASK_TICK_PERIOD_SECS), network_task_lock: AsyncMutex::new(()), igd_manager: igd_manager::IGDManager::new(config.clone()), } @@ -543,7 +553,7 @@ impl Network { network_result_value_or_log!(ph.clone() .send_message(data.clone(), peer_socket_addr) .await - .wrap_err("sending data to existing connection")? => [ format!(": data.len={}, flow={:?}", data.len(), flow) ] + .wrap_err("sending data to existing connection")? => [ format!(": data.len={}, flow={:?}", data.len(), flow) ] { return Ok(SendDataToExistingFlowResult::NotSent(data)); } ); // Network accounting @@ -738,22 +748,6 @@ impl Network { self.register_all_dial_info(&mut editor_public_internet, &mut editor_local_network) .await?; - // Set network class statically if we have static public dialinfo - let detect_address_changes = { - let c = self.config.get(); - c.network.detect_address_changes - }; - if !detect_address_changes { - let inner = self.inner.lock(); - if !inner.static_public_dial_info.is_empty() { - editor_public_internet.set_network_class(Some(NetworkClass::InboundCapable)); - } - } - - // Set network class statically for local network routing domain until - // we can do some reachability analysis eventually - editor_local_network.set_network_class(Some(NetworkClass::InboundCapable)); - // Commit routing domain edits if editor_public_internet.commit(true).await { editor_public_internet.publish(); diff --git a/veilid-core/src/network_manager/native/start_protocols.rs b/veilid-core/src/network_manager/native/start_protocols.rs index c330cd56..140d5b15 100644 --- a/veilid-core/src/network_manager/native/start_protocols.rs +++ b/veilid-core/src/network_manager/native/start_protocols.rs @@ -234,7 +234,6 @@ impl Network { // Register the public address editor_public_internet.add_dial_info(pdi.clone(), DialInfoClass::Direct); - editor_public_internet.set_network_class(Some(NetworkClass::InboundCapable)); // See if this public address is also a local interface address we haven't registered yet if self.is_stable_interface_address(pdi_addr.ip()) { @@ -242,7 +241,6 @@ impl Network { DialInfo::udp_from_socketaddr(pdi_addr), DialInfoClass::Direct, ); - editor_local_network.set_network_class(Some(NetworkClass::InboundCapable)); } } } @@ -253,7 +251,6 @@ impl Network { // if no other public address is specified if !detect_address_changes && public_address.is_none() && di.address().is_global() { editor_public_internet.add_dial_info(di.clone(), DialInfoClass::Direct); - editor_public_internet.set_network_class(Some(NetworkClass::InboundCapable)); } // Register interface dial info as well since the address is on the local interface diff --git a/veilid-core/src/network_manager/native/tasks/update_network_class_task.rs b/veilid-core/src/network_manager/native/tasks/update_network_class_task.rs index d1b6dc6f..a4c56e3a 100644 --- a/veilid-core/src/network_manager/native/tasks/update_network_class_task.rs +++ 
b/veilid-core/src/network_manager/native/tasks/update_network_class_task.rs @@ -3,6 +3,8 @@ use super::*; use futures_util::stream::FuturesUnordered; use stop_token::future::FutureExt as StopTokenFutureExt; +type InboundProtocolMap = HashMap<(AddressType, LowLevelProtocolType, u16), Vec>; + impl Network { #[instrument(parent = None, level = "trace", skip(self), err)] pub async fn update_network_class_task_routine( @@ -14,99 +16,65 @@ impl Network { let _guard = self.unlocked_inner.network_task_lock.lock().await; // Do the public dial info check - let out = self.do_public_dial_info_check(stop_token, l, t).await; + let finished = self.do_public_dial_info_check(stop_token, l, t).await?; // Done with public dial info check - { + if finished { let mut inner = self.inner.lock(); inner.needs_public_dial_info_check = false; - inner.public_dial_info_check_punishment = None; } - out + Ok(()) } - #[instrument(level = "trace", skip(self), err)] - pub async fn update_with_detected_dial_info(&self, ddi: DetectedDialInfo) -> EyreResult<()> { - let existing_network_class = self - .routing_table() - .get_network_class(RoutingDomain::PublicInternet) - .unwrap_or_default(); - + #[instrument(level = "trace", skip(self, editor))] + fn process_detected_dial_info( + &self, + editor: &mut RoutingDomainEditorPublicInternet, + ddi: DetectedDialInfo, + ) { match ddi { - DetectedDialInfo::SymmetricNAT => { - // If we get any symmetric nat dialinfo, this whole network class is outbound only, - // and all dial info should be treated as invalid - if !matches!(existing_network_class, NetworkClass::OutboundOnly) { - let mut editor = self.routing_table().edit_public_internet_routing_domain(); - - editor.clear_dial_info_details(None, None); - editor.set_network_class(Some(NetworkClass::OutboundOnly)); - editor.commit(true).await; - } - } + DetectedDialInfo::SymmetricNAT => {} DetectedDialInfo::Detected(did) => { - // get existing dial info into table by protocol/address type - let mut existing_dial_info = - BTreeMap::<(ProtocolType, AddressType), DialInfoDetail>::new(); - for did in self.routing_table().all_filtered_dial_info_details( - RoutingDomain::PublicInternet.into(), - &DialInfoFilter::all(), - ) { - // Only need to keep one per pt/at pair, since they will all have the same dialinfoclass - existing_dial_info.insert( - (did.dial_info.protocol_type(), did.dial_info.address_type()), - did, - ); - } - // We got a dial info, upgrade everything unless we are fixed to outbound only due to a symmetric nat - if !matches!(existing_network_class, NetworkClass::OutboundOnly) { - // Get existing dial info for protocol/address type combination - let pt = did.dial_info.protocol_type(); - let at = did.dial_info.address_type(); + // We got a dialinfo, add it and tag us as inbound capable + editor.add_dial_info(did.dial_info.clone(), did.class); + } + } + } - // See what operations to perform with this dialinfo - let mut clear = false; - let mut add = false; + #[instrument(level = "trace", skip(self, editor))] + fn update_with_detection_result( + &self, + editor: &mut RoutingDomainEditorPublicInternet, + inbound_protocol_map: &InboundProtocolMap, + dr: DetectionResult, + ) { + // Found some new dial info for this protocol/address combination + self.process_detected_dial_info(editor, dr.ddi.clone()); - if let Some(edi) = existing_dial_info.get(&(pt, at)) { - // Is the dial info class better than our existing dial info? - // Or is the new dial info the same class, but different? Only change if things are different. 
- if did.class < edi.class - || (did.class == edi.class && did.dial_info != edi.dial_info) - { - // Better or same dial info class was found, clear existing dialinfo for this pt/at pair - // Only keep one dial info per protocol/address type combination - clear = true; - add = true; - } - // Otherwise, don't upgrade, don't add, this is worse than what we have already - } else { - // No existing dial info of this type accept it, no need to upgrade, but add it - add = true; - } - - if clear || add { - let mut editor = self.routing_table().edit_public_internet_routing_domain(); - - if clear { - editor.clear_dial_info_details( - Some(did.dial_info.address_type()), - Some(did.dial_info.protocol_type()), - ); - } - - if add { - editor.add_dial_info(did.dial_info.clone(), did.class); - } - - editor.set_network_class(Some(NetworkClass::InboundCapable)); - editor.commit(true).await; + // Add additional dialinfo for protocols on the same port + match &dr.ddi { + DetectedDialInfo::SymmetricNAT => {} + DetectedDialInfo::Detected(did) => { + let ipmkey = ( + did.dial_info.address_type(), + did.dial_info.protocol_type().low_level_protocol_type(), + dr.config.port, + ); + if let Some(ipm) = inbound_protocol_map.get(&ipmkey) { + for additional_pt in ipm.iter().skip(1) { + // Make dialinfo for additional protocol type + let additional_ddi = DetectedDialInfo::Detected(DialInfoDetail { + dial_info: self + .make_dial_info(did.dial_info.socket_address(), *additional_pt), + class: did.class, + }); + // Add additional dialinfo + self.process_detected_dial_info(editor, additional_ddi); } } } } - Ok(()) } #[instrument(level = "trace", skip(self), err)] @@ -115,7 +83,7 @@ impl Network { stop_token: StopToken, _l: Timestamp, _t: Timestamp, - ) -> EyreResult<()> { + ) -> EyreResult { // Figure out if we can optimize TCP/WS checking since they are often on the same port let (protocol_config, inbound_protocol_map) = { let mut inner = self.inner.lock(); @@ -166,7 +134,7 @@ impl Network { .into_iter() .collect(); - // Set most permissive network config + // Set most permissive network config and start from scratch let mut editor = self.routing_table().edit_public_internet_routing_domain(); editor.setup_network( protocol_config.outbound, @@ -174,47 +142,26 @@ impl Network { protocol_config.family_global, protocol_config.public_internet_capabilities.clone(), ); + editor.clear_dial_info_details(None, None); editor.commit(true).await; - // Create a callback to clear the network if we need to 'start over' - let this = self.clone(); - let clear_network_callback: ClearNetworkCallback = Arc::new(move || { - let this = this.clone(); - Box::pin(async move { - // Ensure we only do this once per network class discovery - { - let mut inner = this.inner.lock(); - if inner.network_already_cleared { - return; - } - inner.network_already_cleared = true; - } - let mut editor = this.routing_table().edit_public_internet_routing_domain(); - editor.clear_dial_info_details(None, None); - editor.set_network_class(None); - editor.commit(true).await; - }) - }); - // Process all protocol and address combinations let mut unord = FuturesUnordered::new(); - - for ((at, _llpt, port), protocols) in &inbound_protocol_map { - let first_pt = protocols.first().unwrap(); - - let discovery_context = DiscoveryContext::new( - self.routing_table(), - self.clone(), - *first_pt, - *at, - *port, - clear_network_callback.clone(), - ); + let mut context_configs = HashSet::new(); + for ((address_type, _llpt, port), protocols) in inbound_protocol_map.clone() { + let 
protocol_type = *protocols.first().unwrap(); + let dcc = DiscoveryContextConfig { + protocol_type, + address_type, + port, + }; + context_configs.insert(dcc); + let discovery_context = DiscoveryContext::new(self.routing_table(), self.clone(), dcc); discovery_context.discover(&mut unord).await; } // Wait for all discovery futures to complete and apply discoverycontexts - let mut all_address_types = AddressTypeSet::new(); + let mut external_address_types = AddressTypeSet::new(); loop { match unord .next() @@ -223,37 +170,17 @@ impl Network { .await { Ok(Some(Some(dr))) => { - // Found some new dial info for this protocol/address combination - self.update_with_detected_dial_info(dr.ddi.clone()).await?; + // Got something for this config + context_configs.remove(&dr.config); // Add the external address kinds to the set we've seen - all_address_types |= dr.external_address_types; + external_address_types |= dr.external_address_types; - // Add additional dialinfo for protocols on the same port - if let DetectedDialInfo::Detected(did) = &dr.ddi { - let ipmkey = ( - did.dial_info.address_type(), - did.dial_info.protocol_type().low_level_protocol_type(), - dr.local_port, - ); - if let Some(ipm) = inbound_protocol_map.get(&ipmkey) { - for additional_pt in ipm.iter().skip(1) { - // Make dialinfo for additional protocol type - let additional_ddi = DetectedDialInfo::Detected(DialInfoDetail { - dial_info: self.make_dial_info( - did.dial_info.socket_address(), - *additional_pt, - ), - class: did.class, - }); - // Add additional dialinfo - self.update_with_detected_dial_info(additional_ddi).await?; - } - } - } + // Import the dialinfo + self.update_with_detection_result(&mut editor, &inbound_protocol_map, dr); } Ok(Some(None)) => { - // Found no new dial info for this protocol/address combination + // Found no dial info for this protocol/address combination } Ok(None) => { // All done, normally @@ -261,20 +188,34 @@ impl Network { } Err(_) => { // Stop token, exit early without error propagation - return Ok(()); + return Ok(true); } } } - // All done + // See if we have any discovery contexts that did not complete for a + // particular protocol type if its external address type was supported. 
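// A compact sketch of the completion bookkeeping used in this routine: each
// (protocol, address type, port) combination is a hashable config key, keys are
// removed as their detection results arrive, and in the check that follows the
// whole pass only counts as failed when a leftover key's address type was actually
// observed externally (otherwise that address family simply isn't reachable and is
// not retried). The types here are simplified stand-ins for DiscoveryContextConfig.
use std::collections::HashSet;

#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
enum AddressType { IPV4, IPV6 }

#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
struct ContextKey {
    protocol: &'static str,
    address_type: AddressType,
    port: u16,
}

fn discovery_succeeded(unfinished: &HashSet<ContextKey>, seen: &HashSet<AddressType>) -> bool {
    // Fail (and retry) only if something we *should* have been able to detect never finished.
    !unfinished.iter().any(|k| seen.contains(&k.address_type))
}

fn main() {
    let mut unfinished: HashSet<ContextKey> = HashSet::new();
    unfinished.insert(ContextKey { protocol: "UDP", address_type: AddressType::IPV4, port: 5150 });
    unfinished.insert(ContextKey { protocol: "TCP", address_type: AddressType::IPV6, port: 5150 });

    // Simulate results arriving: UDP/IPv4 finished, and IPv4 was seen externally.
    unfinished.remove(&ContextKey { protocol: "UDP", address_type: AddressType::IPV4, port: 5150 });
    let seen: HashSet<AddressType> = [AddressType::IPV4].into_iter().collect();

    // The leftover TCP/IPv6 context is ignored because IPv6 was never seen externally.
    assert!(discovery_succeeded(&unfinished, &seen));
}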
+ let mut success = true; + for cc in &context_configs { + if external_address_types.contains(cc.address_type) { + success = false; + break; + } + } - log_net!(debug "Network class discovery finished with address_types {:?}", all_address_types); + if !success { + log_net!(debug "Network class discovery failed, trying again, needed {:?}", context_configs); + return Ok(false); + } + + // All done + log_net!(debug "Network class discovery finished with address_types {:?}", external_address_types); // Set the address types we've seen editor.setup_network( protocol_config.outbound, protocol_config.inbound, - all_address_types, + external_address_types, protocol_config.public_internet_capabilities, ); if editor.commit(true).await { @@ -298,7 +239,7 @@ impl Network { } } - Ok(()) + Ok(true) } /// Make a dialinfo from an address and protocol type diff --git a/veilid-core/src/network_manager/network_connection.rs b/veilid-core/src/network_manager/network_connection.rs index cf2279f4..fe94cfdb 100644 --- a/veilid-core/src/network_manager/network_connection.rs +++ b/veilid-core/src/network_manager/network_connection.rs @@ -82,16 +82,29 @@ pub struct NetworkConnectionStats { last_message_recv_time: Option, } +/// Represents a connection in the connection table for connection-oriented protocols #[derive(Debug)] pub(in crate::network_manager) struct NetworkConnection { + /// A unique id for this connection connection_id: NetworkConnectionId, + /// The dial info used to make this connection if it was made with 'connect' + /// None if the connection was 'accepted' + opt_dial_info: Option, + /// The network flow 5-tuple this connection is over flow: Flow, + /// Each connection has a processor and this is the task we wait for to ensure it exits cleanly processor: Option>, + /// When this connection was connected or accepted established_time: Timestamp, + /// Statistics about network traffic stats: Arc>, + /// To send data out this connection, it is places in this channel sender: flume::Sender<(Option, Vec)>, + /// Drop this when we want to drop the connection stop_source: Option, + /// The node we are responsible for protecting the connection for if it is protected protected_nr: Option, + /// The number of references to the network connection that exist (handles) ref_count: usize, } @@ -110,6 +123,7 @@ impl NetworkConnection { Self { connection_id: id, + opt_dial_info: None, flow, processor: None, established_time: Timestamp::now(), @@ -129,6 +143,7 @@ impl NetworkConnection { manager_stop_token: StopToken, protocol_connection: ProtocolNetworkConnection, connection_id: NetworkConnectionId, + opt_dial_info: Option, ) -> Self { // Get flow let flow = protocol_connection.flow(); @@ -164,6 +179,7 @@ impl NetworkConnection { // Return the connection Self { connection_id, + opt_dial_info, flow, processor: Some(processor), established_time: Timestamp::now(), @@ -183,6 +199,10 @@ impl NetworkConnection { self.flow } + pub fn dial_info(&self) -> Option { + self.opt_dial_info.clone() + } + #[expect(dead_code)] pub fn unique_flow(&self) -> UniqueFlow { UniqueFlow { @@ -448,32 +468,28 @@ impl NetworkConnection { pub fn debug_print(&self, cur_ts: Timestamp) -> String { format!( - "{} <- {} | {} | est {} sent {} rcvd {} refcount {}{}", - self.flow.remote_address(), - self.flow - .local() - .map(|x| x.to_string()) - .unwrap_or("---".to_owned()), + "{} | {} | est {} sent {} rcvd {} refcount {}{}", + self.flow, self.connection_id.as_u64(), - debug_duration( + display_duration( cur_ts .as_u64() 
.saturating_sub(self.established_time.as_u64()) ), self.stats() .last_message_sent_time - .map(|ts| debug_duration(cur_ts.as_u64().saturating_sub(ts.as_u64()))) + .map(|ts| display_duration(cur_ts.as_u64().saturating_sub(ts.as_u64()))) .unwrap_or("---".to_owned()), self.stats() .last_message_recv_time - .map(|ts| debug_duration(cur_ts.as_u64().saturating_sub(ts.as_u64()))) + .map(|ts| display_duration(cur_ts.as_u64().saturating_sub(ts.as_u64()))) .unwrap_or("---".to_owned()), self.ref_count, if let Some(pnr) = &self.protected_nr { format!(" PROTECTED:{}", pnr) } else { "".to_owned() - } + }, ) } } diff --git a/veilid-core/src/network_manager/receipt_manager.rs b/veilid-core/src/network_manager/receipt_manager.rs index 149f61f4..959206fe 100644 --- a/veilid-core/src/network_manager/receipt_manager.rs +++ b/veilid-core/src/network_manager/receipt_manager.rs @@ -74,6 +74,7 @@ impl fmt::Debug for ReceiptRecordCallbackType { } } +#[derive(Debug)] struct ReceiptRecord { expiration_ts: Timestamp, receipt: Receipt, @@ -82,18 +83,6 @@ struct ReceiptRecord { receipt_callback: ReceiptRecordCallbackType, } -impl fmt::Debug for ReceiptRecord { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("ReceiptRecord") - .field("expiration_ts", &self.expiration_ts) - .field("receipt", &self.receipt) - .field("expected_returns", &self.expected_returns) - .field("returns_so_far", &self.returns_so_far) - .field("receipt_callback", &self.receipt_callback) - .finish() - } -} - impl ReceiptRecord { #[expect(dead_code)] pub fn new( diff --git a/veilid-core/src/network_manager/send_data.rs b/veilid-core/src/network_manager/send_data.rs index c06cfa06..a737e8fa 100644 --- a/veilid-core/src/network_manager/send_data.rs +++ b/veilid-core/src/network_manager/send_data.rs @@ -1,70 +1,26 @@ use super::*; use stop_token::future::FutureExt as _; +// global debugging statistics for hole punch success +static HOLE_PUNCH_SUCCESS: AtomicUsize = AtomicUsize::new(0); +static HOLE_PUNCH_FAILURE: AtomicUsize = AtomicUsize::new(0); +static REVERSE_CONNECT_SUCCESS: AtomicUsize = AtomicUsize::new(0); +static REVERSE_CONNECT_FAILURE: AtomicUsize = AtomicUsize::new(0); + impl NetworkManager { /// Send raw data to a node /// - /// We may not have dial info for a node, but have an existing flow for it - /// because an inbound flow happened first, and no FindNodeQ has happened to that - /// node yet to discover its dial info. The existing flow should be tried first - /// in this case, if it matches the node ref's filters and no more permissive flow - /// could be established. - /// - /// Sending to a node requires determining a NetworkClass compatible contact method - /// between the source and destination node + /// Sending to a node requires determining a NodeContactMethod. + /// NodeContactMethod is how to reach a node given the context of our current node, which may + /// include information about the existing connections and network state of our node. + /// NodeContactMethod calculation requires first calculating the per-RoutingDomain ContactMethod + /// between the source and destination PeerInfo, which is a stateless operation. 
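// The doc comment above describes a two-layer split: a stateless ContactMethod is
// computed purely from the two PeerInfos, and the stateful NodeContactMethod wraps
// it with local context. Because the pure part depends only on the two peer infos,
// it can be memoized on their timestamps and reused until either side publishes new
// info, which is what the cache lookup further below does. A simplified sketch of
// that caching shape; the field and type names here are illustrative, not the exact
// veilid-core definitions.
use std::collections::HashMap;

#[allow(dead_code)]
#[derive(Clone, Debug, PartialEq, Eq)]
enum ContactMethod { Direct, SignalReverse, InboundRelay, Unreachable }

#[derive(Clone, Copy, PartialEq, Eq, Hash)]
struct CacheKey {
    own_node_info_ts: u64,
    target_node_info_ts: u64,
}

struct ContactMethodCache {
    cache: HashMap<CacheKey, ContactMethod>,
}

impl ContactMethodCache {
    fn get_or_compute(
        &mut self,
        key: CacheKey,
        compute: impl FnOnce() -> ContactMethod,
    ) -> ContactMethod {
        // Reuse the previous answer while both peer infos are unchanged.
        self.cache.entry(key).or_insert_with(compute).clone()
    }
}

fn main() {
    let mut cmc = ContactMethodCache { cache: HashMap::new() };
    let key = CacheKey { own_node_info_ts: 100, target_node_info_ts: 250 };
    let first = cmc.get_or_compute(key, || ContactMethod::SignalReverse);
    // Same timestamps: the cached result is returned and the closure never runs.
    let second = cmc.get_or_compute(key, || unreachable!());
    assert_eq!(first, second);
}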
#[instrument(level = "trace", target = "net", skip_all, err)] pub(crate) async fn send_data( &self, destination_node_ref: FilteredNodeRef, data: Vec, ) -> EyreResult> { - // First try to send data to the last flow we've seen this peer on - - let data = if let Some(flow) = destination_node_ref.last_flow() { - #[cfg(feature = "verbose-tracing")] - log_net!(debug - "send_data: trying last flow ({:?}) for {:?}", - flow, - destination_node_ref - ); - - match self.net().send_data_to_existing_flow(flow, data).await? { - SendDataToExistingFlowResult::Sent(unique_flow) => { - // Update timestamp for this last flow since we just sent to it - destination_node_ref.set_last_flow(unique_flow.flow, Timestamp::now()); - - #[cfg(feature = "verbose-tracing")] - log_net!(debug - "send_data: sent to last flow ({:?}) for {:?}", - unique_flow, - destination_node_ref - ); - - return Ok(NetworkResult::value(SendDataMethod { - opt_relayed_contact_method: None, - contact_method: NodeContactMethod::Existing, - unique_flow, - })); - } - SendDataToExistingFlowResult::NotSent(data) => { - // Couldn't send data to existing flow - // so pass the data back out - #[cfg(feature = "verbose-tracing")] - log_net!(debug - "send_data: did not send to last flow ({:?}) for {:?}", - flow, - destination_node_ref - ); - data - } - } - } else { - // No last connection - data - }; - - // No existing connection was found or usable, so we proceed to see how to make a new one - // Get the best way to contact this node let possibly_relayed_contact_method = self.get_node_contact_method(destination_node_ref.clone())?; @@ -135,10 +91,23 @@ impl NetworkManager { .await?; if matches!(nres, NetworkResult::Timeout) { // Failed to holepunch, fallback to inbound relay - log_network_result!(debug "Reverse connection failed to {}, falling back to inbound relay via {}", target_node_ref, relay_nr); + let success = REVERSE_CONNECT_SUCCESS.load(Ordering::Acquire); + let failure = REVERSE_CONNECT_FAILURE.fetch_add(1, Ordering::AcqRel) + 1; + let rate = (success as f64 * 100.0) / ((success + failure) as f64); + + log_network_result!(debug "Reverse connection failed ({:.2}% success) to {}, falling back to inbound relay via {}", rate, target_node_ref, relay_nr); network_result_try!(this.try_possibly_relayed_contact_method(NodeContactMethod::InboundRelay(relay_nr), destination_node_ref, data).await?) 
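// The fallback paths in this function keep global success/failure tallies with
// atomics purely for debug logging, producing lines like
// "Reverse connection failed (42.00% success) ...". A standalone sketch of that
// bookkeeping; the counter names mirror the intent rather than the exact statics
// defined above.
use std::sync::atomic::{AtomicUsize, Ordering};

static SUCCESS: AtomicUsize = AtomicUsize::new(0);
static FAILURE: AtomicUsize = AtomicUsize::new(0);

/// Record one failure and return the overall success percentage so far.
fn record_failure() -> f64 {
    let success = SUCCESS.load(Ordering::Acquire);
    let failure = FAILURE.fetch_add(1, Ordering::AcqRel) + 1;
    (success as f64 * 100.0) / ((success + failure) as f64)
}

/// Record one success and return the overall success percentage so far.
fn record_success() -> f64 {
    let success = SUCCESS.fetch_add(1, Ordering::AcqRel) + 1;
    let failure = FAILURE.load(Ordering::Acquire);
    (success as f64 * 100.0) / ((success + failure) as f64)
}

fn main() {
    record_success();
    record_success();
    let rate = record_failure();
    // 2 successes, 1 failure => about 66.67% success reported alongside the fallback log.
    println!("signaled connection success rate: {:.2}%", rate);
}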
} else { - log_network_result!(debug "Reverse connection successful to {} via {}", target_node_ref, relay_nr); + if let NetworkResult::Value(sdm) = &nres { + if matches!(sdm.contact_method, NodeContactMethod::SignalReverse(_,_)) { + + let success = REVERSE_CONNECT_SUCCESS.fetch_add(1, Ordering::AcqRel) + 1; + let failure = REVERSE_CONNECT_FAILURE.load(Ordering::Acquire); + let rate = (success as f64 * 100.0) / ((success + failure) as f64); + + log_network_result!(debug "Reverse connection successful ({:.2}% success) to {} via {}", rate, target_node_ref, relay_nr); + } + } network_result_try!(nres) } } @@ -148,10 +117,22 @@ impl NetworkManager { .await?; if matches!(nres, NetworkResult::Timeout) { // Failed to holepunch, fallback to inbound relay - log_network_result!(debug "Hole punch failed to {}, falling back to inbound relay via {}", target_node_ref, relay_nr); + let success = HOLE_PUNCH_SUCCESS.load(Ordering::Acquire); + let failure = HOLE_PUNCH_FAILURE.fetch_add(1, Ordering::AcqRel) + 1; + let rate = (success as f64 * 100.0) / ((success + failure) as f64); + + log_network_result!(debug "Hole punch failed ({:.2}% success) to {} , falling back to inbound relay via {}", rate, target_node_ref , relay_nr); network_result_try!(this.try_possibly_relayed_contact_method(NodeContactMethod::InboundRelay(relay_nr), destination_node_ref, data).await?) } else { - log_network_result!(debug "Hole punch successful to {} via {}", target_node_ref, relay_nr); + if let NetworkResult::Value(sdm) = &nres { + if matches!(sdm.contact_method, NodeContactMethod::SignalHolePunch(_,_)) { + let success = HOLE_PUNCH_SUCCESS.fetch_add(1, Ordering::AcqRel) + 1; + let failure = HOLE_PUNCH_FAILURE.load(Ordering::Acquire); + let rate = (success as f64 * 100.0) / ((success + failure) as f64); + + log_network_result!(debug "Hole punch successful ({:.2}% success) to {} via {}", rate, target_node_ref, relay_nr); + } + } network_result_try!(nres) } } @@ -415,11 +396,40 @@ impl NetworkManager { } }; - // Node A is our own node + // Peer A is our own node // Use whatever node info we've calculated so far let peer_a = routing_table.get_current_peer_info(routing_domain); + let own_node_info_ts = peer_a.signed_node_info().timestamp(); - // Node B is the target node + // Peer B is the target node, get just the timestamp for the cache check + let target_node_info_ts = match target_node_ref.operate(|_rti, e| { + e.signed_node_info(routing_domain) + .map(|sni| sni.timestamp()) + }) { + Some(ts) => ts, + None => { + log_net!( + "no node info for node {:?} in {:?}", + target_node_ref, + routing_domain + ); + return Ok(NodeContactMethod::Unreachable); + } + }; + + // Get cache key + let mut ncm_key = NodeContactMethodCacheKey { + node_ids: target_node_ref.node_ids(), + own_node_info_ts, + target_node_info_ts, + target_node_ref_filter: target_node_ref.filter(), + target_node_ref_sequencing: target_node_ref.sequencing(), + }; + if let Some(ncm) = self.inner.lock().node_contact_method_cache.get(&ncm_key) { + return Ok(ncm.clone()); + } + + // Peer B is the target node, get the whole peer info now let peer_b = match target_node_ref.make_peer_info(routing_domain) { Some(pi) => Arc::new(pi), None => { @@ -427,18 +437,8 @@ impl NetworkManager { return Ok(NodeContactMethod::Unreachable); } }; - - // Get cache key - let ncm_key = NodeContactMethodCacheKey { - node_ids: target_node_ref.node_ids(), - own_node_info_ts: peer_a.signed_node_info().timestamp(), - target_node_info_ts: peer_b.signed_node_info().timestamp(), - target_node_ref_filter: 
target_node_ref.filter(), - target_node_ref_sequencing: target_node_ref.sequencing(), - }; - if let Some(ncm) = self.inner.lock().node_contact_method_cache.get(&ncm_key) { - return Ok(ncm.clone()); - } + // Update the key's timestamp to ensure we avoid any race conditions + ncm_key.target_node_info_ts = peer_b.signed_node_info().timestamp(); // Dial info filter comes from the target node ref but must be filtered by this node's outbound capabilities let dial_info_filter = target_node_ref.dial_info_filter().filtered( @@ -513,9 +513,42 @@ impl NetworkManager { if !target_node_ref.node_ids().contains(&target_key) { bail!("signalreverse target noderef didn't match target key: {:?} != {} for relay {}", target_node_ref, target_key, relay_key ); } + // Set sequencing requirement for the relay relay_nr.set_sequencing(sequencing); - let target_node_ref = + + // Tighten sequencing for the target to the best reverse connection flow we can get + let tighten = peer_a + .signed_node_info() + .node_info() + .filtered_dial_info_details(DialInfoDetail::NO_SORT, |did| { + did.matches_filter(&dial_info_filter) + }) + .iter() + .find_map(|did| { + if peer_b + .signed_node_info() + .node_info() + .address_types() + .contains(did.dial_info.address_type()) + && peer_b + .signed_node_info() + .node_info() + .outbound_protocols() + .contains(did.dial_info.protocol_type()) + && did.dial_info.protocol_type().is_ordered() + { + Some(true) + } else { + None + } + }) + .unwrap_or(false); + + let mut target_node_ref = target_node_ref.filtered_clone(NodeRefFilter::from(dial_info_filter)); + if tighten { + target_node_ref.set_sequencing(Sequencing::EnsureOrdered); + } NodeContactMethod::SignalReverse(relay_nr, target_node_ref) } ContactMethod::SignalHolePunch(relay_key, target_key) => { @@ -531,6 +564,7 @@ impl NetworkManager { if !target_node_ref.node_ids().contains(&target_key) { bail!("signalholepunch target noderef didn't match target key: {:?} != {} for relay {}", target_node_ref, target_key, relay_key ); } + // Set sequencing requirement for the relay relay_nr.set_sequencing(sequencing); // if any other protocol were possible here we could update this and do_hole_punch @@ -749,10 +783,13 @@ impl NetworkManager { // punch should come through and create a real 'last connection' for us if this succeeds network_result_try!( self.net() - .send_data_to_dial_info(hole_punch_did.dial_info, Vec::new()) + .send_data_to_dial_info(hole_punch_did.dial_info.clone(), Vec::new()) .await? ); + // Add small delay to encourage packets to be delivered in order + sleep(HOLE_PUNCH_DELAY_MS).await; + // Issue the signal let rpc = self.rpc_processor(); network_result_try!(rpc @@ -766,6 +803,13 @@ impl NetworkManager { .await .wrap_err("failed to send signal")?); + // Another hole punch after the signal for UDP redundancy + network_result_try!( + self.net() + .send_data_to_dial_info(hole_punch_did.dial_info, Vec::new()) + .await? 
+ ); + // Wait for the return receipt let inbound_nr = match eventual_value .timeout_at(stop_token) diff --git a/veilid-core/src/network_manager/tasks/local_network_address_check.rs b/veilid-core/src/network_manager/tasks/local_network_address_check.rs deleted file mode 100644 index 72428f2b..00000000 --- a/veilid-core/src/network_manager/tasks/local_network_address_check.rs +++ /dev/null @@ -1,15 +0,0 @@ -use super::*; - -impl NetworkManager { - // Determine if a local IP address has changed - // this means we should restart the low level network and and recreate all of our dial info - // Wait until we have received confirmation from N different peers - pub fn report_local_network_socket_address( - &self, - _socket_address: SocketAddress, - _flow: Flow, - _reporting_peer: NodeRef, - ) { - // XXX: Nothing here yet. - } -} diff --git a/veilid-core/src/network_manager/tasks/mod.rs b/veilid-core/src/network_manager/tasks/mod.rs index 3a68cd4f..a9a62da0 100644 --- a/veilid-core/src/network_manager/tasks/mod.rs +++ b/veilid-core/src/network_manager/tasks/mod.rs @@ -1,5 +1,3 @@ -pub mod local_network_address_check; -pub mod public_internet_address_check; pub mod rolling_transfers; use super::*; @@ -20,20 +18,6 @@ impl NetworkManager { }); } - // Set public internet address check task - { - let this = self.clone(); - self.unlocked_inner - .public_internet_address_check_task - .set_routine(move |s, l, t| { - Box::pin(this.clone().public_internet_address_check_task_routine( - s, - Timestamp::new(l), - Timestamp::new(t), - )) - }); - } - // Set address filter task { let this = self.clone(); diff --git a/veilid-core/src/network_manager/tasks/public_internet_address_check.rs b/veilid-core/src/network_manager/tasks/public_internet_address_check.rs deleted file mode 100644 index 5a7747c9..00000000 --- a/veilid-core/src/network_manager/tasks/public_internet_address_check.rs +++ /dev/null @@ -1,287 +0,0 @@ -use super::*; - -impl NetworkManager { - // Clean up the public address check tables, removing entries that have timed out - #[instrument(parent = None, level = "trace", skip_all, err)] - pub(crate) async fn public_internet_address_check_task_routine( - self, - _stop_token: StopToken, - _last_ts: Timestamp, - cur_ts: Timestamp, - ) -> EyreResult<()> { - // go through public_address_inconsistencies_table and time out things that have expired - let mut inner = self.inner.lock(); - for pait_v in inner - .public_internet_address_inconsistencies_table - .values_mut() - { - pait_v.retain(|_addr, exp_ts| { - // Keep it if it's in the future - *exp_ts > cur_ts - }); - } - Ok(()) - } - // Determine if a global IP address has changed - // this means we should recreate our public dial info if it is not static and rediscover it - // Wait until we have received confirmation from N different peers - pub fn report_public_internet_socket_address( - &self, - socket_address: SocketAddress, // the socket address as seen by the remote peer - flow: Flow, // the flow used - reporting_peer: NodeRef, // the peer's noderef reporting the socket address - ) { - log_network_result!("report_public_internet_socket_address:\nsocket_address: {:#?}\nflow: {:#?}\nreporting_peer: {:#?}", socket_address, flow, reporting_peer); - - // Ignore these reports if we are currently detecting public dial info - let net = self.net(); - if net.needs_public_dial_info_check() { - return; - } - - // Ignore flows that do not start from our listening port (unbound connections etc), - // because a router is going to map these differently - let Some(pla) 
= - net.get_preferred_local_address_by_key(flow.protocol_type(), flow.address_type()) - else { - return; - }; - let Some(local) = flow.local() else { - return; - }; - if local.port() != pla.port() { - log_network_result!(debug "ignoring public internet address report because local port did not match listener: {} != {}", local.port(), pla.port()); - return; - } - - // Get our current published peer info - let routing_table = self.routing_table(); - let Some(published_peer_info) = - routing_table.get_published_peer_info(RoutingDomain::PublicInternet) - else { - return; - }; - - // If we are a webapp we should skip this completely - // because we will never get inbound dialinfo directly on our public ip address - // If we have an invalid network class, this is not necessary yet - let public_internet_network_class = published_peer_info - .signed_node_info() - .node_info() - .network_class(); - if matches!(public_internet_network_class, NetworkClass::WebApp) { - return; - } - - let (detect_address_changes, ip6_prefix_size) = self.with_config(|c| { - ( - c.network.detect_address_changes, - c.network.max_connections_per_ip6_prefix_size as usize, - ) - }); - - // Get the ip(block) this report is coming from - let reporting_ipblock = ip_to_ipblock(ip6_prefix_size, flow.remote_address().ip_addr()); - - // Reject public address reports from nodes that we know are behind symmetric nat or - // nodes that must be using a relay for everything - let Some(node_info) = reporting_peer.node_info(RoutingDomain::PublicInternet) else { - return; - }; - if node_info.network_class() != NetworkClass::InboundCapable { - return; - } - - // If the socket address reported is the same as the reporter, then this is coming through a relay - // or it should be ignored due to local proximity (nodes on the same network block should not be trusted as - // public ip address reporters, only disinterested parties) - if reporting_ipblock == ip_to_ipblock(ip6_prefix_size, socket_address.ip_addr()) { - return; - } - - // Check if the public address report is coming from a node/block that gives an 'inconsistent' location - // meaning that the node may be not useful for public address detection - // This is done on a per address/protocol basis - - let mut inner = self.inner.lock(); - let inner = &mut *inner; - - let addr_proto_type_key = - PublicAddressCheckCacheKey(flow.protocol_type(), flow.address_type()); - if inner - .public_internet_address_inconsistencies_table - .get(&addr_proto_type_key) - .map(|pait| pait.contains_key(&reporting_ipblock)) - .unwrap_or(false) - { - return; - } - - // Insert this new public address into the lru cache for the address check - // if we've seen this address before, it brings it to the front - let pacc = inner - .public_internet_address_check_cache - .entry(addr_proto_type_key) - .or_insert_with(|| LruCache::new(PUBLIC_ADDRESS_CHECK_CACHE_SIZE)); - pacc.insert(reporting_ipblock, socket_address); - - // Determine if our external address has likely changed - let mut bad_public_internet_address_detection_punishment: Option< - Box, - > = None; - - let needs_public_internet_address_detection = if matches!( - public_internet_network_class, - NetworkClass::InboundCapable - ) { - // Get the dial info filter for this connection so we can check if we have any public dialinfo that may have changed - let dial_info_filter = flow.make_dial_info_filter(); - - // Get current external ip/port from registered global dialinfo - let current_addresses: BTreeSet = published_peer_info - .signed_node_info() - 
.node_info() - .filtered_dial_info_details(DialInfoDetail::NO_SORT, |did| { - did.matches_filter(&dial_info_filter) - }) - .iter() - .map(|did| { - // Strip port from direct and mapped addresses - // as the incoming dialinfo may not match the outbound - // connections' NAT mapping. In this case we only check for IP address changes. - if did.class == DialInfoClass::Direct || did.class == DialInfoClass::Mapped { - did.dial_info.socket_address().with_port(0) - } else { - did.dial_info.socket_address() - } - }) - .collect(); - - // If we are inbound capable, but start to see inconsistent socket addresses from multiple reporting peers - // then we zap the network class and re-detect it - - // Keep list of the origin ip blocks of inconsistent public address reports - let mut inconsistencies = Vec::new(); - - // Iteration goes from most recent to least recent node/address pair - for (reporting_ip_block, a) in pacc { - // If this address is not one of our current addresses (inconsistent) - // and we haven't already denylisted the reporting source, - // Also check address with port zero in the event we are only checking changes to ip addresses - if !current_addresses.contains(a) - && !current_addresses.contains(&a.with_port(0)) - && !inner - .public_internet_address_inconsistencies_table - .get(&addr_proto_type_key) - .map(|pait| pait.contains_key(reporting_ip_block)) - .unwrap_or(false) - { - // Record the origin of the inconsistency - log_network_result!(debug "inconsistency added from {:?}: reported {:?} with current_addresses = {:?}", reporting_ip_block, a, current_addresses); - - inconsistencies.push(*reporting_ip_block); - } - } - - // If we have enough inconsistencies to consider changing our public dial info, - // add them to our denylist (throttling) and go ahead and check for new - // public dialinfo - let inconsistent = - if inconsistencies.len() >= PUBLIC_ADDRESS_CHANGE_INCONSISTENCY_DETECTION_COUNT { - let exp_ts = Timestamp::now() + PUBLIC_ADDRESS_INCONSISTENCY_TIMEOUT_US; - let pait = inner - .public_internet_address_inconsistencies_table - .entry(addr_proto_type_key) - .or_default(); - for i in &inconsistencies { - pait.insert(*i, exp_ts); - } - - // Run this routine if the inconsistent nodes turn out to be lying - let this = self.clone(); - bad_public_internet_address_detection_punishment = Some(Box::new(move || { - // xxx does this even work?? 
- - let mut inner = this.inner.lock(); - let pait = inner - .public_internet_address_inconsistencies_table - .entry(addr_proto_type_key) - .or_default(); - let exp_ts = - Timestamp::now() + PUBLIC_ADDRESS_INCONSISTENCY_PUNISHMENT_TIMEOUT_US; - for i in inconsistencies { - pait.insert(i, exp_ts); - } - })); - - true - } else { - false - }; - - // // debug code - // if inconsistent { - // log_net!("report_public_internet_socket_address: {:#?}\ncurrent_addresses: {:#?}\ninconsistencies: {}", inner - // .public_address_check_cache, current_addresses, inconsistencies); - // } - - inconsistent - } else if matches!(public_internet_network_class, NetworkClass::OutboundOnly) { - // If we are currently outbound only, we don't have any public dial info - // but if we are starting to see consistent socket address from multiple reporting peers - // then we may be become inbound capable, so zap the network class so we can re-detect it and any public dial info - - let mut consistencies = 0; - let mut consistent = false; - let mut current_address = Option::::None; - - // Iteration goes from most recent to least recent node/address pair - for (_, a) in pacc { - if let Some(current_address) = current_address { - if current_address == *a { - consistencies += 1; - if consistencies >= PUBLIC_ADDRESS_CHANGE_CONSISTENCY_DETECTION_COUNT { - consistent = true; - break; - } - } - } else { - current_address = Some(*a); - } - } - consistent - } else { - // If we are a webapp we never do this. - // If we have invalid network class, then public address detection is already going to happen via the network_class_discovery task - - // we should have checked for this condition earlier at the top of this function - unreachable!(); - }; - - if needs_public_internet_address_detection { - if detect_address_changes { - // Reset the address check cache now so we can start detecting fresh - info!("PublicInternet address has changed, detecting public dial info"); - log_net!(debug "report_public_internet_socket_address:\nsocket_address: {:#?}\nflow: {:#?}\nreporting_peer: {:#?}", socket_address, flow, reporting_peer); - log_net!(debug - "public_internet_address_check_cache: {:#?}", - inner.public_internet_address_check_cache - ); - - inner.public_internet_address_check_cache.clear(); - - // Re-detect the public dialinfo - net.set_needs_public_dial_info_check( - bad_public_internet_address_detection_punishment, - ); - } else { - warn!("PublicInternet address may have changed. 
Restarting the server may be required."); - warn!("report_public_internet_socket_address:\nsocket_address: {:#?}\nflow: {:#?}\nreporting_peer: {:#?}", socket_address, flow, reporting_peer); - warn!( - "public_internet_address_check_cache: {:#?}", - inner.public_internet_address_check_cache - ); - } - } - } -} diff --git a/veilid-core/src/network_manager/types/dial_info/mod.rs b/veilid-core/src/network_manager/types/dial_info/mod.rs index 020bf44c..460fdad9 100644 --- a/veilid-core/src/network_manager/types/dial_info/mod.rs +++ b/veilid-core/src/network_manager/types/dial_info/mod.rs @@ -259,7 +259,7 @@ impl DialInfo { Self::WSS(di) => di.socket_address.ip_addr(), } } - #[cfg_attr(target_arch = "wasm32", expect(dead_code))] + #[expect(dead_code)] pub fn port(&self) -> u16 { match self { Self::UDP(di) => di.socket_address.port(), diff --git a/veilid-core/src/network_manager/types/flow.rs b/veilid-core/src/network_manager/types/flow.rs index 5722493c..54a0e024 100644 --- a/veilid-core/src/network_manager/types/flow.rs +++ b/veilid-core/src/network_manager/types/flow.rs @@ -11,12 +11,22 @@ use super::*; /// established connection is always from a real address to another real address. /// -#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)] +#[derive(Copy, Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)] pub struct Flow { remote: PeerAddress, local: Option, } +impl fmt::Display for Flow { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + if let Some(local) = &self.local { + write!(f, "{} -> {}", local, self.remote) + } else { + write!(f, "{}", self.remote) + } + } +} + impl Flow { pub fn new(remote: PeerAddress, local: SocketAddress) -> Self { assert!(!remote.protocol_type().is_ordered() || !local.address().is_unspecified()); @@ -47,11 +57,6 @@ impl Flow { pub fn address_type(&self) -> AddressType { self.remote.address_type() } - pub fn make_dial_info_filter(&self) -> DialInfoFilter { - DialInfoFilter::all() - .with_protocol_type(self.protocol_type()) - .with_address_type(self.address_type()) - } } impl MatchesDialInfoFilter for Flow { @@ -75,4 +80,19 @@ pub struct UniqueFlow { pub connection_id: Option, } +impl fmt::Display for UniqueFlow { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!( + f, + "{} ({})", + self.flow, + if let Some(connection_id) = &self.connection_id { + format!("id={}", connection_id) + } else { + "---".to_string() + } + ) + } +} + pub type NetworkConnectionId = AlignedU64; diff --git a/veilid-core/src/network_manager/wasm/mod.rs b/veilid-core/src/network_manager/wasm/mod.rs index d06458f2..6c3b4858 100644 --- a/veilid-core/src/network_manager/wasm/mod.rs +++ b/veilid-core/src/network_manager/wasm/mod.rs @@ -404,15 +404,12 @@ impl Network { .edit_public_internet_routing_domain(); // set up the routing table's network config - // if we have static public dialinfo, upgrade our network class - editor_public_internet.setup_network( protocol_config.outbound, protocol_config.inbound, protocol_config.family_global, protocol_config.public_internet_capabilities.clone(), ); - editor_public_internet.set_network_class(Some(NetworkClass::WebApp)); // commit routing domain edits if editor_public_internet.commit(true).await { diff --git a/veilid-core/src/routing_table/bucket_entry.rs b/veilid-core/src/routing_table/bucket_entry.rs index 017be327..f40e107d 100644 --- a/veilid-core/src/routing_table/bucket_entry.rs +++ b/veilid-core/src/routing_table/bucket_entry.rs @@ -27,7 +27,7 @@ const 
NEVER_SEEN_PING_COUNT: u32 = 3; #[derive(Debug, Copy, Clone, PartialEq, Eq)] pub(crate) enum BucketEntryDeadReason { - FailedToSend, + CanNotSend, TooManyLostAnswers, NoPingResponse, } @@ -87,8 +87,11 @@ impl From for BucketEntryState { } } -#[derive(Debug, Clone, Eq, PartialEq, PartialOrd, Ord, Hash)] -pub(crate) struct LastFlowKey(ProtocolType, AddressType); +#[derive(Debug, Clone, Eq, PartialEq, PartialOrd, Ord, Hash, Serialize, Deserialize)] +pub(crate) struct LastFlowKey(pub ProtocolType, pub AddressType); + +#[derive(Debug, Clone, Eq, PartialEq, PartialOrd, Ord, Hash, Serialize, Deserialize)] +pub(crate) struct LastSenderInfoKey(pub RoutingDomain, pub ProtocolType, pub AddressType); /// Bucket entry information specific to the LocalNetwork RoutingDomain #[derive(Debug, Serialize, Deserialize)] @@ -101,6 +104,24 @@ pub(crate) struct BucketEntryPublicInternet { node_status: Option, } +impl fmt::Display for BucketEntryPublicInternet { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + if let Some(sni) = &self.signed_node_info { + writeln!(f, "signed_node_info:")?; + write!(f, " {}", indent_string(sni))?; + } else { + writeln!(f, "signed_node_info: None")?; + } + writeln!( + f, + "last_seen_our_node_info_ts: {}", + self.last_seen_our_node_info_ts + )?; + writeln!(f, "node_status: {:?}", self.node_status)?; + Ok(()) + } +} + /// Bucket entry information specific to the LocalNetwork RoutingDomain #[derive(Debug, Serialize, Deserialize)] pub(crate) struct BucketEntryLocalNetwork { @@ -112,6 +133,24 @@ pub(crate) struct BucketEntryLocalNetwork { node_status: Option, } +impl fmt::Display for BucketEntryLocalNetwork { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + if let Some(sni) = &self.signed_node_info { + writeln!(f, "signed_node_info:")?; + write!(f, " {}", indent_string(sni))?; + } else { + writeln!(f, "signed_node_info: None")?; + } + writeln!( + f, + "last_seen_our_node_info_ts: {}", + self.last_seen_our_node_info_ts + )?; + writeln!(f, "node_status: {:?}", self.node_status)?; + Ok(()) + } +} + /// The data associated with each bucket entry #[derive(Debug, Serialize, Deserialize)] pub(crate) struct BucketEntryInner { @@ -130,6 +169,9 @@ pub(crate) struct BucketEntryInner { /// The last flows used to contact this node, per protocol type #[serde(skip)] last_flows: BTreeMap, + /// Last seen senderinfo per protocol/address type + #[serde(skip)] + last_sender_info: HashMap, /// The node info for this entry on the publicinternet routing domain public_internet: BucketEntryPublicInternet, /// The node info for this entry on the localnetwork routing domain @@ -142,6 +184,12 @@ pub(crate) struct BucketEntryInner { /// The accounting for the transfer statistics #[serde(skip)] transfer_stats_accounting: TransferStatsAccounting, + /// The account for the state and reason statistics + #[serde(skip)] + state_stats_accounting: Mutex, + /// RPC answer stats accounting + #[serde(skip)] + answer_stats_accounting: AnswerStatsAccounting, /// If the entry is being punished and should be considered dead #[serde(skip)] punishment: Option, @@ -155,6 +203,52 @@ pub(crate) struct BucketEntryInner { node_ref_tracks: HashMap, } +impl fmt::Display for BucketEntryInner { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + writeln!(f, "validated_node_ids: {}", self.validated_node_ids)?; + writeln!(f, "unsupported_node_ids: {}", self.unsupported_node_ids)?; + writeln!(f, "envelope_support: {:?}", self.envelope_support)?; + writeln!( + f, + "updated_since_last_network_change: 
{:?}", + self.updated_since_last_network_change + )?; + writeln!(f, "last_flows:")?; + for lf in &self.last_flows { + writeln!( + f, + " {:?}/{:?}: {} @ {}", + lf.0 .0, lf.0 .1, lf.1 .0, lf.1 .1 + )?; + } + writeln!(f, "last_sender_info:")?; + for lsi in &self.last_sender_info { + writeln!( + f, + " {:?}/{:?}/{:?}: {}", + lsi.0 .0, lsi.0 .1, lsi.0 .2, lsi.1.socket_address + )?; + } + writeln!(f, "public_internet:")?; + write!(f, "{}", indent_all_string(&self.public_internet))?; + writeln!(f, "local_network:")?; + write!(f, "{}", indent_all_string(&self.local_network))?; + writeln!(f, "peer_stats:")?; + write!(f, "{}", indent_all_string(&self.peer_stats))?; + writeln!( + f, + "punishment: {}", + if let Some(punishment) = self.punishment { + format!("{:?}", punishment) + } else { + "None".to_owned() + } + )?; + + Ok(()) + } +} + impl BucketEntryInner { #[cfg(feature = "tracking")] pub fn track(&mut self) -> usize { @@ -593,7 +687,7 @@ impl BucketEntryInner { } pub fn state_reason(&self, cur_ts: Timestamp) -> BucketEntryStateReason { - if let Some(punished_reason) = self.punishment { + let reason = if let Some(punished_reason) = self.punishment { BucketEntryStateReason::Punished(punished_reason) } else if let Some(dead_reason) = self.check_dead(cur_ts) { BucketEntryStateReason::Dead(dead_reason) @@ -601,7 +695,14 @@ impl BucketEntryInner { BucketEntryStateReason::Unreliable(unreliable_reason) } else { BucketEntryStateReason::Reliable - } + }; + + // record this reason + self.state_stats_accounting + .lock() + .record_state_reason(cur_ts, reason); + + reason } pub fn state(&self, cur_ts: Timestamp) -> BucketEntryState { @@ -681,6 +782,18 @@ impl BucketEntryInner { self.peer_stats.latency = Some(self.latency_stats_accounting.record_latency(latency)); } + // Called every UPDATE_STATE_STATS_SECS seconds + pub(super) fn update_state_stats(&mut self) { + if let Some(state_stats) = self.state_stats_accounting.lock().take_stats() { + self.peer_stats.state = state_stats; + } + } + + // called every ROLLING_ANSWERS_INTERVAL_SECS seconds + pub(super) fn roll_answer_stats(&mut self, cur_ts: Timestamp) { + self.peer_stats.rpc_stats.answer = self.answer_stats_accounting.roll_answers(cur_ts); + } + ///// state machine handling pub(super) fn check_unreliable( &self, @@ -714,7 +827,7 @@ impl BucketEntryInner { pub(super) fn check_dead(&self, cur_ts: Timestamp) -> Option { // If we have failed to send NEVER_REACHED_PING_COUNT times in a row, the node is dead if self.peer_stats.rpc_stats.failed_to_send >= NEVER_SEEN_PING_COUNT { - return Some(BucketEntryDeadReason::FailedToSend); + return Some(BucketEntryDeadReason::CanNotSend); } match self.peer_stats.rpc_stats.last_seen_ts { @@ -744,17 +857,28 @@ impl BucketEntryInner { None } - /// Return the last time we either saw a node, or asked it a question - fn latest_contact_time(&self) -> Option { - self.peer_stats - .rpc_stats - .last_seen_ts - .max(self.peer_stats.rpc_stats.last_question_ts) + /// Return the last time we asked a node a question + fn last_outbound_contact_time(&self) -> Option { + // This is outbound and inbound contact time which may be a reasonable optimization for nodes that have + // a very low rate of 'lost answers', but for now we are reverting this to ensure outbound connectivity before + // we claim a node is reliable + // + // self.peer_stats + // .rpc_stats + // .last_seen_ts + // .max(self.peer_stats.rpc_stats.last_question_ts) + + self.peer_stats.rpc_stats.last_question_ts } + /// Return the last time we asked a node a question + // fn 
last_question_time(&self) -> Option { + // self.peer_stats.rpc_stats.last_question_ts + // } + fn needs_constant_ping(&self, cur_ts: Timestamp, interval_us: TimestampDuration) -> bool { // If we have not either seen the node in the last 'interval' then we should ping it - let latest_contact_time = self.latest_contact_time(); + let latest_contact_time = self.last_outbound_contact_time(); match latest_contact_time { None => true, @@ -773,7 +897,7 @@ impl BucketEntryInner { match state { BucketEntryState::Reliable => { // If we are in a reliable state, we need a ping on an exponential scale - let latest_contact_time = self.latest_contact_time(); + let latest_contact_time = self.last_outbound_contact_time(); match latest_contact_time { None => { @@ -873,6 +997,7 @@ impl BucketEntryInner { pub(super) fn question_sent(&mut self, ts: Timestamp, bytes: ByteCount, expects_answer: bool) { self.transfer_stats_accounting.add_up(bytes); + self.answer_stats_accounting.record_question(ts); self.peer_stats.rpc_stats.messages_sent += 1; self.peer_stats.rpc_stats.failed_to_send = 0; if expects_answer { @@ -892,13 +1017,16 @@ impl BucketEntryInner { } pub(super) fn answer_rcvd(&mut self, send_ts: Timestamp, recv_ts: Timestamp, bytes: ByteCount) { self.transfer_stats_accounting.add_down(bytes); + self.answer_stats_accounting.record_answer(recv_ts); self.peer_stats.rpc_stats.messages_rcvd += 1; self.peer_stats.rpc_stats.questions_in_flight -= 1; self.record_latency(recv_ts.saturating_sub(send_ts)); self.touch_last_seen(recv_ts); self.peer_stats.rpc_stats.recent_lost_answers = 0; } - pub(super) fn question_lost(&mut self) { + pub(super) fn lost_answer(&mut self) { + let cur_ts = Timestamp::now(); + self.answer_stats_accounting.record_lost_answer(cur_ts); self.peer_stats.rpc_stats.first_consecutive_seen_ts = None; self.peer_stats.rpc_stats.questions_in_flight -= 1; self.peer_stats.rpc_stats.recent_lost_answers += 1; @@ -910,6 +1038,19 @@ impl BucketEntryInner { self.peer_stats.rpc_stats.failed_to_send += 1; self.peer_stats.rpc_stats.first_consecutive_seen_ts = None; } + pub(super) fn report_sender_info( + &mut self, + key: LastSenderInfoKey, + sender_info: SenderInfo, + ) -> Option { + let last_sender_info = self.last_sender_info.insert(key, sender_info); + if last_sender_info != Some(sender_info) { + // Return last senderinfo if this new one is different + last_sender_info + } else { + None + } + } } #[derive(Debug)] @@ -930,6 +1071,7 @@ impl BucketEntry { envelope_support: Vec::new(), updated_since_last_network_change: false, last_flows: BTreeMap::new(), + last_sender_info: HashMap::new(), local_network: BucketEntryLocalNetwork { last_seen_our_node_info_ts: Timestamp::new(0u64), signed_node_info: None, @@ -945,9 +1087,12 @@ impl BucketEntry { rpc_stats: RPCStats::default(), latency: None, transfer: TransferStatsDownUp::default(), + state: StateStats::default(), }, latency_stats_accounting: LatencyStatsAccounting::new(), transfer_stats_accounting: TransferStatsAccounting::new(), + state_stats_accounting: Mutex::new(StateStatsAccounting::new()), + answer_stats_accounting: AnswerStatsAccounting::new(), punishment: None, #[cfg(feature = "tracking")] next_track_id: 0, diff --git a/veilid-core/src/routing_table/debug.rs b/veilid-core/src/routing_table/debug.rs index e1ed0213..a809206d 100644 --- a/veilid-core/src/routing_table/debug.rs +++ b/veilid-core/src/routing_table/debug.rs @@ -66,22 +66,12 @@ impl RoutingTable { pub(crate) fn debug_info_nodeinfo(&self) -> String { let mut out = String::new(); let inner = 
self.inner.read(); - out += "Routing Table Info:\n"; - - out += &format!(" Node Ids: {}\n", self.unlocked_inner.node_ids()); + out += &format!("Node Ids: {}\n", self.unlocked_inner.node_ids()); out += &format!( - " Self Latency Stats Accounting: {:#?}\n\n", - inner.self_latency_stats_accounting + "Self Transfer Stats:\n{}", + indent_all_string(&inner.self_transfer_stats) ); - out += &format!( - " Self Transfer Stats Accounting: {:#?}\n\n", - inner.self_transfer_stats_accounting - ); - out += &format!( - " Self Transfer Stats: {:#?}\n\n", - inner.self_transfer_stats - ); - out += &format!(" Version: {}\n\n", veilid_version_string()); + out += &format!("Version: {}", veilid_version_string()); out } @@ -93,11 +83,11 @@ impl RoutingTable { out += "Local Network Dial Info Details:\n"; for (n, ldi) in ldis.iter().enumerate() { - out += &format!(" {:>2}: {:?}\n", n, ldi); + out += &indent_all_string(&format!("{:>2}: {}\n", n, ldi)); } out += "Public Internet Dial Info Details:\n"; for (n, gdi) in gdis.iter().enumerate() { - out += &format!(" {:>2}: {:?}\n", n, gdi); + out += &indent_all_string(&format!("{:>2}: {}\n", n, gdi)); } out } @@ -109,17 +99,16 @@ impl RoutingTable { ) -> String { let mut out = String::new(); if published { - out += &format!( - "{:?} Published PeerInfo:\n {:#?}\n", - routing_domain, - self.get_published_peer_info(routing_domain) - ); + let pistr = if let Some(pi) = self.get_published_peer_info(routing_domain) { + format!("\n{}\n", indent_all_string(&pi)) + } else { + " None".to_owned() + }; + out += &format!("{:?} Published PeerInfo:{}", routing_domain, pistr); } else { - out += &format!( - "{:?} Current PeerInfo:\n {:#?}\n", - routing_domain, - self.get_current_peer_info(routing_domain) - ); + let pi = self.get_current_peer_info(routing_domain); + let pistr = format!("\n{}\n", indent_all_string(&pi)); + out += &format!("{:?} Current PeerInfo:{}", routing_domain, pistr); } out } @@ -138,7 +127,7 @@ impl RoutingTable { // }, BucketEntryStateReason::Dead(d) => match d { - BucketEntryDeadReason::FailedToSend => "DFSEND", + BucketEntryDeadReason::CanNotSend => "DFSEND", BucketEntryDeadReason::TooManyLostAnswers => "DALOST", BucketEntryDeadReason::NoPingResponse => "DNOPNG", }, @@ -153,11 +142,60 @@ impl RoutingTable { } } + fn format_entry( + cur_ts: Timestamp, + node: TypedKey, + e: &BucketEntryInner, + relay_tag: &str, + ) -> String { + format!( + " {} [{}][{}] {} [{}] lastq@{} seen@{}", + // node id + node, + // state reason + Self::format_state_reason(e.state_reason(cur_ts)), + // Relay tag + relay_tag, + // average latency + e.peer_stats() + .latency + .as_ref() + .map(|l| l.to_string()) + .unwrap_or_else(|| "???".to_string()), + // capabilities + if let Some(ni) = e.node_info(RoutingDomain::PublicInternet) { + ni.capabilities() + .iter() + .map(|x| x.to_string()) + .collect::>() + .join(",") + } else { + "???".to_owned() + }, + // duration since last question + e.peer_stats() + .rpc_stats + .last_question_ts + .as_ref() + .map(|l| cur_ts.saturating_sub(*l).to_string()) + .unwrap_or_else(|| "???".to_string()), + // duration since last seen + e.peer_stats() + .rpc_stats + .last_seen_ts + .as_ref() + .map(|l| cur_ts.saturating_sub(*l).to_string()) + .unwrap_or_else(|| "???".to_string()), + ) + } + pub(crate) fn debug_info_entries( &self, min_state: BucketEntryState, capabilities: Vec, ) -> String { + let relay_node_filter = self.make_public_internet_relay_node_filter(); + let inner = self.inner.read(); let inner = &*inner; let cur_ts = Timestamp::now(); @@ -191,35 +229,26 
@@ impl RoutingTable { if !filtered_entries.is_empty() { out += &format!("{} Bucket #{}:\n", ck, b); for e in filtered_entries { - let state_reason = e.1.with(inner, |_rti, e| e.state_reason(cur_ts)); - out += &format!( - " {} [{}] {} [{}]\n", - e.0.encode(), - Self::format_state_reason(state_reason), - e.1.with(inner, |_rti, e| { - e.peer_stats() - .latency - .as_ref() - .map(|l| { - format!( - "{:.2}ms", - timestamp_to_secs(l.average.as_u64()) * 1000.0 - ) - }) - .unwrap_or_else(|| "???.??ms".to_string()) - }), - e.1.with(inner, |_rti, e| { - if let Some(ni) = e.node_info(RoutingDomain::PublicInternet) { - ni.capabilities() - .iter() - .map(|x| x.to_string()) - .collect::>() - .join(",") - } else { - "???".to_owned() - } - }) - ); + let node = *e.0; + + let can_be_relay = e.1.with(inner, |_rti, e| relay_node_filter(e)); + let is_relay = self + .relay_node(RoutingDomain::PublicInternet) + .map(|r| r.same_bucket_entry(e.1)) + .unwrap_or(false); + let relay_tag = if is_relay { + "R" + } else if can_be_relay { + "r" + } else { + "-" + }; + + out += " "; + out += &e.1.with(inner, |_rti, e| { + Self::format_entry(cur_ts, TypedKey::new(*ck, node), e, relay_tag) + }); + out += "\n"; } } b += 1; @@ -230,6 +259,71 @@ impl RoutingTable { out } + pub(crate) fn debug_info_entries_fastest( + &self, + min_state: BucketEntryState, + capabilities: Vec, + node_count: usize, + ) -> String { + let cur_ts = Timestamp::now(); + let relay_node_filter = self.make_public_internet_relay_node_filter(); + let mut relay_count = 0usize; + + let mut filters = VecDeque::new(); + filters.push_front( + Box::new(|rti: &RoutingTableInner, e: Option>| { + let Some(e) = e else { + return false; + }; + let cap_match = e.with(rti, |_rti, e| { + e.has_all_capabilities(RoutingDomain::PublicInternet, &capabilities) + }); + let state = e.with(rti, |_rti, e| e.state(cur_ts)); + state >= min_state && cap_match + }) as RoutingTableEntryFilter, + ); + let nodes = self.find_preferred_fastest_nodes( + node_count, + filters, + |_rti, entry: Option>| { + NodeRef::new(self.clone(), entry.unwrap().clone()) + }, + ); + let mut out = String::new(); + let entry_count = nodes.len(); + for node in nodes { + let can_be_relay = node.operate(|_rti, e| relay_node_filter(e)); + let is_relay = self + .relay_node(RoutingDomain::PublicInternet) + .map(|r| r.same_entry(&node)) + .unwrap_or(false); + let relay_tag = if is_relay { + "R" + } else if can_be_relay { + "r" + } else { + "-" + }; + if can_be_relay { + relay_count += 1; + } + + out += " "; + out += &node + .operate(|_rti, e| Self::format_entry(cur_ts, node.best_node_id(), e, relay_tag)); + out += "\n"; + } + + out += &format!( + "Entries: {} Relays: {} Relay %: {:.2}\n", + entry_count, + relay_count, + (relay_count as f64) * 100.0 / (entry_count as f64) + ); + + out + } + pub(crate) fn debug_info_entry(&self, node_ref: NodeRef) -> String { let cur_ts = Timestamp::now(); @@ -237,9 +331,9 @@ impl RoutingTable { out += &node_ref.operate(|_rti, e| { let state_reason = e.state_reason(cur_ts); format!( - "state: {}\n{:#?}\n", + "{}\nstate: {}\n", + e, Self::format_state_reason(state_reason), - e ) }); out diff --git a/veilid-core/src/routing_table/mod.rs b/veilid-core/src/routing_table/mod.rs index 52fbd00c..3bea8ba1 100644 --- a/veilid-core/src/routing_table/mod.rs +++ b/veilid-core/src/routing_table/mod.rs @@ -42,8 +42,11 @@ pub const RELAY_MANAGEMENT_INTERVAL_SECS: u32 = 1; pub const PRIVATE_ROUTE_MANAGEMENT_INTERVAL_SECS: u32 = 1; // Connectionless protocols like UDP are dependent on a NAT 
translation timeout -// We should ping them with some frequency and 30 seconds is typical timeout -pub const CONNECTIONLESS_TIMEOUT_SECS: u32 = 29; +// We ping relays to maintain our UDP NAT state with a RELAY_KEEPALIVE_PING_INTERVAL_SECS=10 frequency +// since 30 seconds is a typical UDP NAT state timeout. +// Non-relay flows are assumed to be alive for half the typical timeout and we regenerate the hole punch +// if it the flow hasn't had any activity in this amount of time. +pub const CONNECTIONLESS_TIMEOUT_SECS: u32 = 15; // Table store keys const ALL_ENTRY_BYTES: &[u8] = b"all_entry_bytes"; @@ -100,6 +103,10 @@ pub(crate) struct RoutingTableUnlockedInner { kick_queue: Mutex>, /// Background process for computing statistics rolling_transfers_task: TickTask, + /// Background process for computing statistics + update_state_stats_task: TickTask, + /// Background process for computing statistics + rolling_answers_task: TickTask, /// Background process to purge dead routing table entries when necessary kick_buckets_task: TickTask, /// Background process to get our initial routing table @@ -108,8 +115,14 @@ pub(crate) struct RoutingTableUnlockedInner { peer_minimum_refresh_task: TickTask, /// Background process to ensure we have enough nodes close to our own in our routing table closest_peers_refresh_task: TickTask, - /// Background process to check nodes to see if they are still alive and for reliability - ping_validator_task: TickTask, + /// Background process to check PublicInternet nodes to see if they are still alive and for reliability + ping_validator_public_internet_task: TickTask, + /// Background process to check LocalNetwork nodes to see if they are still alive and for reliability + ping_validator_local_network_task: TickTask, + /// Background process to check PublicInternet relay nodes to see if they are still alive and for reliability + ping_validator_public_internet_relay_task: TickTask, + /// Background process to check Active Watch nodes to see if they are still alive and for reliability + ping_validator_active_watch_task: TickTask, /// Background process to keep relays up relay_management_task: TickTask, /// Background process to keep private routes up @@ -216,6 +229,14 @@ impl RoutingTable { "rolling_transfers_task", ROLLING_TRANSFERS_INTERVAL_SECS, ), + update_state_stats_task: TickTask::new( + "update_state_stats_task", + UPDATE_STATE_STATS_INTERVAL_SECS, + ), + rolling_answers_task: TickTask::new( + "rolling_answers_task", + ROLLING_ANSWER_INTERVAL_SECS, + ), kick_buckets_task: TickTask::new("kick_buckets_task", 1), bootstrap_task: TickTask::new("bootstrap_task", 1), peer_minimum_refresh_task: TickTask::new("peer_minimum_refresh_task", 1), @@ -223,7 +244,19 @@ impl RoutingTable { "closest_peers_refresh_task", c.network.dht.min_peer_refresh_time_ms, ), - ping_validator_task: TickTask::new("ping_validator_task", 1), + ping_validator_public_internet_task: TickTask::new( + "ping_validator_public_internet_task", + 1, + ), + ping_validator_local_network_task: TickTask::new( + "ping_validator_local_network_task", + 1, + ), + ping_validator_public_internet_relay_task: TickTask::new( + "ping_validator_public_internet_relay_task", + 1, + ), + ping_validator_active_watch_task: TickTask::new("ping_validator_active_watch_task", 1), relay_management_task: TickTask::new( "relay_management_task", RELAY_MANAGEMENT_INTERVAL_SECS, @@ -573,12 +606,6 @@ impl RoutingTable { self.inner.read().get_current_peer_info(routing_domain) } - /// If we have a valid network class in this routing domain, 
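The changelog's "separate parallelism lanes" for relay keepalive pings versus general liveness pings shows up above as four independent ping-validator tick tasks. A simplified sketch of the lane idea, using assumed intervals (10 s relay keepalive, 1 s ticks elsewhere) and plain std timers rather than the TickTask type:

```rust
// Editor's sketch: independent ping "lanes" so a busy liveness lane cannot delay
// relay keepalives, which must fire well inside the ~30 s UDP NAT state timeout.
use std::time::{Duration, Instant};

struct Lane {
    name: &'static str,
    interval: Duration,
    last_run: Option<Instant>,
}

impl Lane {
    fn new(name: &'static str, interval_secs: u64) -> Self {
        Self { name, interval: Duration::from_secs(interval_secs), last_run: None }
    }
    // Returns true (and records the run) when this lane is due on its own schedule.
    fn due(&mut self, now: Instant) -> bool {
        let due = self.last_run.map_or(true, |t| now - t >= self.interval);
        if due {
            self.last_run = Some(now);
        }
        due
    }
}

fn main() {
    let mut lanes = vec![
        Lane::new("ping_validator_public_internet_relay", 10),
        Lane::new("ping_validator_public_internet", 1),
        Lane::new("ping_validator_local_network", 1),
        Lane::new("ping_validator_active_watch", 1),
    ];
    let now = Instant::now();
    for lane in &mut lanes {
        if lane.due(now) {
            println!("running lane: {}", lane.name);
        }
    }
}
```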
then our 'NodeInfo' is valid - /// If this is true, we can get our final peer info, otherwise we only have a 'best effort' peer info - pub fn has_valid_network_class(&self, routing_domain: RoutingDomain) -> bool { - self.inner.read().has_valid_network_class(routing_domain) - } - /// Return the domain's currently registered network class #[cfg_attr(target_arch = "wasm32", expect(dead_code))] pub fn get_network_class(&self, routing_domain: RoutingDomain) -> Option { diff --git a/veilid-core/src/routing_table/node_ref/traits.rs b/veilid-core/src/routing_table/node_ref/traits.rs index 2530b3bc..287632b7 100644 --- a/veilid-core/src/routing_table/node_ref/traits.rs +++ b/veilid-core/src/routing_table/node_ref/traits.rs @@ -91,12 +91,16 @@ pub trait NodeRefCommonTrait: NodeRefAccessorsTrait + NodeRefOperateTrait { .unwrap_or(0u64.into()) }) } - fn has_seen_our_node_info_ts( - &self, - routing_domain: RoutingDomain, - our_node_info_ts: Timestamp, - ) -> bool { - self.operate(|_rti, e| e.has_seen_our_node_info_ts(routing_domain, our_node_info_ts)) + fn has_seen_our_node_info_ts(&self, routing_domain: RoutingDomain) -> bool { + self.operate(|rti, e| { + let Some(our_node_info_ts) = rti + .get_published_peer_info(routing_domain) + .map(|pi| pi.signed_node_info().timestamp()) + else { + return false; + }; + e.has_seen_our_node_info_ts(routing_domain, our_node_info_ts) + }) } fn set_seen_our_node_info_ts(&self, routing_domain: RoutingDomain, seen_ts: Timestamp) { self.operate_mut(|_rti, e| e.set_seen_our_node_info_ts(routing_domain, seen_ts)); @@ -183,7 +187,7 @@ pub trait NodeRefCommonTrait: NodeRefAccessorsTrait + NodeRefOperateTrait { out } - /// Get the most recent 'last connection' to this node + /// Get the most recent 'last connection' to this node matching the node ref filter /// Filtered first and then sorted by ordering preference and then by most recent fn last_flow(&self) -> Option { self.operate(|rti, e| { @@ -203,7 +207,8 @@ pub trait NodeRefCommonTrait: NodeRefAccessorsTrait + NodeRefOperateTrait { }) } - /// Get all the 'last connection' flows for this node + /// Get all the 'last connection' flows for this node matching the node ref filter + /// Filtered first and then sorted by ordering preference and then by most recent #[expect(dead_code)] fn last_flows(&self) -> Vec { self.operate(|rti, e| { @@ -287,9 +292,9 @@ pub trait NodeRefCommonTrait: NodeRefAccessorsTrait + NodeRefOperateTrait { e.answer_rcvd(send_ts, recv_ts, bytes); }) } - fn stats_question_lost(&self) { + fn stats_lost_answer(&self) { self.operate_mut(|_rti, e| { - e.question_lost(); + e.lost_answer(); }) } fn stats_failed_to_send(&self, ts: Timestamp, expects_answer: bool) { @@ -297,4 +302,18 @@ pub trait NodeRefCommonTrait: NodeRefAccessorsTrait + NodeRefOperateTrait { e.failed_to_send(ts, expects_answer); }) } + fn report_sender_info( + &self, + routing_domain: RoutingDomain, + protocol_type: ProtocolType, + address_type: AddressType, + sender_info: SenderInfo, + ) -> Option { + self.operate_mut(|_rti, e| { + e.report_sender_info( + LastSenderInfoKey(routing_domain, protocol_type, address_type), + sender_info, + ) + }) + } } diff --git a/veilid-core/src/routing_table/route_spec_store/mod.rs b/veilid-core/src/routing_table/route_spec_store/mod.rs index 4a1bbadb..d2243c78 100644 --- a/veilid-core/src/routing_table/route_spec_store/mod.rs +++ b/veilid-core/src/routing_table/route_spec_store/mod.rs @@ -1122,8 +1122,7 @@ impl RouteSpecStore { // We can optimize the peer info in this safety route if it has been successfully 
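A sketch of the reworked `has_seen_our_node_info_ts` behavior: the timestamp now comes from the published peer info, and an unpublished routing domain reports false. The types and the `>=` comparison are simplifications here, not the exact entry logic:

```rust
// Editor's sketch: "has this peer seen our latest published node info?"
#[derive(Clone, Copy, PartialEq, PartialOrd)]
struct Timestamp(u64);

struct Entry {
    seen_our_node_info_ts: Option<Timestamp>,
}

fn has_seen_our_node_info_ts(entry: &Entry, published_node_info_ts: Option<Timestamp>) -> bool {
    // Nothing published yet, so the peer cannot have seen it.
    let Some(our_ts) = published_node_info_ts else {
        return false;
    };
    // Comparison shown as >= is an assumption of this sketch.
    entry.seen_our_node_info_ts.map_or(false, |seen| seen >= our_ts)
}

fn main() {
    let e = Entry { seen_our_node_info_ts: Some(Timestamp(100)) };
    assert!(has_seen_our_node_info_ts(&e, Some(Timestamp(100))));
    assert!(!has_seen_our_node_info_ts(&e, Some(Timestamp(101))));
    assert!(!has_seen_our_node_info_ts(&e, None));
}
```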
// communicated over either via an outbound test, or used as a private route inbound // and we are replying over the same route as our safety route outbound - let optimize = safety_rssd.get_stats().last_tested_ts.is_some() - || safety_rssd.get_stats().last_received_ts.is_some(); + let optimize = safety_rssd.get_stats().last_known_valid_ts.is_some(); // Get the first hop noderef of the safety route let first_hop = safety_rssd.hop_node_ref(0).unwrap(); @@ -1492,10 +1491,7 @@ impl RouteSpecStore { // See if we can optimize this compilation yet // We don't want to include full nodeinfo if we don't have to - let optimized = optimized.unwrap_or( - rssd.get_stats().last_tested_ts.is_some() - || rssd.get_stats().last_received_ts.is_some(), - ); + let optimized = optimized.unwrap_or(rssd.get_stats().last_known_valid_ts.is_some()); let rsd = rssd .get_route_by_key(key) @@ -1519,10 +1515,7 @@ impl RouteSpecStore { // See if we can optimize this compilation yet // We don't want to include full nodeinfo if we don't have to - let optimized = optimized.unwrap_or( - rssd.get_stats().last_tested_ts.is_some() - || rssd.get_stats().last_received_ts.is_some(), - ); + let optimized = optimized.unwrap_or(rssd.get_stats().last_known_valid_ts.is_some()); let mut out = Vec::new(); for (key, rsd) in rssd.iter_route_set() { @@ -1726,15 +1719,15 @@ impl RouteSpecStore { /// Clear caches when local our local node info changes #[instrument(level = "trace", target = "route", skip(self))] - pub fn reset(&self) { - log_rtab!(debug "flushing route spec store"); + pub fn reset_cache(&self) { + log_rtab!(debug "resetting route cache"); let inner = &mut *self.inner.lock(); - // Clean up local allocated routes + // Clean up local allocated routes (does not delete allocated routes, set republication flag) inner.content.reset_details(); - // Reset private route cache + // Reset private route cache (does not delete imported routes) inner.cache.reset_remote_private_routes(); } @@ -1761,6 +1754,17 @@ impl RouteSpecStore { inner.cache.roll_transfers(last_ts, cur_ts); } + /// Process answer statistics + pub fn roll_answers(&self, cur_ts: Timestamp) { + let inner = &mut *self.inner.lock(); + + // Roll transfers for locally allocated routes + inner.content.roll_answers(cur_ts); + + // Roll transfers for remote private routes + inner.cache.roll_answers(cur_ts); + } + /// Convert private route list to binary blob pub fn private_routes_to_blob(private_routes: &[PrivateRoute]) -> VeilidAPIResult> { let mut buffer = vec![]; @@ -1861,7 +1865,7 @@ impl RouteSpecStore { Ok(RouteId::new(vcrypto.generate_hash(&idbytes).bytes)) } - /// Generate RouteId from set of private routes + /// Generate RouteId from set of private routes fn generate_remote_route_id( &self, private_routes: &[PrivateRoute], diff --git a/veilid-core/src/routing_table/route_spec_store/route_spec_store_cache.rs b/veilid-core/src/routing_table/route_spec_store/route_spec_store_cache.rs index 75bf4e70..89eed378 100644 --- a/veilid-core/src/routing_table/route_spec_store/route_spec_store_cache.rs +++ b/veilid-core/src/routing_table/route_spec_store/route_spec_store_cache.rs @@ -365,6 +365,12 @@ impl RouteSpecStoreCache { v.get_stats_mut().roll_transfers(last_ts, cur_ts); } } + /// Roll answer statistics + pub fn roll_answers(&mut self, cur_ts: Timestamp) { + for (_k, v) in self.remote_private_route_set_cache.iter_mut() { + v.get_stats_mut().roll_answers(cur_ts); + } + } } impl Default for RouteSpecStoreCache { diff --git 
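The new `roll_answers()` fans out the same way `roll_transfers()` does: once over the locally allocated route details and once over the remote private-route cache. A reduced sketch of that pattern, with invented stand-in types:

```rust
// Editor's sketch: answer-stat rollup fanning out from the store to both collections.
use std::collections::HashMap;

#[derive(Default)]
struct RouteStats {
    answers_this_period: u32,
    answers_rolled: Vec<u32>,
}

impl RouteStats {
    fn roll_answers(&mut self) {
        self.answers_rolled.push(self.answers_this_period);
        self.answers_this_period = 0;
    }
}

#[derive(Default)]
struct RouteSpecStore {
    // keyed by an opaque route id in the real code; a String stands in here
    local_routes: HashMap<String, RouteStats>,
    remote_private_routes: HashMap<String, RouteStats>,
}

impl RouteSpecStore {
    fn roll_answers(&mut self) {
        for stats in self.local_routes.values_mut() {
            stats.roll_answers();
        }
        for stats in self.remote_private_routes.values_mut() {
            stats.roll_answers();
        }
    }
}

fn main() {
    let mut store = RouteSpecStore::default();
    store.local_routes.insert(
        "route-a".into(),
        RouteStats { answers_this_period: 3, ..Default::default() },
    );
    store.roll_answers();
    assert_eq!(store.local_routes["route-a"].answers_rolled, vec![3]);
}
```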
a/veilid-core/src/routing_table/route_spec_store/route_spec_store_content.rs b/veilid-core/src/routing_table/route_spec_store/route_spec_store_content.rs index 9127f00d..f46b54dd 100644 --- a/veilid-core/src/routing_table/route_spec_store/route_spec_store_content.rs +++ b/veilid-core/src/routing_table/route_spec_store/route_spec_store_content.rs @@ -122,4 +122,10 @@ impl RouteSpecStoreContent { rssd.get_stats_mut().roll_transfers(last_ts, cur_ts); } } + /// Roll answer statistics + pub fn roll_answers(&mut self, cur_ts: Timestamp) { + for rssd in self.details.values_mut() { + rssd.get_stats_mut().roll_answers(cur_ts); + } + } } diff --git a/veilid-core/src/routing_table/route_spec_store/route_stats.rs b/veilid-core/src/routing_table/route_spec_store/route_stats.rs index c224ced0..b3e2387f 100644 --- a/veilid-core/src/routing_table/route_spec_store/route_stats.rs +++ b/veilid-core/src/routing_table/route_spec_store/route_stats.rs @@ -5,30 +5,81 @@ pub(crate) struct RouteStats { /// Consecutive failed to send count #[serde(skip)] pub failed_to_send: u32, - /// Questions lost + /// Consecutive questions that didn't get an answer #[serde(skip)] - pub questions_lost: u32, + pub recent_lost_answers: u32, /// Timestamp of when the route was created pub created_ts: Timestamp, - /// Timestamp of when the route was last checked for validity + /// Timestamp of when the route was last checked for validity or received traffic #[serde(skip)] - pub last_tested_ts: Option, + pub last_known_valid_ts: Option, /// Timestamp of when the route was last sent to #[serde(skip)] pub last_sent_ts: Option, - /// Timestamp of when the route was last received over + /// Timestamp of when the route last received a question or statement #[serde(skip)] - pub last_received_ts: Option, + pub last_rcvd_question_ts: Option, + /// Timestamp of when the route last received an answer + #[serde(skip)] + pub last_rcvd_answer_ts: Option, /// Transfers up and down - pub transfer_stats_down_up: TransferStatsDownUp, + pub transfer: TransferStatsDownUp, /// Latency stats - pub latency_stats: LatencyStats, + pub latency: LatencyStats, + /// Answer stats + pub answer: AnswerStats, /// Accounting mechanism for this route's RPC latency #[serde(skip)] latency_stats_accounting: LatencyStatsAccounting, /// Accounting mechanism for the bandwidth across this route #[serde(skip)] transfer_stats_accounting: TransferStatsAccounting, + /// Accounting mechanism for this route's RPC answers + #[serde(skip)] + answer_stats_accounting: AnswerStatsAccounting, +} + +impl fmt::Display for RouteStats { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + writeln!(f, "created: {}", self.created_ts)?; + writeln!( + f, + "# recently-lost/failed-to-send: {} / {}", + self.recent_lost_answers, self.failed_to_send + )?; + writeln!( + f, + "last_known_valid: {}", + if let Some(ts) = &self.last_known_valid_ts { + ts.to_string() + } else { + "None".to_owned() + } + )?; + writeln!( + f, + "last_sent: {}", + if let Some(ts) = &self.last_sent_ts { + ts.to_string() + } else { + "None".to_owned() + } + )?; + writeln!( + f, + "last_rcvd_question: {}", + if let Some(ts) = &self.last_rcvd_question_ts { + ts.to_string() + } else { + "None".to_owned() + } + )?; + write!(f, "transfer:\n{}", indent_all_string(&self.transfer))?; + write!(f, "latency: {}", self.latency)?; + write!(f, "answer:\n{}", indent_all_string(&self.answer))?; + + Ok(()) + } } impl RouteStats { @@ -44,16 +95,28 @@ impl RouteStats { self.failed_to_send += 1; } - /// Mark a route as having lost a 
question - pub fn record_question_lost(&mut self) { - self.questions_lost += 1; + /// Mark a route as having lost an answer + pub fn record_lost_answer(&mut self) { + let cur_ts = Timestamp::now(); + self.recent_lost_answers += 1; + self.answer_stats_accounting.record_lost_answer(cur_ts); } - /// Mark a route as having received something - pub fn record_received(&mut self, cur_ts: Timestamp, bytes: ByteCount) { - self.last_received_ts = Some(cur_ts); - self.last_tested_ts = Some(cur_ts); + /// Mark a route as having received a question or statement + pub fn record_question_received(&mut self, cur_ts: Timestamp, bytes: ByteCount) { + self.last_rcvd_question_ts = Some(cur_ts); + self.last_known_valid_ts = Some(cur_ts); self.transfer_stats_accounting.add_down(bytes); + self.answer_stats_accounting.record_question(cur_ts); + } + + /// Mark a route as having received an answer + pub fn record_answer_received(&mut self, cur_ts: Timestamp, bytes: ByteCount) { + self.last_rcvd_answer_ts = Some(cur_ts); + self.last_known_valid_ts = Some(cur_ts); + self.recent_lost_answers = 0; + self.transfer_stats_accounting.add_down(bytes); + self.answer_stats_accounting.record_answer(cur_ts); } /// Mark a route as having been sent to @@ -67,58 +130,50 @@ impl RouteStats { /// Mark a route as having been sent to pub fn record_latency(&mut self, latency: TimestampDuration) { - self.latency_stats = self.latency_stats_accounting.record_latency(latency); - } - - /// Mark a route as having been tested - pub fn record_tested(&mut self, cur_ts: Timestamp) { - self.last_tested_ts = Some(cur_ts); - - // Reset question_lost and failed_to_send if we test clean - self.failed_to_send = 0; - self.questions_lost = 0; + self.latency = self.latency_stats_accounting.record_latency(latency); } /// Roll transfers for these route stats pub fn roll_transfers(&mut self, last_ts: Timestamp, cur_ts: Timestamp) { - self.transfer_stats_accounting.roll_transfers( - last_ts, - cur_ts, - &mut self.transfer_stats_down_up, - ) + self.transfer_stats_accounting + .roll_transfers(last_ts, cur_ts, &mut self.transfer); + } + pub fn roll_answers(&mut self, cur_ts: Timestamp) { + self.answer = self.answer_stats_accounting.roll_answers(cur_ts); } /// Get the latency stats pub fn latency_stats(&self) -> &LatencyStats { - &self.latency_stats + &self.latency } /// Get the transfer stats #[expect(dead_code)] pub fn transfer_stats(&self) -> &TransferStatsDownUp { - &self.transfer_stats_down_up + &self.transfer } /// Reset stats when network restarts pub fn reset(&mut self) { - self.last_tested_ts = None; + self.last_known_valid_ts = None; self.last_sent_ts = None; - self.last_received_ts = None; + self.last_rcvd_question_ts = None; + self.last_rcvd_answer_ts = None; self.failed_to_send = 0; - self.questions_lost = 0; + self.recent_lost_answers = 0; } /// Check if a route needs testing pub fn needs_testing(&self, cur_ts: Timestamp) -> bool { // Has the route had any failures lately? - if self.questions_lost > 0 || self.failed_to_send > 0 { + if self.recent_lost_answers > 0 || self.failed_to_send > 0 { // If so, always test return true; } // Has the route been tested within the idle time we'd want to check things? 
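A condensed sketch of how the renamed route statistics interact (field names abbreviated, timestamps as plain integers): a received answer refreshes `last_known_valid_ts` and clears `recent_lost_answers`, a lost answer only bumps the consecutive-loss counter, and `needs_testing` fires on any recent failure or after the idle window. The behavior when no validity timestamp exists yet is an assumption of this sketch:

```rust
// Editor's sketch: simplified RouteStats bookkeeping for questions, answers, and testing.
#[derive(Default, Debug)]
struct RouteStatsLite {
    failed_to_send: u32,
    recent_lost_answers: u32,
    last_known_valid_ts: Option<u64>,
    last_rcvd_question_ts: Option<u64>,
    last_rcvd_answer_ts: Option<u64>,
}

impl RouteStatsLite {
    fn record_question_received(&mut self, now: u64) {
        self.last_rcvd_question_ts = Some(now);
        self.last_known_valid_ts = Some(now);
    }
    fn record_answer_received(&mut self, now: u64) {
        self.last_rcvd_answer_ts = Some(now);
        self.last_known_valid_ts = Some(now);
        self.recent_lost_answers = 0; // an answer clears the consecutive-loss counter
    }
    fn record_lost_answer(&mut self) {
        self.recent_lost_answers += 1;
    }
    fn needs_testing(&self, now: u64, min_idle: u64) -> bool {
        if self.recent_lost_answers > 0 || self.failed_to_send > 0 {
            return true; // any recent failure forces a test
        }
        match self.last_known_valid_ts {
            Some(ts) => now.saturating_sub(ts) > min_idle,
            None => true, // never validated (assumption): test it
        }
    }
}

fn main() {
    let mut s = RouteStatsLite::default();
    s.record_lost_answer();
    assert!(s.needs_testing(0, 1_000));
    s.record_answer_received(10);
    assert!(!s.needs_testing(20, 1_000));
    assert!(s.needs_testing(2_000, 1_000));
}
```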
// (also if we've received successfully over the route, this will get set) - if let Some(last_tested_ts) = self.last_tested_ts { + if let Some(last_tested_ts) = self.last_known_valid_ts { if cur_ts.saturating_sub(last_tested_ts) > TimestampDuration::new(ROUTE_MIN_IDLE_TIME_MS as u64 * 1000u64) { diff --git a/veilid-core/src/routing_table/routing_table_inner/mod.rs b/veilid-core/src/routing_table/routing_table_inner/mod.rs index e39cc115..03d201e8 100644 --- a/veilid-core/src/routing_table/routing_table_inner/mod.rs +++ b/veilid-core/src/routing_table/routing_table_inner/mod.rs @@ -94,19 +94,6 @@ impl RoutingTableInner { } } - fn with_public_internet_routing_domain_mut(&mut self, f: F) -> R - where - F: FnOnce(&mut PublicInternetRoutingDomainDetail) -> R, - { - f(&mut self.public_internet_routing_domain) - } - fn with_local_network_routing_domain_mut(&mut self, f: F) -> R - where - F: FnOnce(&mut LocalNetworkRoutingDomainDetail) -> R, - { - f(&mut self.local_network_routing_domain) - } - pub fn relay_node(&self, domain: RoutingDomain) -> Option { self.with_routing_domain(domain, |rdd| rdd.relay_node()) } @@ -115,6 +102,17 @@ impl RoutingTableInner { self.with_routing_domain(domain, |rdd| rdd.relay_node_last_keepalive()) } + pub fn set_relay_node_last_keepalive(&mut self, domain: RoutingDomain, ts: Timestamp) { + match domain { + RoutingDomain::PublicInternet => self + .public_internet_routing_domain + .set_relay_node_last_keepalive(Some(ts)), + RoutingDomain::LocalNetwork => self + .local_network_routing_domain + .set_relay_node_last_keepalive(Some(ts)), + }; + } + #[expect(dead_code)] pub fn has_dial_info(&self, domain: RoutingDomain) -> bool { self.with_routing_domain(domain, |rdd| !rdd.dial_info_details().is_empty()) @@ -250,11 +248,6 @@ impl RoutingTableInner { self.with_routing_domain(routing_domain, |rdd| rdd.get_published_peer_info()) } - /// Return if this routing domain has a valid network class - pub fn has_valid_network_class(&self, routing_domain: RoutingDomain) -> bool { - self.with_routing_domain(routing_domain, |rdd| rdd.has_valid_network_class()) - } - /// Return a copy of our node's current peerinfo (may not yet be published) pub fn get_current_peer_info(&self, routing_domain: RoutingDomain) -> Arc { self.with_routing_domain(routing_domain, |rdd| rdd.get_peer_info(self)) @@ -477,6 +470,7 @@ impl RoutingTableInner { None } + // Collect all entries that are 'needs_ping' and have some node info making them reachable somehow pub(super) fn get_nodes_needing_ping( &self, outer_self: RoutingTable, @@ -487,47 +481,107 @@ impl RoutingTableInner { .get_published_peer_info(routing_domain) .map(|pi| pi.signed_node_info().timestamp()); - // Collect all entries that are 'needs_ping' and have some node info making them reachable somehow - let mut node_refs = Vec::::with_capacity(self.bucket_entry_count()); - self.with_entries(cur_ts, BucketEntryState::Unreliable, |rti, entry| { - let entry_needs_ping = |e: &BucketEntryInner| { - // If this entry isn't in the routing domain we are checking, don't include it - if !e.exists_in_routing_domain(rti, routing_domain) { - return false; + let mut filters = VecDeque::new(); + + // Remove our own node from the results + let filter_self = + Box::new(move |_rti: &RoutingTableInner, v: Option>| v.is_some()) + as RoutingTableEntryFilter; + filters.push_back(filter_self); + + let filter_ping = Box::new( + move |rti: &RoutingTableInner, v: Option>| { + let entry = v.unwrap(); + entry.with_inner(|e| { + // If this entry isn't in the routing domain we are 
checking, don't include it + if !e.exists_in_routing_domain(rti, routing_domain) { + return false; + } + + // If we don't have node status for this node, then we should ping it to get some node status + if e.has_node_info(routing_domain.into()) + && e.node_status(routing_domain).is_none() + { + return true; + } + + // If this entry needs a ping because this node hasn't seen our latest node info, then do it + if opt_own_node_info_ts.is_some() + && !e.has_seen_our_node_info_ts( + routing_domain, + opt_own_node_info_ts.unwrap(), + ) + { + return true; + } + + // If this entry needs need a ping by non-routing-domain-specific metrics then do it + if e.needs_ping(cur_ts) { + return true; + } + + false + }) + }, + ) as RoutingTableEntryFilter; + filters.push_back(filter_ping); + + // Sort by least recently contacted + let compare = |_rti: &RoutingTableInner, + a_entry: &Option>, + b_entry: &Option>| { + // same nodes are always the same + if let Some(a_entry) = a_entry { + if let Some(b_entry) = b_entry { + if Arc::ptr_eq(a_entry, b_entry) { + return core::cmp::Ordering::Equal; + } } - - // If we don't have node status for this node, then we should ping it to get some node status - if e.has_node_info(routing_domain.into()) && e.node_status(routing_domain).is_none() - { - return true; - } - - // If this entry needs a ping because this node hasn't seen our latest node info, then do it - if opt_own_node_info_ts.is_some() - && !e.has_seen_our_node_info_ts(routing_domain, opt_own_node_info_ts.unwrap()) - { - return true; - } - - // If this entry needs need a ping by non-routing-domain-specific metrics then do it - if e.needs_ping(cur_ts) { - return true; - } - - false - }; - - if entry.with_inner(entry_needs_ping) { - node_refs.push(FilteredNodeRef::new( - outer_self.clone(), - entry, - NodeRefFilter::new().with_routing_domain(routing_domain), - Sequencing::default(), - )); + } else if b_entry.is_none() { + return core::cmp::Ordering::Equal; } - Option::<()>::None - }); - node_refs + + // our own node always comes last (should not happen, here for completeness) + if a_entry.is_none() { + return core::cmp::Ordering::Greater; + } + if b_entry.is_none() { + return core::cmp::Ordering::Less; + } + // Sort by least recently contacted regardless of reliability + // If something needs a ping it should get it in the order of need + let ae = a_entry.as_ref().unwrap(); + let be = b_entry.as_ref().unwrap(); + ae.with_inner(|ae| { + be.with_inner(|be| { + let ca = ae + .peer_stats() + .rpc_stats + .last_question_ts + .unwrap_or(Timestamp::new(0)) + .as_u64(); + let cb = be + .peer_stats() + .rpc_stats + .last_question_ts + .unwrap_or(Timestamp::new(0)) + .as_u64(); + + ca.cmp(&cb) + }) + }) + }; + + let transform = |_rti: &RoutingTableInner, v: Option>| { + FilteredNodeRef::new( + outer_self.clone(), + v.unwrap().clone(), + NodeRefFilter::new().with_routing_domain(routing_domain), + Sequencing::default(), + ) + }; + + self.find_peers_with_sort_and_filter(usize::MAX, cur_ts, filters, compare, transform) } #[expect(dead_code)] @@ -891,20 +945,16 @@ impl RoutingTableInner { } // Public internet routing domain is ready for app use, - // when we have proper dialinfo/networkclass - let public_internet_ready = !matches!( - self.get_network_class(RoutingDomain::PublicInternet) - .unwrap_or_default(), - NetworkClass::Invalid - ); + // when we have proper dialinfo/networkclass and it is published + let public_internet_ready = self + .get_published_peer_info(RoutingDomain::PublicInternet) + .is_some(); // Local internet 
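The comparator above orders ping candidates by `last_question_ts`, treating a missing timestamp as zero so never-questioned nodes come first. A minimal standalone equivalent:

```rust
// Editor's sketch: least-recently-questioned ordering for get_nodes_needing_ping.
struct Candidate {
    node: &'static str,
    last_question_ts: Option<u64>,
}

fn sort_least_recently_contacted(candidates: &mut [Candidate]) {
    // Missing timestamps sort as zero, i.e. "never asked" comes first.
    candidates.sort_by_key(|c| c.last_question_ts.unwrap_or(0));
}

fn main() {
    let mut cands = [
        Candidate { node: "a", last_question_ts: Some(500) },
        Candidate { node: "b", last_question_ts: None },
        Candidate { node: "c", last_question_ts: Some(100) },
    ];
    sort_least_recently_contacted(&mut cands);
    let order: Vec<&str> = cands.iter().map(|c| c.node).collect();
    assert_eq!(order, ["b", "c", "a"]);
}
```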
routing domain is ready for app use - // when we have proper dialinfo/networkclass - let local_network_ready = !matches!( - self.get_network_class(RoutingDomain::LocalNetwork) - .unwrap_or_default(), - NetworkClass::Invalid - ); + // when we have proper dialinfo/networkclass and it is published + let local_network_ready = self + .get_published_peer_info(RoutingDomain::LocalNetwork) + .is_some(); let live_entry_counts = self.cached_entry_counts(); @@ -1017,7 +1067,7 @@ impl RoutingTableInner { &'b Option>, &'b Option>, ) -> core::cmp::Ordering, - T: for<'r, 't> FnMut(&'r RoutingTableInner, Option>) -> O, + T: for<'r> FnMut(&'r RoutingTableInner, Option>) -> O, { // collect all the nodes for sorting let mut nodes = diff --git a/veilid-core/src/routing_table/routing_table_inner/routing_domains/editor.rs b/veilid-core/src/routing_table/routing_table_inner/routing_domains/editor.rs index 060ff1af..1544d2f9 100644 --- a/veilid-core/src/routing_table/routing_table_inner/routing_domains/editor.rs +++ b/veilid-core/src/routing_table/routing_table_inner/routing_domains/editor.rs @@ -6,9 +6,7 @@ pub trait RoutingDomainEditorCommonTrait { address_type: Option, protocol_type: Option, ) -> &mut Self; - fn clear_relay_node(&mut self) -> &mut Self; - fn set_relay_node(&mut self, relay_node: NodeRef) -> &mut Self; - fn set_relay_node_keepalive(&mut self, ts: Option) -> &mut Self; + fn set_relay_node(&mut self, relay_node: Option) -> &mut Self; #[cfg_attr(target_arch = "wasm32", expect(dead_code))] fn add_dial_info(&mut self, dial_info: DialInfo, class: DialInfoClass) -> &mut Self; fn setup_network( @@ -18,7 +16,6 @@ pub trait RoutingDomainEditorCommonTrait { address_types: AddressTypeSet, capabilities: Vec, ) -> &mut Self; - fn set_network_class(&mut self, network_class: Option) -> &mut Self; fn commit(&mut self, pause_tasks: bool) -> SendPinBoxFutureLifetime<'_, bool>; fn shutdown(&mut self) -> SendPinBoxFutureLifetime<'_, ()>; fn publish(&mut self); @@ -41,17 +38,10 @@ impl RoutingDomainDetailApplyCommonChange .clear_dial_info_details(address_type, protocol_type); } - RoutingDomainChangeCommon::ClearRelayNode => { - self.common_mut().set_relay_node(None); - } - RoutingDomainChangeCommon::SetRelayNode { relay_node } => { - self.common_mut().set_relay_node(Some(relay_node.clone())) + self.common_mut().set_relay_node(relay_node) } - RoutingDomainChangeCommon::SetRelayNodeKeepalive { ts } => { - self.common_mut().set_relay_node_last_keepalive(ts); - } RoutingDomainChangeCommon::AddDialInfo { dial_info_detail } => { if !self.ensure_dial_info_is_valid(&dial_info_detail.dial_info) { return; @@ -77,9 +67,6 @@ impl RoutingDomainDetailApplyCommonChange capabilities.clone(), ); } - RoutingDomainChangeCommon::SetNetworkClass { network_class } => { - self.common_mut().set_network_class(network_class); - } } } } @@ -90,12 +77,8 @@ pub(super) enum RoutingDomainChangeCommon { address_type: Option, protocol_type: Option, }, - ClearRelayNode, SetRelayNode { - relay_node: NodeRef, - }, - SetRelayNodeKeepalive { - ts: Option, + relay_node: Option, }, AddDialInfo { dial_info_detail: DialInfoDetail, @@ -110,7 +93,4 @@ pub(super) enum RoutingDomainChangeCommon { address_types: AddressTypeSet, capabilities: Vec, }, - SetNetworkClass { - network_class: Option, - }, } diff --git a/veilid-core/src/routing_table/routing_table_inner/routing_domains/local_network/editor.rs b/veilid-core/src/routing_table/routing_table_inner/routing_domains/local_network/editor.rs index 4b8de70f..aa5b67bf 100644 --- 
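A hypothetical, much-reduced view of the routing domain editor flow after this change: `set_relay_node` takes an `Option<NodeRef>` (replacing `clear_relay_node`), `commit()` applies the queued changes and reports whether peer info changed, and `publish()` is a separate explicit step. The trait surface below is illustrative only; the real commit is async and pauses tasks:

```rust
// Editor's sketch: edit, commit, then publish only when something actually changed.
#[derive(Debug, Clone, PartialEq)]
struct NodeRef(&'static str);

#[derive(Default)]
struct Editor {
    relay_node: Option<NodeRef>,
    dial_info: Vec<&'static str>,
    changed: bool,
}

impl Editor {
    fn set_relay_node(&mut self, relay_node: Option<NodeRef>) -> &mut Self {
        if self.relay_node != relay_node {
            self.relay_node = relay_node;
            self.changed = true;
        }
        self
    }
    fn add_dial_info(&mut self, di: &'static str) -> &mut Self {
        self.dial_info.push(di);
        self.changed = true;
        self
    }
    fn commit(&mut self) -> bool {
        // Returns whether peer info changed; resets the change flag.
        std::mem::take(&mut self.changed)
    }
    fn publish(&self) {
        println!(
            "publishing peer info: relay={:?} dial_info={:?}",
            self.relay_node, self.dial_info
        );
    }
}

fn main() {
    let mut editor = Editor::default();
    editor
        .set_relay_node(Some(NodeRef("relay-1")))
        .add_dial_info("udp/1.2.3.4:5150");
    if editor.commit() {
        editor.publish();
    }
}
```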
a/veilid-core/src/routing_table/routing_table_inner/routing_domains/local_network/editor.rs +++ b/veilid-core/src/routing_table/routing_table_inner/routing_domains/local_network/editor.rs @@ -47,27 +47,13 @@ impl RoutingDomainEditorCommonTrait for RoutingDomainEditorLocalNetwork { self } #[instrument(level = "debug", skip(self))] - fn clear_relay_node(&mut self) -> &mut Self { - self.changes.push(RoutingDomainChangeLocalNetwork::Common( - RoutingDomainChangeCommon::ClearRelayNode, - )); - self - } - #[instrument(level = "debug", skip(self))] - fn set_relay_node(&mut self, relay_node: NodeRef) -> &mut Self { + fn set_relay_node(&mut self, relay_node: Option) -> &mut Self { self.changes.push(RoutingDomainChangeLocalNetwork::Common( RoutingDomainChangeCommon::SetRelayNode { relay_node }, )); self } #[instrument(level = "debug", skip(self))] - fn set_relay_node_keepalive(&mut self, ts: Option) -> &mut Self { - self.changes.push(RoutingDomainChangeLocalNetwork::Common( - RoutingDomainChangeCommon::SetRelayNodeKeepalive { ts }, - )); - self - } - #[instrument(level = "debug", skip(self))] fn add_dial_info(&mut self, dial_info: DialInfo, class: DialInfoClass) -> &mut Self { self.changes.push(RoutingDomainChangeLocalNetwork::Common( RoutingDomainChangeCommon::AddDialInfo { @@ -116,14 +102,6 @@ impl RoutingDomainEditorCommonTrait for RoutingDomainEditorLocalNetwork { self } - #[instrument(level = "debug", skip(self))] - fn set_network_class(&mut self, network_class: Option) -> &mut Self { - self.changes.push(RoutingDomainChangeLocalNetwork::Common( - RoutingDomainChangeCommon::SetNetworkClass { network_class }, - )); - self - } - #[instrument(level = "debug", skip(self))] fn commit(&mut self, pause_tasks: bool) -> SendPinBoxFutureLifetime<'_, bool> { Box::pin(async move { @@ -140,107 +118,125 @@ impl RoutingDomainEditorCommonTrait for RoutingDomainEditorLocalNetwork { // Apply changes let mut peer_info_changed = false; + { + let mut rti_lock = self.routing_table.inner.write(); + let rti = &mut rti_lock; + let detail = &mut rti.local_network_routing_domain; + { + let old_dial_info_details = detail.dial_info_details().clone(); + let old_relay_node = detail.relay_node(); + let old_outbound_protocols = detail.outbound_protocols(); + let old_inbound_protocols = detail.inbound_protocols(); + let old_address_types = detail.address_types(); + let old_capabilities = detail.capabilities(); + let old_network_class = detail.network_class(); - let mut rti_lock = self.routing_table.inner.write(); - let rti = &mut rti_lock; - rti.with_local_network_routing_domain_mut(|detail| { - let old_dial_info_details = detail.dial_info_details().clone(); - let old_relay_node = detail.relay_node(); - let old_outbound_protocols = detail.outbound_protocols(); - let old_inbound_protocols = detail.inbound_protocols(); - let old_address_types = detail.address_types(); - let old_capabilities = detail.capabilities(); - let old_network_class = detail.network_class(); - - for change in self.changes.drain(..) { - match change { - RoutingDomainChangeLocalNetwork::Common(common_change) => { - detail.apply_common_change(common_change); - } - RoutingDomainChangeLocalNetwork::SetLocalNetworks { local_networks } => { - detail.set_local_networks(local_networks); + for change in self.changes.drain(..) 
{ + match change { + RoutingDomainChangeLocalNetwork::Common(common_change) => { + detail.apply_common_change(common_change); + } + RoutingDomainChangeLocalNetwork::SetLocalNetworks { + local_networks, + } => { + detail.set_local_networks(local_networks); + } } } - } - let new_dial_info_details = detail.dial_info_details().clone(); - let new_relay_node = detail.relay_node(); - let new_outbound_protocols = detail.outbound_protocols(); - let new_inbound_protocols = detail.inbound_protocols(); - let new_address_types = detail.address_types(); - let new_capabilities = detail.capabilities(); - let new_network_class = detail.network_class(); + let new_dial_info_details = detail.dial_info_details().clone(); + let new_relay_node = detail.relay_node(); + let new_outbound_protocols = detail.outbound_protocols(); + let new_inbound_protocols = detail.inbound_protocols(); + let new_address_types = detail.address_types(); + let new_capabilities = detail.capabilities(); + let new_network_class = detail.network_class(); - // Compare and see if peerinfo needs republication - let removed_dial_info = old_dial_info_details - .iter() - .filter(|di| !new_dial_info_details.contains(di)) - .collect::>(); - if !removed_dial_info.is_empty() { - info!("[LocalNetwork] removed dial info: {:#?}", removed_dial_info); - peer_info_changed = true; - } - let added_dial_info = new_dial_info_details - .iter() - .filter(|di| !old_dial_info_details.contains(di)) - .collect::>(); - if !added_dial_info.is_empty() { - info!("[LocalNetwork] added dial info: {:#?}", added_dial_info); - peer_info_changed = true; - } - if let Some(nrn) = new_relay_node { - if let Some(orn) = old_relay_node { - if !nrn.same_entry(&orn) { - info!("[LocalNetwork] change relay: {} -> {}", orn, nrn); + // Compare and see if peerinfo needs republication + let removed_dial_info = old_dial_info_details + .iter() + .filter(|di| !new_dial_info_details.contains(di)) + .collect::>(); + if !removed_dial_info.is_empty() { + info!( + "[LocalNetwork] removed dial info:\n{}", + indent_all_string(&removed_dial_info.to_multiline_string()) + ); + peer_info_changed = true; + } + let added_dial_info = new_dial_info_details + .iter() + .filter(|di| !old_dial_info_details.contains(di)) + .collect::>(); + if !added_dial_info.is_empty() { + info!( + "[LocalNetwork] added dial info:\n{}", + indent_all_string(&added_dial_info.to_multiline_string()) + ); + peer_info_changed = true; + } + if let Some(nrn) = new_relay_node { + if let Some(orn) = old_relay_node { + if !nrn.same_entry(&orn) { + info!("[LocalNetwork] change relay: {} -> {}", orn, nrn); + peer_info_changed = true; + } + } else { + info!("[LocalNetwork] set relay: {}", nrn); peer_info_changed = true; } - } else { - info!("[LocalNetwork] set relay: {}", nrn); + } + if old_outbound_protocols != new_outbound_protocols { + info!( + "[LocalNetwork] changed network: outbound {:?}->{:?}\n", + old_outbound_protocols, new_outbound_protocols + ); + peer_info_changed = true; + } + if old_inbound_protocols != new_inbound_protocols { + info!( + "[LocalNetwork] changed network: inbound {:?}->{:?}\n", + old_inbound_protocols, new_inbound_protocols, + ); + peer_info_changed = true; + } + if old_address_types != new_address_types { + info!( + "[LocalNetwork] changed network: address types {:?}->{:?}\n", + old_address_types, new_address_types, + ); + peer_info_changed = true; + } + if old_capabilities != new_capabilities { + info!( + "[PublicInternet] changed network: capabilities {:?}->{:?}\n", + old_capabilities, new_capabilities + ); 
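The change detection in `commit()` boils down to set differences between the old and new dial info lists: anything present before but not after counts as removed, and vice versa. A simplified sketch using plain strings in place of `DialInfoDetail`:

```rust
// Editor's sketch: removed/added dial info diffing that drives peer_info_changed.
fn diff_dial_info<'a>(old: &'a [String], new: &'a [String]) -> (Vec<&'a String>, Vec<&'a String>) {
    let removed: Vec<&String> = old.iter().filter(|di| !new.contains(*di)).collect();
    let added: Vec<&String> = new.iter().filter(|di| !old.contains(*di)).collect();
    (removed, added)
}

fn main() {
    let old = vec!["udp/10.0.0.2:5150".to_string()];
    let new = vec![
        "udp/10.0.0.2:5150".to_string(),
        "tcp/10.0.0.2:5150".to_string(),
    ];
    let (removed, added) = diff_dial_info(&old, &new);
    let peer_info_changed = !removed.is_empty() || !added.is_empty();
    assert!(peer_info_changed);
    assert!(removed.is_empty() && added.len() == 1);
}
```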
+ peer_info_changed = true; + } + if old_network_class != new_network_class { + info!( + "[LocalNetwork] changed network class: {:?}->{:?}\n", + old_network_class, new_network_class + ); peer_info_changed = true; } } - if old_outbound_protocols != new_outbound_protocols { - info!( - "[LocalNetwork] changed network: outbound {:?}->{:?}\n", - old_outbound_protocols, new_outbound_protocols - ); - peer_info_changed = true; - } - if old_inbound_protocols != new_inbound_protocols { - info!( - "[LocalNetwork] changed network: inbound {:?}->{:?}\n", - old_inbound_protocols, new_inbound_protocols, - ); - peer_info_changed = true; - } - if old_address_types != new_address_types { - info!( - "[LocalNetwork] changed network: address types {:?}->{:?}\n", - old_address_types, new_address_types, - ); - peer_info_changed = true; - } - if old_capabilities != new_capabilities { - info!( - "[PublicInternet] changed network: capabilities {:?}->{:?}\n", - old_capabilities, new_capabilities - ); - peer_info_changed = true; - } - if old_network_class != new_network_class { - info!( - "[LocalNetwork] changed network class: {:?}->{:?}\n", - old_network_class, new_network_class - ); - peer_info_changed = true; - } - }); - if peer_info_changed { - // Allow signed node info updates at same timestamp for otherwise dead nodes if our network has changed - rti.reset_all_updated_since_last_network_change(); + if peer_info_changed { + // Allow signed node info updates at same timestamp for otherwise dead nodes if our network has changed + rti.reset_all_updated_since_last_network_change(); + } } + // Operations that require an unlocked routing table go here + if peer_info_changed { + // Update protections + self.routing_table + .network_manager() + .connection_manager() + .update_protections(); + } peer_info_changed }) } @@ -257,8 +253,7 @@ impl RoutingDomainEditorCommonTrait for RoutingDomainEditorLocalNetwork { fn shutdown(&mut self) -> SendPinBoxFutureLifetime<'_, ()> { Box::pin(async move { self.clear_dial_info_details(None, None) - .set_network_class(None) - .clear_relay_node() + .set_relay_node(None) .commit(true) .await; self.routing_table diff --git a/veilid-core/src/routing_table/routing_table_inner/routing_domains/local_network/mod.rs b/veilid-core/src/routing_table/routing_table_inner/routing_domains/local_network/mod.rs index c440c666..d4162922 100644 --- a/veilid-core/src/routing_table/routing_table_inner/routing_domains/local_network/mod.rs +++ b/veilid-core/src/routing_table/routing_table_inner/routing_domains/local_network/mod.rs @@ -70,6 +70,9 @@ impl RoutingDomainDetail for LocalNetworkRoutingDomainDetail { fn capabilities(&self) -> Vec { self.common.capabilities() } + fn requires_relay(&self) -> Option { + self.common.requires_relay() + } fn relay_node(&self) -> Option { self.common.relay_node() } @@ -79,10 +82,6 @@ impl RoutingDomainDetail for LocalNetworkRoutingDomainDetail { fn dial_info_details(&self) -> &Vec { self.common.dial_info_details() } - fn has_valid_network_class(&self) -> bool { - self.common.has_valid_network_class() - } - fn inbound_dial_info_filter(&self) -> DialInfoFilter { self.common.inbound_dial_info_filter() } @@ -113,33 +112,41 @@ impl RoutingDomainDetail for LocalNetworkRoutingDomainDetail { } fn publish_peer_info(&self, rti: &RoutingTableInner) -> bool { - let pi = self.get_peer_info(rti); + let peer_info = { + let pi = self.get_peer_info(rti); - // If the network class is not yet determined, don't publish - if pi.signed_node_info().node_info().network_class() == 
NetworkClass::Invalid { - log_rtab!(debug "[LocalNetwork] Not publishing peer info with invalid network class"); - return false; - } - - // If we need a relay and we don't have one, don't publish yet - if let Some(_relay_kind) = pi.signed_node_info().node_info().requires_relay() { - if pi.signed_node_info().relay_ids().is_empty() { - log_rtab!(debug "[LocalNetwork] Not publishing peer info that wants relay until we have a relay"); + // If the network class is not yet determined, don't publish + if pi.signed_node_info().node_info().network_class() == NetworkClass::Invalid { + log_rtab!(debug "[LocalNetwork] Not publishing peer info with invalid network class"); return false; } - } - // Don't publish if the peer info hasnt changed from our previous publication - let mut ppi_lock = self.published_peer_info.lock(); - if let Some(old_peer_info) = &*ppi_lock { - if pi.equivalent(old_peer_info) { - log_rtab!(debug "[LocalNetwork] Not publishing peer info because it is equivalent"); - return false; + // If we need a relay and we don't have one, don't publish yet + if let Some(_relay_kind) = self.requires_relay() { + if pi.signed_node_info().relay_ids().is_empty() { + log_rtab!(debug "[LocalNetwork] Not publishing peer info that wants relay until we have a relay"); + return false; + } } - } - log_rtab!(debug "[LocalNetwork] Published new peer info: {:#?}", pi); - *ppi_lock = Some(pi); + // Don't publish if the peer info hasnt changed from our previous publication + let mut ppi_lock = self.published_peer_info.lock(); + if let Some(old_peer_info) = &*ppi_lock { + if pi.equivalent(old_peer_info) { + log_rtab!(debug "[LocalNetwork] Not publishing peer info because it is equivalent"); + return false; + } + } + + log_rtab!(debug "[LocalNetwork] Published new peer info: {}", pi); + *ppi_lock = Some(pi.clone()); + + pi + }; + + rti.unlocked_inner + .network_manager() + .report_peer_info_change(peer_info); true } @@ -200,4 +207,8 @@ impl RoutingDomainDetail for LocalNetworkRoutingDomainDetail { ContactMethod::Unreachable } + + fn set_relay_node_last_keepalive(&mut self, ts: Option) { + self.common.set_relay_node_last_keepalive(ts); + } } diff --git a/veilid-core/src/routing_table/routing_table_inner/routing_domains/mod.rs b/veilid-core/src/routing_table/routing_table_inner/routing_domains/mod.rs index 7447fbcc..4266f019 100644 --- a/veilid-core/src/routing_table/routing_table_inner/routing_domains/mod.rs +++ b/veilid-core/src/routing_table/routing_table_inner/routing_domains/mod.rs @@ -18,10 +18,10 @@ pub(crate) trait RoutingDomainDetail { fn inbound_protocols(&self) -> ProtocolTypeSet; fn address_types(&self) -> AddressTypeSet; fn capabilities(&self) -> Vec; + fn requires_relay(&self) -> Option; fn relay_node(&self) -> Option; fn relay_node_last_keepalive(&self) -> Option; fn dial_info_details(&self) -> &Vec; - fn has_valid_network_class(&self) -> bool; fn get_published_peer_info(&self) -> Option>; fn inbound_dial_info_filter(&self) -> DialInfoFilter; fn outbound_dial_info_filter(&self) -> DialInfoFilter; @@ -49,6 +49,9 @@ pub(crate) trait RoutingDomainDetail { sequencing: Sequencing, dif_sort: Option>, ) -> ContactMethod; + + // Set last relay keepalive time + fn set_relay_node_last_keepalive(&mut self, ts: Option); } trait RoutingDomainDetailCommonAccessors: RoutingDomainDetail { @@ -109,31 +112,29 @@ fn first_filtered_dial_info_detail_between_nodes( #[derive(Debug)] struct RoutingDomainDetailCommon { routing_domain: RoutingDomain, - network_class: Option, outbound_protocols: ProtocolTypeSet, 
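The reworked `publish_peer_info()` applies three gates before storing and reporting a new PeerInfo: a determined network class, a relay when one is required, and non-equivalence with the previous publication. A reduced sketch with placeholder fields standing in for the signed-node-info accessors:

```rust
// Editor's sketch: the three publish gates, in the order the diff checks them.
#[derive(Clone, PartialEq)]
struct PeerInfoLite {
    network_class_valid: bool,
    requires_relay: bool,
    relay_ids: Vec<String>,
}

fn should_publish(candidate: &PeerInfoLite, previously_published: Option<&PeerInfoLite>) -> bool {
    // Gate 1: no publishing until the network class has been determined
    if !candidate.network_class_valid {
        return false;
    }
    // Gate 2: if we need a relay, wait until one is actually attached
    if candidate.requires_relay && candidate.relay_ids.is_empty() {
        return false;
    }
    // Gate 3: don't republish an equivalent peer info
    if previously_published.map_or(false, |old| old == candidate) {
        return false;
    }
    true
}

fn main() {
    let pi = PeerInfoLite { network_class_valid: true, requires_relay: true, relay_ids: vec![] };
    assert!(!should_publish(&pi, None)); // waiting on a relay
    let pi2 = PeerInfoLite { relay_ids: vec!["relay-key".into()], ..pi.clone() };
    assert!(should_publish(&pi2, None));
    assert!(!should_publish(&pi2, Some(&pi2))); // unchanged -> skip
}
```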
inbound_protocols: ProtocolTypeSet, address_types: AddressTypeSet, relay_node: Option, - relay_node_last_keepalive: Option, capabilities: Vec, dial_info_details: Vec, // caches cached_peer_info: Mutex>>, + relay_node_last_keepalive: Option, } impl RoutingDomainDetailCommon { pub fn new(routing_domain: RoutingDomain) -> Self { Self { routing_domain, - network_class: Default::default(), outbound_protocols: Default::default(), inbound_protocols: Default::default(), address_types: Default::default(), relay_node: Default::default(), - relay_node_last_keepalive: Default::default(), capabilities: Default::default(), dial_info_details: Default::default(), cached_peer_info: Mutex::new(Default::default()), + relay_node_last_keepalive: Default::default(), } } @@ -141,7 +142,24 @@ impl RoutingDomainDetailCommon { // Accessors pub fn network_class(&self) -> Option { - self.network_class + cfg_if! { + if #[cfg(target_arch = "wasm32")] { + Some(NetworkClass::WebApp) + } else { + if self.address_types.is_empty() { + None + } + else if self.dial_info_details.is_empty() { + if self.relay_node.is_none() { + None + } else { + Some(NetworkClass::OutboundOnly) + } + } else { + Some(NetworkClass::InboundCapable) + } + } + } } pub fn outbound_protocols(&self) -> ProtocolTypeSet { @@ -160,6 +178,37 @@ impl RoutingDomainDetailCommon { self.capabilities.clone() } + pub fn requires_relay(&self) -> Option { + match self.network_class()? { + NetworkClass::InboundCapable => { + let mut all_inbound_set: HashSet<(ProtocolType, AddressType)> = HashSet::new(); + for p in self.inbound_protocols { + for a in self.address_types { + all_inbound_set.insert((p, a)); + } + } + for did in &self.dial_info_details { + if did.class.requires_relay() { + return Some(RelayKind::Inbound); + } + let ib = (did.dial_info.protocol_type(), did.dial_info.address_type()); + all_inbound_set.remove(&ib); + } + if !all_inbound_set.is_empty() { + return Some(RelayKind::Inbound); + } + } + NetworkClass::OutboundOnly => { + return Some(RelayKind::Inbound); + } + NetworkClass::WebApp => { + return Some(RelayKind::Outbound); + } + NetworkClass::Invalid => {} + } + None + } + pub fn relay_node(&self) -> Option { self.relay_node.as_ref().map(|nr| { nr.custom_filtered(NodeRefFilter::new().with_routing_domain(self.routing_domain)) @@ -174,10 +223,6 @@ impl RoutingDomainDetailCommon { &self.dial_info_details } - pub fn has_valid_network_class(&self) -> bool { - self.network_class.unwrap_or(NetworkClass::Invalid) != NetworkClass::Invalid - } - pub fn inbound_dial_info_filter(&self) -> DialInfoFilter { DialInfoFilter::all() .with_protocol_type_set(self.inbound_protocols) @@ -219,19 +264,11 @@ impl RoutingDomainDetailCommon { self.clear_cache(); } - fn set_network_class(&mut self, network_class: Option) { - self.network_class = network_class; - self.clear_cache(); - } - fn set_relay_node(&mut self, opt_relay_node: Option) { self.relay_node = opt_relay_node; self.relay_node_last_keepalive = None; self.clear_cache(); } - fn set_relay_node_last_keepalive(&mut self, ts: Option) { - self.relay_node_last_keepalive = ts; - } fn clear_dial_info_details( &mut self, @@ -267,12 +304,16 @@ impl RoutingDomainDetailCommon { // self.clear_cache(); // } + fn set_relay_node_last_keepalive(&mut self, ts: Option) { + self.relay_node_last_keepalive = ts; + } + ////////////////////////////////////////////////////////////////////////////// // Internal functions fn make_peer_info(&self, rti: &RoutingTableInner) -> PeerInfo { let node_info = NodeInfo::new( - 
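`network_class()` is now derived instead of stored: wasm targets are always `WebApp`; otherwise no address types means undetermined, dial info present means `InboundCapable`, and no dial info resolves to `OutboundOnly` only when a relay is set. A standalone sketch of that decision, with the inputs reduced to booleans and a count:

```rust
// Editor's sketch: derived network class, mirroring the cfg_if logic in the diff.
#[derive(Debug, PartialEq)]
enum NetworkClass {
    InboundCapable,
    OutboundOnly,
    WebApp,
}

fn derive_network_class(
    is_wasm: bool,
    have_address_types: bool,
    dial_info_count: usize,
    have_relay: bool,
) -> Option<NetworkClass> {
    if is_wasm {
        return Some(NetworkClass::WebApp);
    }
    if !have_address_types {
        return None; // not yet determined
    }
    if dial_info_count == 0 {
        // outbound-only requires a relay; otherwise still undetermined
        return have_relay.then_some(NetworkClass::OutboundOnly);
    }
    Some(NetworkClass::InboundCapable)
}

fn main() {
    assert_eq!(derive_network_class(false, true, 2, false), Some(NetworkClass::InboundCapable));
    assert_eq!(derive_network_class(false, true, 0, true), Some(NetworkClass::OutboundOnly));
    assert_eq!(derive_network_class(false, false, 0, false), None);
    assert_eq!(derive_network_class(true, false, 0, false), Some(NetworkClass::WebApp));
}
```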
self.network_class.unwrap_or(NetworkClass::Invalid), + self.network_class().unwrap_or(NetworkClass::Invalid), self.outbound_protocols, self.address_types, VALID_ENVELOPE_VERSIONS.to_vec(), diff --git a/veilid-core/src/routing_table/routing_table_inner/routing_domains/public_internet/editor.rs b/veilid-core/src/routing_table/routing_table_inner/routing_domains/public_internet/editor.rs index a40bb999..1f299430 100644 --- a/veilid-core/src/routing_table/routing_table_inner/routing_domains/public_internet/editor.rs +++ b/veilid-core/src/routing_table/routing_table_inner/routing_domains/public_internet/editor.rs @@ -17,6 +17,28 @@ impl RoutingDomainEditorPublicInternet { changes: Vec::new(), } } + + fn sanitize(&self, detail: &mut PublicInternetRoutingDomainDetail) { + // Get the best dial info for each protocol type and address + let mut best_dids: HashMap<(ProtocolType, Address), DialInfoDetail> = HashMap::new(); + for did in detail.common.dial_info_details() { + let didkey = (did.dial_info.protocol_type(), did.dial_info.address()); + best_dids + .entry(didkey) + .and_modify(|e| { + if did.class < e.class { + *e = did.clone(); + } + }) + .or_insert(did.clone()); + } + + // Remove all but the best dial info for each protocol type, address type, and address + detail.common.clear_dial_info_details(None, None); + for did in best_dids.into_values() { + detail.common.add_dial_info_detail(did); + } + } } impl RoutingDomainEditorCommonTrait for RoutingDomainEditorPublicInternet { @@ -36,27 +58,13 @@ impl RoutingDomainEditorCommonTrait for RoutingDomainEditorPublicInternet { self } #[instrument(level = "debug", skip(self))] - fn clear_relay_node(&mut self) -> &mut Self { - self.changes.push(RoutingDomainChangePublicInternet::Common( - RoutingDomainChangeCommon::ClearRelayNode, - )); - self - } - #[instrument(level = "debug", skip(self))] - fn set_relay_node(&mut self, relay_node: NodeRef) -> &mut Self { + fn set_relay_node(&mut self, relay_node: Option) -> &mut Self { self.changes.push(RoutingDomainChangePublicInternet::Common( RoutingDomainChangeCommon::SetRelayNode { relay_node }, )); self } #[instrument(level = "debug", skip(self))] - fn set_relay_node_keepalive(&mut self, ts: Option) -> &mut Self { - self.changes.push(RoutingDomainChangePublicInternet::Common( - RoutingDomainChangeCommon::SetRelayNodeKeepalive { ts }, - )); - self - } - #[instrument(level = "debug", skip(self))] fn add_dial_info(&mut self, dial_info: DialInfo, class: DialInfoClass) -> &mut Self { self.changes.push(RoutingDomainChangePublicInternet::Common( RoutingDomainChangeCommon::AddDialInfo { @@ -105,14 +113,6 @@ impl RoutingDomainEditorCommonTrait for RoutingDomainEditorPublicInternet { self } - #[instrument(level = "debug", skip(self))] - fn set_network_class(&mut self, network_class: Option) -> &mut Self { - self.changes.push(RoutingDomainChangePublicInternet::Common( - RoutingDomainChangeCommon::SetNetworkClass { network_class }, - )); - self - } - #[instrument(level = "debug", skip(self))] fn commit(&mut self, pause_tasks: bool) -> SendPinBoxFutureLifetime<'_, bool> { Box::pin(async move { @@ -129,105 +129,122 @@ impl RoutingDomainEditorCommonTrait for RoutingDomainEditorPublicInternet { // Apply changes let mut peer_info_changed = false; + { + let mut rti_lock = self.routing_table.inner.write(); + let rti = &mut rti_lock; + let detail = &mut rti.public_internet_routing_domain; + { + let old_dial_info_details = detail.dial_info_details().clone(); + let old_relay_node = detail.relay_node(); + let old_outbound_protocols = 
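The new `sanitize()` pass above keeps only the best-classed dial info per (protocol type, address) pair. A simplified sketch, with a reduced `DialInfoClass` set whose derived ordering treats a lower variant as better:

```rust
// Editor's sketch: keep the best dial info per (protocol, address) key.
use std::collections::HashMap;

#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
enum DialInfoClass {
    Direct,
    Mapped,
    FullConeNAT,
    PortRestrictedNAT,
}

#[derive(Debug, Clone)]
struct DialInfoDetail {
    protocol: &'static str,
    address: &'static str,
    class: DialInfoClass,
}

fn sanitize(details: &[DialInfoDetail]) -> Vec<DialInfoDetail> {
    let mut best: HashMap<(&'static str, &'static str), DialInfoDetail> = HashMap::new();
    for did in details {
        best.entry((did.protocol, did.address))
            .and_modify(|e| {
                // keep the better (lower-ordered) class for this key
                if did.class < e.class {
                    *e = did.clone();
                }
            })
            .or_insert_with(|| did.clone());
    }
    best.into_values().collect()
}

fn main() {
    let out = sanitize(&[
        DialInfoDetail { protocol: "udp", address: "1.2.3.4", class: DialInfoClass::PortRestrictedNAT },
        DialInfoDetail { protocol: "udp", address: "1.2.3.4", class: DialInfoClass::Direct },
        DialInfoDetail { protocol: "tcp", address: "1.2.3.4", class: DialInfoClass::Mapped },
    ]);
    assert_eq!(out.len(), 2);
    assert!(out.iter().any(|d| d.protocol == "udp" && d.class == DialInfoClass::Direct));
}
```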
detail.outbound_protocols(); + let old_inbound_protocols = detail.inbound_protocols(); + let old_address_types = detail.address_types(); + let old_capabilities = detail.capabilities(); + let old_network_class = detail.network_class(); - let mut rti_lock = self.routing_table.inner.write(); - let rti = &mut rti_lock; - rti.with_public_internet_routing_domain_mut(|detail| { - let old_dial_info_details = detail.dial_info_details().clone(); - let old_relay_node = detail.relay_node(); - let old_outbound_protocols = detail.outbound_protocols(); - let old_inbound_protocols = detail.inbound_protocols(); - let old_address_types = detail.address_types(); - let old_capabilities = detail.capabilities(); - let old_network_class = detail.network_class(); - - for change in self.changes.drain(..) { - match change { - RoutingDomainChangePublicInternet::Common(common_change) => { - detail.apply_common_change(common_change); + for change in self.changes.drain(..) { + match change { + RoutingDomainChangePublicInternet::Common(common_change) => { + detail.apply_common_change(common_change); + } } } - } - let new_dial_info_details = detail.dial_info_details().clone(); - let new_relay_node = detail.relay_node(); - let new_outbound_protocols = detail.outbound_protocols(); - let new_inbound_protocols = detail.inbound_protocols(); - let new_address_types = detail.address_types(); - let new_capabilities = detail.capabilities(); - let new_network_class = detail.network_class(); + // Sanitize peer info + self.sanitize(detail); - // Compare and see if peerinfo needs republication - let removed_dial_info = old_dial_info_details - .iter() - .filter(|di| !new_dial_info_details.contains(di)) - .collect::>(); - if !removed_dial_info.is_empty() { - info!( - "[PublicInternet] removed dial info: {:#?}", - removed_dial_info - ); - peer_info_changed = true; - } - let added_dial_info = new_dial_info_details - .iter() - .filter(|di| !old_dial_info_details.contains(di)) - .collect::>(); - if !added_dial_info.is_empty() { - info!("[PublicInternet] added dial info: {:#?}", added_dial_info); - peer_info_changed = true; - } - if let Some(nrn) = new_relay_node { - if let Some(orn) = old_relay_node { - if !nrn.same_entry(&orn) { - info!("[PublicInternet] change relay: {} -> {}", orn, nrn); + let new_dial_info_details = detail.dial_info_details().clone(); + let new_relay_node = detail.relay_node(); + let new_outbound_protocols = detail.outbound_protocols(); + let new_inbound_protocols = detail.inbound_protocols(); + let new_address_types = detail.address_types(); + let new_capabilities = detail.capabilities(); + let new_network_class = detail.network_class(); + + // Compare and see if peerinfo needs republication + let removed_dial_info = old_dial_info_details + .iter() + .filter(|di| !new_dial_info_details.contains(di)) + .collect::>(); + if !removed_dial_info.is_empty() { + info!( + "[PublicInternet] removed dial info:\n{}", + indent_all_string(&removed_dial_info.to_multiline_string()) + ); + peer_info_changed = true; + } + let added_dial_info = new_dial_info_details + .iter() + .filter(|di| !old_dial_info_details.contains(di)) + .collect::>(); + if !added_dial_info.is_empty() { + info!( + "[PublicInternet] added dial info:\n{}", + indent_all_string(&added_dial_info.to_multiline_string()) + ); + peer_info_changed = true; + } + if let Some(nrn) = new_relay_node { + if let Some(orn) = old_relay_node { + if !nrn.same_entry(&orn) { + info!("[PublicInternet] change relay: {} -> {}", orn, nrn); + peer_info_changed = true; + } + } else { + 
info!("[PublicInternet] set relay: {}", nrn); peer_info_changed = true; } - } else { - info!("[PublicInternet] set relay: {}", nrn); + } + if old_outbound_protocols != new_outbound_protocols { + info!( + "[PublicInternet] changed network: outbound {:?}->{:?}\n", + old_outbound_protocols, new_outbound_protocols + ); + peer_info_changed = true; + } + if old_inbound_protocols != new_inbound_protocols { + info!( + "[PublicInternet] changed network: inbound {:?}->{:?}\n", + old_inbound_protocols, new_inbound_protocols, + ); + peer_info_changed = true; + } + if old_address_types != new_address_types { + info!( + "[PublicInternet] changed network: address types {:?}->{:?}\n", + old_address_types, new_address_types, + ); + peer_info_changed = true; + } + if old_capabilities != new_capabilities { + info!( + "[PublicInternet] changed network: capabilities {:?}->{:?}\n", + old_capabilities, new_capabilities + ); + peer_info_changed = true; + } + if old_network_class != new_network_class { + info!( + "[PublicInternet] changed network class: {:?}->{:?}\n", + old_network_class, new_network_class + ); peer_info_changed = true; } } - if old_outbound_protocols != new_outbound_protocols { - info!( - "[PublicInternet] changed network: outbound {:?}->{:?}\n", - old_outbound_protocols, new_outbound_protocols - ); - peer_info_changed = true; - } - if old_inbound_protocols != new_inbound_protocols { - info!( - "[PublicInternet] changed network: inbound {:?}->{:?}\n", - old_inbound_protocols, new_inbound_protocols, - ); - peer_info_changed = true; - } - if old_address_types != new_address_types { - info!( - "[PublicInternet] changed network: address types {:?}->{:?}\n", - old_address_types, new_address_types, - ); - peer_info_changed = true; - } - if old_capabilities != new_capabilities { - info!( - "[PublicInternet] changed network: capabilities {:?}->{:?}\n", - old_capabilities, new_capabilities - ); - peer_info_changed = true; - } - if old_network_class != new_network_class { - info!( - "[PublicInternet] changed network class: {:?}->{:?}\n", - old_network_class, new_network_class - ); - peer_info_changed = true; - } - }); + if peer_info_changed { + // Allow signed node info updates at same timestamp for otherwise dead nodes if our network has changed + rti.reset_all_updated_since_last_network_change(); + } + } + + // Operations that require an unlocked routing table go here if peer_info_changed { - // Allow signed node info updates at same timestamp for otherwise dead nodes if our network has changed - rti.reset_all_updated_since_last_network_change(); + // Update protections + self.routing_table + .network_manager() + .connection_manager() + .update_protections(); } peer_info_changed @@ -242,10 +259,10 @@ impl RoutingDomainEditorCommonTrait for RoutingDomainEditorPublicInternet { .write() .publish_peer_info(RoutingDomain::PublicInternet); - // Clear the routespecstore cache if our PublicInternet dial info has changed if changed { + // Clear the routespecstore cache if our PublicInternet dial info has changed let rss = self.routing_table.route_spec_store(); - rss.reset(); + rss.reset_cache(); } } @@ -253,8 +270,7 @@ impl RoutingDomainEditorCommonTrait for RoutingDomainEditorPublicInternet { fn shutdown(&mut self) -> SendPinBoxFutureLifetime<'_, ()> { Box::pin(async move { self.clear_dial_info_details(None, None) - .set_network_class(None) - .clear_relay_node() + .set_relay_node(None) .commit(true) .await; self.routing_table diff --git 
a/veilid-core/src/routing_table/routing_table_inner/routing_domains/public_internet/mod.rs b/veilid-core/src/routing_table/routing_table_inner/routing_domains/public_internet/mod.rs index f8691f7d..31b1e849 100644 --- a/veilid-core/src/routing_table/routing_table_inner/routing_domains/public_internet/mod.rs +++ b/veilid-core/src/routing_table/routing_table_inner/routing_domains/public_internet/mod.rs @@ -51,6 +51,9 @@ impl RoutingDomainDetail for PublicInternetRoutingDomainDetail { fn capabilities(&self) -> Vec { self.common.capabilities() } + fn requires_relay(&self) -> Option { + self.common.requires_relay() + } fn relay_node(&self) -> Option { self.common.relay_node() } @@ -60,9 +63,6 @@ impl RoutingDomainDetail for PublicInternetRoutingDomainDetail { fn dial_info_details(&self) -> &Vec { self.common.dial_info_details() } - fn has_valid_network_class(&self) -> bool { - self.common.has_valid_network_class() - } fn inbound_dial_info_filter(&self) -> DialInfoFilter { self.common.inbound_dial_info_filter() @@ -90,33 +90,41 @@ impl RoutingDomainDetail for PublicInternetRoutingDomainDetail { } fn publish_peer_info(&self, rti: &RoutingTableInner) -> bool { - let pi = self.get_peer_info(rti); + let peer_info = { + let pi = self.get_peer_info(rti); - // If the network class is not yet determined, don't publish - if pi.signed_node_info().node_info().network_class() == NetworkClass::Invalid { - log_rtab!(debug "[PublicInternet] Not publishing peer info with invalid network class"); - return false; - } - - // If we need a relay and we don't have one, don't publish yet - if let Some(_relay_kind) = pi.signed_node_info().node_info().requires_relay() { - if pi.signed_node_info().relay_ids().is_empty() { - log_rtab!(debug "[PublicInternet] Not publishing peer info that wants relay until we have a relay"); + // If the network class is not yet determined, don't publish + if pi.signed_node_info().node_info().network_class() == NetworkClass::Invalid { + log_rtab!(debug "[PublicInternet] Not publishing peer info with invalid network class"); return false; } - } - // Don't publish if the peer info hasnt changed from our previous publication - let mut ppi_lock = self.published_peer_info.lock(); - if let Some(old_peer_info) = &*ppi_lock { - if pi.equivalent(old_peer_info) { - log_rtab!(debug "[PublicInternet] Not publishing peer info because it is equivalent"); - return false; + // If we need a relay and we don't have one, don't publish yet + if let Some(_relay_kind) = self.requires_relay() { + if pi.signed_node_info().relay_ids().is_empty() { + log_rtab!(debug "[PublicInternet] Not publishing peer info that wants relay until we have a relay"); + return false; + } } - } - log_rtab!(debug "[PublicInternet] Published new peer info: {:#?}", pi); - *ppi_lock = Some(pi); + // Don't publish if the peer info hasnt changed from our previous publication + let mut ppi_lock = self.published_peer_info.lock(); + if let Some(old_peer_info) = &*ppi_lock { + if pi.equivalent(old_peer_info) { + log_rtab!(debug "[PublicInternet] Not publishing peer info because it is equivalent"); + return false; + } + } + + log_rtab!(debug "[PublicInternet] Published new peer info: {}", pi); + *ppi_lock = Some(pi.clone()); + + pi + }; + + rti.unlocked_inner + .network_manager() + .report_peer_info_change(peer_info); true } @@ -366,4 +374,8 @@ impl RoutingDomainDetail for PublicInternetRoutingDomainDetail { ContactMethod::Unreachable } + + fn set_relay_node_last_keepalive(&mut self, ts: Option) { + self.common.set_relay_node_last_keepalive(ts); 
+ } } diff --git a/veilid-core/src/routing_table/stats_accounting.rs b/veilid-core/src/routing_table/stats_accounting.rs index 5f0c8960..540a6a03 100644 --- a/veilid-core/src/routing_table/stats_accounting.rs +++ b/veilid-core/src/routing_table/stats_accounting.rs @@ -1,5 +1,4 @@ -use crate::*; -use alloc::collections::VecDeque; +use super::*; // Latency entry is per round-trip packet (ping or data) // - Size is number of entries @@ -11,6 +10,17 @@ const ROLLING_LATENCIES_SIZE: usize = 10; const ROLLING_TRANSFERS_SIZE: usize = 10; pub const ROLLING_TRANSFERS_INTERVAL_SECS: u32 = 1; +// State entry is per state reason change +// - Size is number of entries +const ROLLING_STATE_REASON_SPAN_SIZE: usize = 32; +pub const UPDATE_STATE_STATS_INTERVAL_SECS: u32 = 1; + +// Answer entries are in counts per interval +// - Size is number of entries +// - Interval is number of seconds in each entry +const ROLLING_ANSWERS_SIZE: usize = 10; +pub const ROLLING_ANSWER_INTERVAL_SECS: u32 = 60; + #[derive(Debug, Clone, Copy, PartialEq, Eq, Default)] pub struct TransferCount { down: ByteCount, @@ -73,8 +83,10 @@ impl TransferStatsAccounting { transfer_stats.up.average += bpsu; } let len = self.rolling_transfers.len() as u64; - transfer_stats.down.average /= len; - transfer_stats.up.average /= len; + if len > 0 { + transfer_stats.down.average /= len; + transfer_stats.up.average /= len; + } } } @@ -90,7 +102,7 @@ impl LatencyStatsAccounting { } } - pub fn record_latency(&mut self, latency: TimestampDuration) -> veilid_api::LatencyStats { + pub fn record_latency(&mut self, latency: TimestampDuration) -> LatencyStats { while self.rolling_latencies.len() >= ROLLING_LATENCIES_SIZE { self.rolling_latencies.pop_front(); } @@ -107,8 +119,274 @@ impl LatencyStatsAccounting { ls.average += *rl; } let len = self.rolling_latencies.len() as u64; - ls.average /= len; + if len > 0 { + ls.average /= len; + } ls } } + +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub struct StateReasonSpan { + state_reason: BucketEntryStateReason, + enter_ts: Timestamp, +} + +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub struct StateSpan { + state: BucketEntryState, + enter_ts: Timestamp, +} + +#[derive(Debug, Clone, Default)] +pub struct StateStatsAccounting { + rolling_state_reason_spans: VecDeque, + last_stats: Option, +} + +impl StateStatsAccounting { + pub fn new() -> Self { + Self { + rolling_state_reason_spans: VecDeque::new(), + last_stats: None, + } + } + + fn make_stats(&self, cur_ts: Timestamp) -> StateStats { + let mut ss = StateStats::default(); + let srs = &mut ss.reason; + + let mut last_ts = cur_ts; + for rss in self.rolling_state_reason_spans.iter().rev() { + let span_dur = last_ts.saturating_sub(rss.enter_ts); + + match BucketEntryState::from(rss.state_reason) { + BucketEntryState::Punished => ss.punished += span_dur, + BucketEntryState::Dead => ss.dead += span_dur, + BucketEntryState::Unreliable => ss.unreliable += span_dur, + BucketEntryState::Reliable => ss.reliable += span_dur, + } + match rss.state_reason { + BucketEntryStateReason::Punished(_) => { + // Ignore punished nodes for now + } + BucketEntryStateReason::Dead(bucket_entry_dead_reason) => { + match bucket_entry_dead_reason { + BucketEntryDeadReason::CanNotSend => srs.can_not_send += span_dur, + BucketEntryDeadReason::TooManyLostAnswers => { + srs.too_many_lost_answers += span_dur + } + BucketEntryDeadReason::NoPingResponse => srs.no_ping_response += span_dur, + } + } + BucketEntryStateReason::Unreliable(bucket_entry_unreliable_reason) => { + match 
bucket_entry_unreliable_reason { + BucketEntryUnreliableReason::FailedToSend => srs.failed_to_send += span_dur, + BucketEntryUnreliableReason::LostAnswers => srs.lost_answers += span_dur, + BucketEntryUnreliableReason::NotSeenConsecutively => { + srs.not_seen_consecutively += span_dur + } + BucketEntryUnreliableReason::InUnreliablePingSpan => { + srs.in_unreliable_ping_span += span_dur + } + } + } + BucketEntryStateReason::Reliable => { + // Reliable nodes don't have a reason other than lack of unreliability + } + } + + last_ts = rss.enter_ts; + } + ss.span = cur_ts.saturating_sub(last_ts); + ss + } + + pub fn take_stats(&mut self) -> Option { + self.last_stats.take() + } + + pub fn record_state_reason(&mut self, cur_ts: Timestamp, state_reason: BucketEntryStateReason) { + let new_span = if let Some(cur_span) = self.rolling_state_reason_spans.back() { + if state_reason != cur_span.state_reason { + while self.rolling_state_reason_spans.len() >= ROLLING_STATE_REASON_SPAN_SIZE { + self.rolling_state_reason_spans.pop_front(); + } + true + } else { + false + } + } else { + true + }; + if new_span { + self.last_stats = Some(self.make_stats(cur_ts)); + self.rolling_state_reason_spans.push_back(StateReasonSpan { + state_reason, + enter_ts: cur_ts, + }); + } + } +} + +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub struct AnswerSpan { + enter_ts: Timestamp, + questions: u32, + answers: u32, + lost_answers: u32, + current_consecutive_answers: u32, + current_consecutive_lost_answers: u32, + consecutive_answers_maximum: u32, + consecutive_answers_total: u32, + consecutive_answers_count: u32, + consecutive_answers_minimum: u32, + consecutive_lost_answers_maximum: u32, + consecutive_lost_answers_total: u32, + consecutive_lost_answers_count: u32, + consecutive_lost_answers_minimum: u32, +} + +impl AnswerSpan { + pub fn new(cur_ts: Timestamp) -> Self { + AnswerSpan { + enter_ts: cur_ts, + questions: 0, + answers: 0, + lost_answers: 0, + current_consecutive_answers: 0, + current_consecutive_lost_answers: 0, + consecutive_answers_maximum: 0, + consecutive_answers_total: 0, + consecutive_answers_count: 0, + consecutive_answers_minimum: 0, + consecutive_lost_answers_maximum: 0, + consecutive_lost_answers_total: 0, + consecutive_lost_answers_count: 0, + consecutive_lost_answers_minimum: 0, + } + } +} + +#[derive(Debug, Clone, Default)] +pub struct AnswerStatsAccounting { + rolling_answer_spans: VecDeque, +} + +impl AnswerStatsAccounting { + pub fn new() -> Self { + Self { + rolling_answer_spans: VecDeque::new(), + } + } + + fn current_span(&mut self, cur_ts: Timestamp) -> &mut AnswerSpan { + if self.rolling_answer_spans.is_empty() { + self.rolling_answer_spans.push_back(AnswerSpan::new(cur_ts)); + } + self.rolling_answer_spans.front_mut().unwrap() + } + + fn make_stats(&self, cur_ts: Timestamp) -> AnswerStats { + let mut questions = 0u32; + let mut answers = 0u32; + let mut lost_answers = 0u32; + let mut consecutive_answers_maximum = 0u32; + let mut consecutive_answers_average = 0u32; + let mut consecutive_answers_minimum = u32::MAX; + let mut consecutive_lost_answers_maximum = 0u32; + let mut consecutive_lost_answers_average = 0u32; + let mut consecutive_lost_answers_minimum = u32::MAX; + + let mut last_ts = cur_ts; + for ras in self.rolling_answer_spans.iter().rev() { + questions += ras.questions; + answers += ras.answers; + lost_answers += ras.lost_answers; + + consecutive_answers_maximum.max_assign(ras.consecutive_answers_maximum); + 
consecutive_answers_minimum.min_assign(ras.consecutive_answers_minimum); + consecutive_answers_average += if ras.consecutive_answers_total > 0 { + ras.consecutive_answers_count / ras.consecutive_answers_total + } else { + 0 + }; + + consecutive_lost_answers_maximum.max_assign(ras.consecutive_lost_answers_maximum); + consecutive_lost_answers_minimum.min_assign(ras.consecutive_lost_answers_minimum); + consecutive_lost_answers_average += if ras.consecutive_lost_answers_total > 0 { + ras.consecutive_lost_answers_count / ras.consecutive_lost_answers_total + } else { + 0 + }; + + last_ts = ras.enter_ts; + } + + let len = self.rolling_answer_spans.len() as u32; + if len > 0 { + consecutive_answers_average /= len; + consecutive_lost_answers_average /= len; + } + + let span = cur_ts.saturating_sub(last_ts); + + AnswerStats { + span, + questions, + answers, + lost_answers, + consecutive_answers_maximum, + consecutive_answers_average, + consecutive_answers_minimum, + consecutive_lost_answers_maximum, + consecutive_lost_answers_average, + consecutive_lost_answers_minimum, + } + } + + pub fn roll_answers(&mut self, cur_ts: Timestamp) -> AnswerStats { + let stats = self.make_stats(cur_ts); + + while self.rolling_answer_spans.len() >= ROLLING_ANSWERS_SIZE { + self.rolling_answer_spans.pop_front(); + } + self.rolling_answer_spans.push_back(AnswerSpan::new(cur_ts)); + + stats + } + + pub fn record_question(&mut self, cur_ts: Timestamp) { + let cas = self.current_span(cur_ts); + cas.questions += 1; + } + pub fn record_answer(&mut self, cur_ts: Timestamp) { + let cas = self.current_span(cur_ts); + cas.answers += 1; + if cas.current_consecutive_lost_answers > 0 { + cas.consecutive_lost_answers_maximum + .max_assign(cas.current_consecutive_lost_answers); + cas.consecutive_lost_answers_minimum + .min_assign(cas.current_consecutive_lost_answers); + cas.consecutive_lost_answers_total += cas.current_consecutive_lost_answers; + cas.consecutive_lost_answers_count += 1; + cas.current_consecutive_lost_answers = 0; + } + cas.current_consecutive_answers = 1; + } + pub fn record_lost_answer(&mut self, cur_ts: Timestamp) { + let cas = self.current_span(cur_ts); + cas.lost_answers += 1; + if cas.current_consecutive_answers > 0 { + cas.consecutive_answers_maximum + .max_assign(cas.current_consecutive_answers); + cas.consecutive_answers_minimum + .min_assign(cas.current_consecutive_answers); + cas.consecutive_answers_total += cas.current_consecutive_answers; + cas.consecutive_answers_count += 1; + cas.current_consecutive_answers = 0; + } + cas.current_consecutive_lost_answers = 1; + } +} diff --git a/veilid-core/src/routing_table/tasks/bootstrap.rs b/veilid-core/src/routing_table/tasks/bootstrap.rs index 5088ddd9..9e41109d 100644 --- a/veilid-core/src/routing_table/tasks/bootstrap.rs +++ b/veilid-core/src/routing_table/tasks/bootstrap.rs @@ -404,7 +404,17 @@ impl RoutingTable { peer_map.into_values().collect() } else { // If not direct, resolve bootstrap servers and recurse their TXT entries - let bsrecs = self.resolve_bootstrap(bootstrap).await?; + let bsrecs = match self + .resolve_bootstrap(bootstrap) + .timeout_at(stop_token.clone()) + .await + { + Ok(v) => v?, + Err(_) => { + // Stop requested + return Ok(()); + } + }; let peers: Vec> = bsrecs .into_iter() .map(|bsrec| { diff --git a/veilid-core/src/routing_table/tasks/mod.rs b/veilid-core/src/routing_table/tasks/mod.rs index 1b3a4f3f..bd0a8e62 100644 --- a/veilid-core/src/routing_table/tasks/mod.rs +++ b/veilid-core/src/routing_table/tasks/mod.rs @@ -5,7 +5,7 @@ pub 
mod peer_minimum_refresh; pub mod ping_validator; pub mod private_route_management; pub mod relay_management; -pub mod rolling_transfers; +pub mod update_statistics; use super::*; @@ -25,6 +25,34 @@ impl RoutingTable { }); } + // Set update state stats tick task + { + let this = self.clone(); + self.unlocked_inner + .update_state_stats_task + .set_routine(move |s, l, t| { + Box::pin(this.clone().update_state_stats_task_routine( + s, + Timestamp::new(l), + Timestamp::new(t), + )) + }); + } + + // Set rolling answers tick task + { + let this = self.clone(); + self.unlocked_inner + .rolling_answers_task + .set_routine(move |s, l, t| { + Box::pin(this.clone().rolling_answers_task_routine( + s, + Timestamp::new(l), + Timestamp::new(t), + )) + }); + } + // Set kick buckets tick task { let this = self.clone(); @@ -67,13 +95,58 @@ impl RoutingTable { }); } - // Set ping validator tick task + // Set ping validator PublicInternet tick task { let this = self.clone(); self.unlocked_inner - .ping_validator_task + .ping_validator_public_internet_task .set_routine(move |s, l, t| { - Box::pin(this.clone().ping_validator_task_routine( + Box::pin(this.clone().ping_validator_public_internet_task_routine( + s, + Timestamp::new(l), + Timestamp::new(t), + )) + }); + } + + // Set ping validator LocalNetwork tick task + { + let this = self.clone(); + self.unlocked_inner + .ping_validator_local_network_task + .set_routine(move |s, l, t| { + Box::pin(this.clone().ping_validator_local_network_task_routine( + s, + Timestamp::new(l), + Timestamp::new(t), + )) + }); + } + + // Set ping validator PublicInternet Relay tick task + { + let this = self.clone(); + self.unlocked_inner + .ping_validator_public_internet_relay_task + .set_routine(move |s, l, t| { + Box::pin( + this.clone() + .ping_validator_public_internet_relay_task_routine( + s, + Timestamp::new(l), + Timestamp::new(t), + ), + ) + }); + } + + // Set ping validator Active Watch tick task + { + let this = self.clone(); + self.unlocked_inner + .ping_validator_active_watch_task + .set_routine(move |s, l, t| { + Box::pin(this.clone().ping_validator_active_watch_task_routine( s, Timestamp::new(l), Timestamp::new(t), @@ -126,6 +199,12 @@ impl RoutingTable { // Do rolling transfers every ROLLING_TRANSFERS_INTERVAL_SECS secs self.unlocked_inner.rolling_transfers_task.tick().await?; + // Do state stats update every UPDATE_STATE_STATS_INTERVAL_SECS secs + self.unlocked_inner.update_state_stats_task.tick().await?; + + // Do rolling answers every ROLLING_ANSWER_INTERVAL_SECS secs + self.unlocked_inner.rolling_answers_task.tick().await?; + // Kick buckets task let kick_bucket_queue_count = self.unlocked_inner.kick_queue.lock().len(); if kick_bucket_queue_count > 0 { @@ -165,22 +244,29 @@ impl RoutingTable { } // Ping validate some nodes to groom the table - self.unlocked_inner.ping_validator_task.tick().await?; + self.unlocked_inner + .ping_validator_public_internet_task + .tick() + .await?; + self.unlocked_inner + .ping_validator_local_network_task + .tick() + .await?; + self.unlocked_inner + .ping_validator_public_internet_relay_task + .tick() + .await?; + self.unlocked_inner + .ping_validator_active_watch_task + .tick() + .await?; // Run the relay management task self.unlocked_inner.relay_management_task.tick().await?; - // Only perform these operations if we already have a valid network class - // and if we didn't need to bootstrap or perform a peer minimum refresh as these operations - // require having a suitably full routing table and guaranteed ability to contact 
other nodes - if !needs_bootstrap - && !needs_peer_minimum_refresh - && self.has_valid_network_class(RoutingDomain::PublicInternet) - { + // Get more nodes if we need to + if !needs_bootstrap && !needs_peer_minimum_refresh { // Run closest peers refresh task - // this will also inform other close nodes of -our- existence so we would - // much rather perform this action -after- we have a valid network class - // so our PeerInfo is valid when informing the other nodes of our existence. self.unlocked_inner .closest_peers_refresh_task .tick() @@ -212,6 +298,14 @@ impl RoutingTable { if let Err(e) = self.unlocked_inner.rolling_transfers_task.stop().await { error!("rolling_transfers_task not stopped: {}", e); } + log_rtab!(debug "stopping update state stats task"); + if let Err(e) = self.unlocked_inner.update_state_stats_task.stop().await { + error!("update_state_stats_task not stopped: {}", e); + } + log_rtab!(debug "stopping rolling answers task"); + if let Err(e) = self.unlocked_inner.rolling_answers_task.stop().await { + error!("rolling_answers_task not stopped: {}", e); + } log_rtab!(debug "stopping kick buckets task"); if let Err(e) = self.unlocked_inner.kick_buckets_task.stop().await { error!("kick_buckets_task not stopped: {}", e); @@ -224,10 +318,44 @@ impl RoutingTable { if let Err(e) = self.unlocked_inner.peer_minimum_refresh_task.stop().await { error!("peer_minimum_refresh_task not stopped: {}", e); } - log_rtab!(debug "stopping ping_validator task"); - if let Err(e) = self.unlocked_inner.ping_validator_task.stop().await { - error!("ping_validator_task not stopped: {}", e); + + log_rtab!(debug "stopping ping_validator tasks"); + if let Err(e) = self + .unlocked_inner + .ping_validator_public_internet_task + .stop() + .await + { + error!("ping_validator_public_internet_task not stopped: {}", e); } + if let Err(e) = self + .unlocked_inner + .ping_validator_local_network_task + .stop() + .await + { + error!("ping_validator_local_network_task not stopped: {}", e); + } + if let Err(e) = self + .unlocked_inner + .ping_validator_public_internet_relay_task + .stop() + .await + { + error!( + "ping_validator_public_internet_relay_task not stopped: {}", + e + ); + } + if let Err(e) = self + .unlocked_inner + .ping_validator_active_watch_task + .stop() + .await + { + error!("ping_validator_active_watch_task not stopped: {}", e); + } + log_rtab!(debug "stopping relay management task"); if let Err(e) = self.unlocked_inner.relay_management_task.stop().await { warn!("relay_management_task not stopped: {}", e); diff --git a/veilid-core/src/routing_table/tasks/ping_validator.rs b/veilid-core/src/routing_table/tasks/ping_validator.rs index 90567155..53419b7e 100644 --- a/veilid-core/src/routing_table/tasks/ping_validator.rs +++ b/veilid-core/src/routing_table/tasks/ping_validator.rs @@ -7,17 +7,92 @@ const RELAY_KEEPALIVE_PING_INTERVAL_SECS: u32 = 10; /// Keepalive pings are done for active watch nodes to make sure they are still there const ACTIVE_WATCH_KEEPALIVE_PING_INTERVAL_SECS: u32 = 10; -/// Ping queue processing depth -const MAX_PARALLEL_PINGS: usize = 16; +/// Ping queue processing depth per validator +const MAX_PARALLEL_PINGS: usize = 8; -use futures_util::stream::{FuturesUnordered, StreamExt}; use futures_util::FutureExt; -use stop_token::future::FutureExt as StopFutureExt; -type PingValidatorFuture = - SendPinBoxFuture>>, RPCError>>; +type PingValidatorFuture = SendPinBoxFuture>; impl RoutingTable { + // Task routine for PublicInternet status pings + #[instrument(level = "trace", 
skip(self), err)] + pub(crate) async fn ping_validator_public_internet_task_routine( + self, + stop_token: StopToken, + _last_ts: Timestamp, + cur_ts: Timestamp, + ) -> EyreResult<()> { + let mut future_queue: VecDeque = VecDeque::new(); + + self.ping_validator_public_internet(cur_ts, &mut future_queue) + .await?; + + self.process_ping_validation_queue("PublicInternet", stop_token, cur_ts, future_queue) + .await; + + Ok(()) + } + + // Task routine for LocalNetwork status pings + #[instrument(level = "trace", skip(self), err)] + pub(crate) async fn ping_validator_local_network_task_routine( + self, + stop_token: StopToken, + _last_ts: Timestamp, + cur_ts: Timestamp, + ) -> EyreResult<()> { + let mut future_queue: VecDeque = VecDeque::new(); + + self.ping_validator_local_network(cur_ts, &mut future_queue) + .await?; + + self.process_ping_validation_queue("LocalNetwork", stop_token, cur_ts, future_queue) + .await; + + Ok(()) + } + + // Task routine for PublicInternet relay keepalive pings + #[instrument(level = "trace", skip(self), err)] + pub(crate) async fn ping_validator_public_internet_relay_task_routine( + self, + stop_token: StopToken, + _last_ts: Timestamp, + cur_ts: Timestamp, + ) -> EyreResult<()> { + let mut future_queue: VecDeque = VecDeque::new(); + + self.relay_keepalive_public_internet(cur_ts, &mut future_queue) + .await?; + + self.process_ping_validation_queue("RelayKeepalive", stop_token, cur_ts, future_queue) + .await; + + Ok(()) + } + + // Task routine for active watch keepalive pings + #[instrument(level = "trace", skip(self), err)] + pub(crate) async fn ping_validator_active_watch_task_routine( + self, + stop_token: StopToken, + _last_ts: Timestamp, + cur_ts: Timestamp, + ) -> EyreResult<()> { + let mut future_queue: VecDeque = VecDeque::new(); + + self.active_watches_keepalive_public_internet(cur_ts, &mut future_queue) + .await?; + + self.process_ping_validation_queue("WatchKeepalive", stop_token, cur_ts, future_queue) + .await; + + Ok(()) + } + + //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// + // Ping the relay to keep it alive, over every protocol it is relaying for us #[instrument(level = "trace", skip(self, futurequeue), err)] async fn relay_keepalive_public_internet( @@ -49,10 +124,9 @@ impl RoutingTable { return Ok(()); } // Say we're doing this keepalive now - self.edit_public_internet_routing_domain() - .set_relay_node_keepalive(Some(cur_ts)) - .commit(false) - .await; + self.inner + .write() + .set_relay_node_last_keepalive(RoutingDomain::PublicInternet, cur_ts); // We need to keep-alive at one connection per ordering for relays // but also one per NAT mapping that we need to keep open for our inbound dial info @@ -107,13 +181,13 @@ impl RoutingTable { for relay_nr_filtered in relay_noderefs { let rpc = rpc.clone(); - - log_rtab!("--> Keepalive ping to {:?}", relay_nr_filtered); - futurequeue.push_back( async move { - rpc.rpc_call_status(Destination::direct(relay_nr_filtered)) - .await + log_rtab!("--> PublicInternet Relay ping to {:?}", relay_nr_filtered); + let _ = rpc + .rpc_call_status(Destination::direct(relay_nr_filtered)) + .await?; + Ok(()) } .boxed(), ); @@ -151,17 +225,15 @@ impl RoutingTable { // Get all the active watches from the storage manager let storage_manager = self.unlocked_inner.network_manager.storage_manager(); - let watch_node_refs = storage_manager.get_active_watch_nodes().await; + let watch_destinations = storage_manager.get_active_watch_nodes().await; - for 
watch_nr in watch_node_refs { + for watch_destination in watch_destinations { let rpc = rpc.clone(); - - log_rtab!("--> Watch ping to {:?}", watch_nr); - futurequeue.push_back( async move { - rpc.rpc_call_status(Destination::direct(watch_nr.default_filtered())) - .await + log_rtab!("--> Watch Keepalive ping to {:?}", watch_destination); + let _ = rpc.rpc_call_status(watch_destination).await?; + Ok(()) } .boxed(), ); @@ -182,20 +254,19 @@ impl RoutingTable { // Get all nodes needing pings in the PublicInternet routing domain let node_refs = self.get_nodes_needing_ping(RoutingDomain::PublicInternet, cur_ts); - // If we have a relay, let's ping for NAT keepalives - self.relay_keepalive_public_internet(cur_ts, futurequeue) - .await?; - - // Check active watch keepalives - self.active_watches_keepalive_public_internet(cur_ts, futurequeue) - .await?; - // Just do a single ping with the best protocol for all the other nodes to check for liveness for nr in node_refs { + let nr = nr.sequencing_clone(Sequencing::PreferOrdered); + let rpc = rpc.clone(); - log_rtab!("--> Validator ping to {:?}", nr); futurequeue.push_back( - async move { rpc.rpc_call_status(Destination::direct(nr)).await }.boxed(), + async move { + #[cfg(feature = "verbose-tracing")] + log_rtab!(debug "--> PublicInternet Validator ping to {:?}", nr); + let _ = rpc.rpc_call_status(Destination::direct(nr)).await?; + Ok(()) + } + .boxed(), ); } @@ -215,76 +286,58 @@ impl RoutingTable { // Get all nodes needing pings in the LocalNetwork routing domain let node_refs = self.get_nodes_needing_ping(RoutingDomain::LocalNetwork, cur_ts); - // For all nodes needing pings, figure out how many and over what protocols + // Just do a single ping with the best protocol for all the other nodes to check for liveness for nr in node_refs { + let nr = nr.sequencing_clone(Sequencing::PreferOrdered); + let rpc = rpc.clone(); // Just do a single ping with the best protocol for all the nodes futurequeue.push_back( - async move { rpc.rpc_call_status(Destination::direct(nr)).await }.boxed(), + async move { + #[cfg(feature = "verbose-tracing")] + log_rtab!(debug "--> LocalNetwork Validator ping to {:?}", nr); + let _ = rpc.rpc_call_status(Destination::direct(nr)).await?; + Ok(()) + } + .boxed(), ); } Ok(()) } - // Ping each node in the routing table if they need to be pinged - // to determine their reliability - #[instrument(level = "trace", skip(self), err)] - pub(crate) async fn ping_validator_task_routine( - self, + // Common handler for running ping validations in a batch + async fn process_ping_validation_queue( + &self, + name: &str, stop_token: StopToken, - _last_ts: Timestamp, cur_ts: Timestamp, - ) -> EyreResult<()> { - let mut futurequeue: VecDeque = VecDeque::new(); - - // PublicInternet - self.ping_validator_public_internet(cur_ts, &mut futurequeue) - .await?; - - // LocalNetwork - self.ping_validator_local_network(cur_ts, &mut futurequeue) - .await?; - - // Wait for ping futures to complete in parallel - let mut unord = FuturesUnordered::new(); - - while !unord.is_empty() || !futurequeue.is_empty() { - log_rtab!( - "Ping validation queue: {} remaining, {} in progress", - futurequeue.len(), - unord.len() - ); - - // Process one unordered futures if we have some - match unord - .next() - .timeout_at(stop_token.clone()) - .in_current_span() - .await - { - Ok(Some(_)) => { - // Some ping completed - } - Ok(None) => { - // We're empty - } - Err(_) => { - // Timeout means we drop the rest because we were asked to stop - break; - } - } - - // Fill unord up 
to max parallelism - while unord.len() < MAX_PARALLEL_PINGS { - let Some(fq) = futurequeue.pop_front() else { - break; - }; - unord.push(fq); - } + future_queue: VecDeque, + ) { + let count = future_queue.len(); + if count == 0 { + return; } + log_rtab!(debug "[{}] Ping validation queue: {} remaining", name, count); - Ok(()) + let atomic_count = AtomicUsize::new(count); + process_batched_future_queue(future_queue, MAX_PARALLEL_PINGS, stop_token, |res| async { + if let Err(e) = res { + log_rtab!(error "[{}] Error performing status ping: {}", name, e); + } + let remaining = atomic_count.fetch_sub(1, Ordering::AcqRel) - 1; + if remaining > 0 { + log_rtab!(debug "[{}] Ping validation queue: {} remaining", name, remaining); + } + }) + .await; + let done_ts = Timestamp::now(); + log_rtab!(debug + "[{}] Ping validation queue finished {} pings in {}", + name, + count, + done_ts - cur_ts + ); } } diff --git a/veilid-core/src/routing_table/tasks/private_route_management.rs b/veilid-core/src/routing_table/tasks/private_route_management.rs index 402cd374..6e064a60 100644 --- a/veilid-core/src/routing_table/tasks/private_route_management.rs +++ b/veilid-core/src/routing_table/tasks/private_route_management.rs @@ -2,6 +2,7 @@ use super::*; use futures_util::stream::{FuturesUnordered, StreamExt}; use futures_util::FutureExt; +use stop_token::future::FutureExt as _; const BACKGROUND_SAFETY_ROUTE_COUNT: usize = 2; @@ -58,12 +59,12 @@ impl RoutingTable { } // If this has been published, always test if we need it // Also if the route has never been tested, test it at least once - if v.is_published() || stats.last_tested_ts.is_none() { + if v.is_published() || stats.last_known_valid_ts.is_none() { must_test_routes.push(*k); } // If this is a default route hop length, include it in routes to keep alive else if v.hop_count() == default_route_hop_count { - unpublished_routes.push((*k, stats.latency_stats.average.as_u64())); + unpublished_routes.push((*k, stats.latency.average.as_u64())); } // Else this is a route that hasnt been used recently enough and we can tear it down else { @@ -102,10 +103,10 @@ impl RoutingTable { } /// Test set of routes and remove the ones that don't test clean - #[instrument(level = "trace", skip(self, _stop_token), err)] + #[instrument(level = "trace", skip(self, stop_token), err)] async fn test_route_set( &self, - _stop_token: StopToken, + stop_token: StopToken, routes_needing_testing: Vec, ) -> EyreResult<()> { if routes_needing_testing.is_empty() { @@ -152,7 +153,7 @@ impl RoutingTable { } // Wait for test_route futures to complete in parallel - while unord.next().await.is_some() {} + while let Ok(Some(())) = unord.next().timeout_at(stop_token.clone()).await {} } // Process failed routes diff --git a/veilid-core/src/routing_table/tasks/relay_management.rs b/veilid-core/src/routing_table/tasks/relay_management.rs index 7827f381..a3a04582 100644 --- a/veilid-core/src/routing_table/tasks/relay_management.rs +++ b/veilid-core/src/routing_table/tasks/relay_management.rs @@ -14,7 +14,11 @@ impl RoutingTable { } // If we -need- a relay always request one - if let Some(rk) = own_node_info.requires_relay() { + let requires_relay = self + .inner + .read() + .with_routing_domain(RoutingDomain::PublicInternet, |rdd| rdd.requires_relay()); + if let Some(rk) = requires_relay { return Some(rk); } @@ -71,7 +75,7 @@ impl RoutingTable { BucketEntryStateReason::Dead(_) | BucketEntryStateReason::Punished(_) ) { log_rtab!(debug "Relay node is now {:?}, dropping relay {}", state_reason, relay_node); - 
editor.clear_relay_node(); + editor.set_relay_node(None); false } // Relay node no longer can relay @@ -80,7 +84,7 @@ impl RoutingTable { "Relay node can no longer relay, dropping relay {}", relay_node ); - editor.clear_relay_node(); + editor.set_relay_node(None); false } // Relay node is no longer wanted @@ -89,7 +93,7 @@ impl RoutingTable { "Relay node no longer desired, dropping relay {}", relay_node ); - editor.clear_relay_node(); + editor.set_relay_node(None); false } else { true @@ -114,7 +118,7 @@ impl RoutingTable { match self.register_node_with_peer_info(outbound_relay_peerinfo, false) { Ok(nr) => { log_rtab!(debug "Outbound relay node selected: {}", nr); - editor.set_relay_node(nr.unfiltered()); + editor.set_relay_node(Some(nr.unfiltered())); got_outbound_relay = true; } Err(e) => { @@ -133,7 +137,7 @@ impl RoutingTable { relay_node_filter, ) { log_rtab!(debug "Inbound relay node selected: {}", nr); - editor.set_relay_node(nr); + editor.set_relay_node(Some(nr)); } } } @@ -142,10 +146,6 @@ impl RoutingTable { if editor.commit(false).await { // Try to publish the peer info editor.publish(); - - self.network_manager() - .connection_manager() - .update_protections(); } Ok(()) diff --git a/veilid-core/src/routing_table/tasks/rolling_transfers.rs b/veilid-core/src/routing_table/tasks/rolling_transfers.rs deleted file mode 100644 index 6ea87bdb..00000000 --- a/veilid-core/src/routing_table/tasks/rolling_transfers.rs +++ /dev/null @@ -1,36 +0,0 @@ -use super::*; - -impl RoutingTable { - // Compute transfer statistics to determine how 'fast' a node is - #[instrument(level = "trace", skip(self), err)] - pub(crate) async fn rolling_transfers_task_routine( - self, - _stop_token: StopToken, - last_ts: Timestamp, - cur_ts: Timestamp, - ) -> EyreResult<()> { - // log_rtab!("--- rolling_transfers task"); - { - let inner = &mut *self.inner.write(); - - // Roll our own node's transfers - inner.self_transfer_stats_accounting.roll_transfers( - last_ts, - cur_ts, - &mut inner.self_transfer_stats, - ); - - // Roll all bucket entry transfers - let all_entries: Vec> = inner.all_entries.iter().collect(); - for entry in all_entries { - entry.with_mut(inner, |_rti, e| e.roll_transfers(last_ts, cur_ts)); - } - } - - // Roll all route transfers - let rss = self.route_spec_store(); - rss.roll_transfers(last_ts, cur_ts); - - Ok(()) - } -} diff --git a/veilid-core/src/routing_table/tasks/update_statistics.rs b/veilid-core/src/routing_table/tasks/update_statistics.rs new file mode 100644 index 00000000..da448c60 --- /dev/null +++ b/veilid-core/src/routing_table/tasks/update_statistics.rs @@ -0,0 +1,81 @@ +use super::*; + +impl RoutingTable { + // Compute transfer statistics to determine how 'fast' a node is + #[instrument(level = "trace", skip(self), err)] + pub(crate) async fn rolling_transfers_task_routine( + self, + _stop_token: StopToken, + last_ts: Timestamp, + cur_ts: Timestamp, + ) -> EyreResult<()> { + { + let inner = &mut *self.inner.write(); + + // Roll our own node's transfers + inner.self_transfer_stats_accounting.roll_transfers( + last_ts, + cur_ts, + &mut inner.self_transfer_stats, + ); + + // Roll all bucket entry transfers + let all_entries: Vec> = inner.all_entries.iter().collect(); + for entry in all_entries { + entry.with_mut(inner, |_rti, e| e.roll_transfers(last_ts, cur_ts)); + } + } + + // Roll all route transfers + let rss = self.route_spec_store(); + rss.roll_transfers(last_ts, cur_ts); + + Ok(()) + } + + // Update state statistics in PeerStats + #[instrument(level = "trace", skip(self), 
err)] + pub(crate) async fn update_state_stats_task_routine( + self, + _stop_token: StopToken, + _last_ts: Timestamp, + _cur_ts: Timestamp, + ) -> EyreResult<()> { + { + let inner = &mut *self.inner.write(); + + // Roll all bucket entry transfers + let all_entries: Vec> = inner.all_entries.iter().collect(); + for entry in all_entries { + entry.with_mut(inner, |_rti, e| e.update_state_stats()); + } + } + + Ok(()) + } + + // Update rolling answers in PeerStats + #[instrument(level = "trace", skip(self), err)] + pub(crate) async fn rolling_answers_task_routine( + self, + _stop_token: StopToken, + _last_ts: Timestamp, + cur_ts: Timestamp, + ) -> EyreResult<()> { + { + let inner = &mut *self.inner.write(); + + // Roll all bucket entry answers stats + let all_entries: Vec> = inner.all_entries.iter().collect(); + for entry in all_entries { + entry.with_mut(inner, |_rti, e| e.roll_answer_stats(cur_ts)); + } + } + + // Roll all route answers + let rss = self.route_spec_store(); + rss.roll_answers(cur_ts); + + Ok(()) + } +} diff --git a/veilid-core/src/routing_table/types/dial_info_detail.rs b/veilid-core/src/routing_table/types/dial_info_detail.rs index dc7db216..b16e685b 100644 --- a/veilid-core/src/routing_table/types/dial_info_detail.rs +++ b/veilid-core/src/routing_table/types/dial_info_detail.rs @@ -7,6 +7,12 @@ pub struct DialInfoDetail { pub dial_info: DialInfo, } +impl fmt::Display for DialInfoDetail { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{:?}:{}", self.class, self.dial_info) + } +} + impl MatchesDialInfoFilter for DialInfoDetail { fn matches_filter(&self, filter: &DialInfoFilter) -> bool { self.dial_info.matches_filter(filter) diff --git a/veilid-core/src/routing_table/types/node_info.rs b/veilid-core/src/routing_table/types/node_info.rs index 13c84137..8640cad1 100644 --- a/veilid-core/src/routing_table/types/node_info.rs +++ b/veilid-core/src/routing_table/types/node_info.rs @@ -15,7 +15,7 @@ pub const CAP_BLOCKSTORE: Capability = FourCC(*b"BLOC"); pub const DISTANCE_METRIC_CAPABILITIES: &[Capability] = &[CAP_DHT, CAP_DHT_WATCH]; -#[derive(Clone, Default, PartialEq, Eq, Debug, Serialize, Deserialize)] +#[derive(Debug, Clone, Default, PartialEq, Eq, Serialize, Deserialize)] pub struct NodeInfo { network_class: NetworkClass, outbound_protocols: ProtocolTypeSet, @@ -26,6 +26,22 @@ pub struct NodeInfo { dial_info_detail_list: Vec, } +impl fmt::Display for NodeInfo { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + writeln!(f, "network_class: {:?}", self.network_class)?; + writeln!(f, "outbound_protocols: {:?}", self.outbound_protocols)?; + writeln!(f, "address_types: {:?}", self.address_types)?; + writeln!(f, "envelope_support: {:?}", self.envelope_support)?; + writeln!(f, "crypto_support: {:?}", self.crypto_support)?; + writeln!(f, "capabilities: {:?}", self.capabilities)?; + writeln!(f, "dial_info_detail_list:")?; + for did in &self.dial_info_detail_list { + writeln!(f, " {}", did)?; + } + Ok(()) + } +} + impl NodeInfo { pub fn new( network_class: NetworkClass, @@ -130,27 +146,6 @@ impl NodeInfo { !self.dial_info_detail_list.is_empty() } - /// Is some relay required either for signal or inbound relay or outbound relay? 
- pub fn requires_relay(&self) -> Option { - match self.network_class { - NetworkClass::InboundCapable => { - for did in &self.dial_info_detail_list { - if did.class.requires_relay() { - return Some(RelayKind::Inbound); - } - } - } - NetworkClass::OutboundOnly => { - return Some(RelayKind::Inbound); - } - NetworkClass::WebApp => { - return Some(RelayKind::Outbound); - } - NetworkClass::Invalid => {} - } - None - } - pub fn has_capability(&self, cap: Capability) -> bool { self.capabilities.contains(&cap) } diff --git a/veilid-core/src/routing_table/types/peer_info.rs b/veilid-core/src/routing_table/types/peer_info.rs index 462f3678..1199d14c 100644 --- a/veilid-core/src/routing_table/types/peer_info.rs +++ b/veilid-core/src/routing_table/types/peer_info.rs @@ -1,12 +1,34 @@ use super::*; -#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)] +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] pub struct PeerInfo { + #[serde( + default = "default_routing_domain", + skip_serializing_if = "is_default_routing_domain" + )] routing_domain: RoutingDomain, node_ids: TypedKeyGroup, signed_node_info: SignedNodeInfo, } +fn default_routing_domain() -> RoutingDomain { + RoutingDomain::PublicInternet +} + +fn is_default_routing_domain(routing_domain: &RoutingDomain) -> bool { + matches!(routing_domain, RoutingDomain::PublicInternet) +} + +impl fmt::Display for PeerInfo { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + writeln!(f, "routing_domain: {:?}", self.routing_domain)?; + writeln!(f, "node_ids: {}", self.node_ids)?; + writeln!(f, "signed_node_info:")?; + write!(f, "{}", indent_all_string(&self.signed_node_info))?; + Ok(()) + } +} + impl PeerInfo { pub fn new( routing_domain: RoutingDomain, diff --git a/veilid-core/src/routing_table/types/signed_direct_node_info.rs b/veilid-core/src/routing_table/types/signed_direct_node_info.rs index 77b96cf3..e4587262 100644 --- a/veilid-core/src/routing_table/types/signed_direct_node_info.rs +++ b/veilid-core/src/routing_table/types/signed_direct_node_info.rs @@ -1,12 +1,26 @@ use super::*; /// Signed NodeInfo that can be passed around amongst peers and verifiable -#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] pub struct SignedDirectNodeInfo { node_info: NodeInfo, timestamp: Timestamp, signatures: Vec, } + +impl fmt::Display for SignedDirectNodeInfo { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + writeln!(f, "node_info:")?; + write!(f, "{}", indent_all_string(&self.node_info))?; + writeln!(f, "timestamp: {}", self.timestamp)?; + writeln!(f, "signatures:")?; + for sig in &self.signatures { + writeln!(f, "{}", indent_all_string(sig))?; + } + Ok(()) + } +} + impl SignedDirectNodeInfo { /// Returns a new SignedDirectNodeInfo that has its signatures validated. /// On success, this will modify the node_ids set to only include node_ids whose signatures validate. 
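A minimal, self-contained sketch (not part of the patch; the type and field names here are illustrative) of the serde pattern PeerInfo now uses for routing_domain above: pairing `default` with `skip_serializing_if` keeps the common PublicInternet case off the wire while still accepting encodings from nodes that predate the field.

use serde::{Deserialize, Serialize};

#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
enum Domain {
    PublicInternet,
    LocalNetwork,
}

fn default_domain() -> Domain {
    Domain::PublicInternet
}

fn is_default_domain(domain: &Domain) -> bool {
    matches!(domain, Domain::PublicInternet)
}

#[derive(Debug, Serialize, Deserialize)]
struct Record {
    // Omitted when it equals the default, and defaulted when absent on input
    #[serde(default = "default_domain", skip_serializing_if = "is_default_domain")]
    domain: Domain,
    name: String,
}

// e.g. the JSON `{"name":"n"}` deserializes with domain == Domain::PublicInternet,
// and a Record holding the default domain serializes back to that same minimal form.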
diff --git a/veilid-core/src/routing_table/types/signed_node_info.rs b/veilid-core/src/routing_table/types/signed_node_info.rs index 0d331218..cd394ea4 100644 --- a/veilid-core/src/routing_table/types/signed_node_info.rs +++ b/veilid-core/src/routing_table/types/signed_node_info.rs @@ -1,11 +1,28 @@ use super::*; -#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] pub enum SignedNodeInfo { Direct(SignedDirectNodeInfo), Relayed(SignedRelayedNodeInfo), } +impl fmt::Display for SignedNodeInfo { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + Self::Direct(arg0) => { + writeln!(f, "direct:")?; + write!(f, "{}", indent_all_string(arg0))?; + Ok(()) + } + Self::Relayed(arg0) => { + writeln!(f, "relayed:")?; + write!(f, "{}", indent_all_string(&arg0))?; + Ok(()) + } + } + } +} + impl SignedNodeInfo { pub fn validate( &self, diff --git a/veilid-core/src/routing_table/types/signed_relayed_node_info.rs b/veilid-core/src/routing_table/types/signed_relayed_node_info.rs index 1f8bfd44..275cb0d0 100644 --- a/veilid-core/src/routing_table/types/signed_relayed_node_info.rs +++ b/veilid-core/src/routing_table/types/signed_relayed_node_info.rs @@ -1,7 +1,7 @@ use super::*; /// Signed NodeInfo with a relay that can be passed around amongst peers and verifiable -#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] pub struct SignedRelayedNodeInfo { node_info: NodeInfo, relay_ids: TypedKeyGroup, @@ -10,6 +10,22 @@ pub struct SignedRelayedNodeInfo { signatures: Vec, } +impl fmt::Display for SignedRelayedNodeInfo { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + writeln!(f, "node_info:")?; + write!(f, "{}", indent_all_string(&self.node_info))?; + writeln!(f, "relay_ids: {}", self.relay_ids)?; + writeln!(f, "relay_info:")?; + write!(f, "{}", indent_all_string(&self.relay_info))?; + writeln!(f, "timestamp: {}", self.timestamp)?; + writeln!(f, "signatures:")?; + for sig in &self.signatures { + writeln!(f, "{}", indent_all_string(sig))?; + } + Ok(()) + } +} + impl SignedRelayedNodeInfo { /// Returns a new SignedRelayedNodeInfo that has its signatures validated. /// On success, this will modify the node_ids set to only include node_ids whose signatures validate. 
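A freestanding sketch (assumption; `Leaf`, `Node`, and the `indent_all` helper are stand-ins, not patch code) of the nested Display-with-indentation pattern these new impls share: each wrapper prints a heading line and indents the multi-line Display output of its inner value, which is what the `indent_all_string` helper added in veilid_api/debug.rs later in this diff is used for.

use std::fmt;

fn indent_all(s: &str) -> String {
    s.lines().map(|l| format!("    {}\n", l)).collect()
}

struct Leaf {
    timestamp: u64,
}

impl fmt::Display for Leaf {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        writeln!(f, "timestamp: {}", self.timestamp)
    }
}

enum Node {
    Direct(Leaf),
}

impl fmt::Display for Node {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            Self::Direct(inner) => {
                writeln!(f, "direct:")?;
                write!(f, "{}", indent_all(&inner.to_string()))
            }
        }
    }
}

// `println!("{}", Node::Direct(Leaf { timestamp: 42 }))` prints:
// direct:
//     timestamp: 42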
diff --git a/veilid-core/src/rpc_processor/mod.rs b/veilid-core/src/rpc_processor/mod.rs index 55ada4e0..fe8f0492 100644 --- a/veilid-core/src/rpc_processor/mod.rs +++ b/veilid-core/src/rpc_processor/mod.rs @@ -692,7 +692,7 @@ impl RPCProcessor { match &out { Err(e) => { log_rpc!(debug "RPC Lost (id={} {}): {}", id, debug_string, e); - self.record_question_lost( + self.record_lost_answer( waitable_reply.send_ts, waitable_reply.node_ref.clone(), waitable_reply.safety_route, @@ -702,7 +702,7 @@ impl RPCProcessor { } Ok(TimeoutOr::Timeout) => { log_rpc!(debug "RPC Lost (id={} {}): Timeout", id, debug_string); - self.record_question_lost( + self.record_lost_answer( waitable_reply.send_ts, waitable_reply.node_ref.clone(), waitable_reply.safety_route, @@ -1008,11 +1008,8 @@ impl RPCProcessor { let routing_table = self.routing_table(); if let Some(published_peer_info) = routing_table.get_published_peer_info(routing_domain) { - // Get our node info timestamp - let our_node_info_ts = published_peer_info.signed_node_info().timestamp(); - // If the target has not yet seen our published peer info, send it along if we have it - if !node.has_seen_our_node_info_ts(routing_domain, our_node_info_ts) { + if !node.has_seen_our_node_info_ts(routing_domain) { return SenderPeerInfo::new(published_peer_info, target_node_info_ts); } } @@ -1056,7 +1053,7 @@ impl RPCProcessor { /// Record question lost to node or route #[instrument(level = "trace", target = "rpc", skip_all)] - fn record_question_lost( + fn record_lost_answer( &self, send_ts: Timestamp, node_ref: NodeRef, @@ -1066,7 +1063,7 @@ impl RPCProcessor { ) { // Record for node if this was not sent via a route if safety_route.is_none() && remote_private_route.is_none() { - node_ref.stats_question_lost(); + node_ref.stats_lost_answer(); // Also clear the last_connections for the entry so we make a new connection next time node_ref.clear_last_flows(); @@ -1080,19 +1077,19 @@ impl RPCProcessor { if let Some(sr_pubkey) = &safety_route { let rss = self.routing_table.route_spec_store(); rss.with_route_stats_mut(send_ts, sr_pubkey, |s| { - s.record_question_lost(); + s.record_lost_answer(); }); } // If remote private route was used, record question lost there if let Some(rpr_pubkey) = &remote_private_route { rss.with_route_stats_mut(send_ts, rpr_pubkey, |s| { - s.record_question_lost(); + s.record_lost_answer(); }); } // If private route was used, record question lost there if let Some(pr_pubkey) = &private_route { rss.with_route_stats_mut(send_ts, pr_pubkey, |s| { - s.record_question_lost(); + s.record_lost_answer(); }); } } @@ -1169,8 +1166,8 @@ impl RPCProcessor { // If safety route was used, record route there if let Some(sr_pubkey) = &safety_route { rss.with_route_stats_mut(send_ts, sr_pubkey, |s| { - // If we received an answer, the safety route we sent over can be considered tested - s.record_tested(recv_ts); + // Record received bytes + s.record_answer_received(recv_ts, bytes); // If we used a safety route to send, use our last tested latency total_local_latency += s.latency_stats().average @@ -1181,7 +1178,7 @@ impl RPCProcessor { if let Some(pr_pubkey) = &reply_private_route { rss.with_route_stats_mut(send_ts, pr_pubkey, |s| { // Record received bytes - s.record_received(recv_ts, bytes); + s.record_answer_received(recv_ts, bytes); // If we used a private route to receive, use our last tested latency total_local_latency += s.latency_stats().average @@ -1192,7 +1189,7 @@ impl RPCProcessor { if let Some(rpr_pubkey) = &remote_private_route { 
rss.with_route_stats_mut(send_ts, rpr_pubkey, |s| { // Record received bytes - s.record_received(recv_ts, bytes); + s.record_answer_received(recv_ts, bytes); // The remote route latency is recorded using the total latency minus the total local latency let remote_latency = total_latency.saturating_sub(total_local_latency); @@ -1248,7 +1245,7 @@ impl RPCProcessor { // This may record nothing if the remote safety route is not also // a remote private route that been imported, but that's okay rss.with_route_stats_mut(recv_ts, &d.remote_safety_route, |s| { - s.record_received(recv_ts, bytes); + s.record_question_received(recv_ts, bytes); }); } // Process messages that arrived to our private route @@ -1260,12 +1257,12 @@ impl RPCProcessor { // it could also be a node id if no remote safety route was used // in which case this also will do nothing rss.with_route_stats_mut(recv_ts, &d.remote_safety_route, |s| { - s.record_received(recv_ts, bytes); + s.record_question_received(recv_ts, bytes); }); // Record for our local private route we received over rss.with_route_stats_mut(recv_ts, &d.private_route, |s| { - s.record_received(recv_ts, bytes); + s.record_question_received(recv_ts, bytes); }); } } @@ -1748,7 +1745,7 @@ impl RPCProcessor { log_rpc!(debug "Could not complete rpc operation: id = {}: {}", op_id, e); } RPCError::Ignore(_) => { - log_rpc!("Answer late: id = {}", op_id); + log_rpc!(debug "Answer late: id = {}", op_id); } }; // Don't throw an error here because it's okay if the original operation timed out diff --git a/veilid-core/src/rpc_processor/operation_waiter.rs b/veilid-core/src/rpc_processor/operation_waiter.rs index ae6c2f80..99aeddfa 100644 --- a/veilid-core/src/rpc_processor/operation_waiter.rs +++ b/veilid-core/src/rpc_processor/operation_waiter.rs @@ -149,7 +149,7 @@ where inner .waiting_op_table .remove(&op_id) - .ok_or_else(RPCError::else_internal(format!( + .ok_or_else(RPCError::else_ignore(format!( "Unmatched operation id: {}", op_id )))? 
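A hedged sketch (assumption; the enum and function names below are illustrative, not the real RPCError API) of why the unmatched-operation case above switches from an internal error to an ignore error: an answer that arrives after its waiter has been removed is expected once the question has timed out, so it should be logged at debug level rather than surfaced as a failure, matching the "Answer late" logging change earlier in this diff.

use std::collections::HashMap;
use std::fmt;

#[derive(Debug)]
enum RpcError {
    Internal(String),
    Ignore(String),
}

impl RpcError {
    // Mirrors the shape of the `else_ignore` constructor added in rpc_error.rs:
    // a closure suitable for `Option::ok_or_else`.
    fn else_ignore<M: fmt::Display>(message: M) -> impl FnOnce() -> Self {
        let message = message.to_string();
        move || Self::Ignore(message)
    }
}

fn complete_op(waiters: &mut HashMap<u64, ()>, op_id: u64) -> Result<(), RpcError> {
    waiters
        .remove(&op_id)
        .ok_or_else(RpcError::else_ignore(format!("Unmatched operation id: {}", op_id)))?;
    Ok(())
}

fn handle(res: Result<(), RpcError>, op_id: u64) {
    match res {
        Ok(()) => {}
        // Expected when the original question already timed out
        Err(RpcError::Ignore(_)) => eprintln!("Answer late: id = {}", op_id),
        Err(RpcError::Internal(e)) => {
            eprintln!("Could not complete rpc operation: id = {}: {}", op_id, e)
        }
    }
}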
diff --git a/veilid-core/src/rpc_processor/rpc_error.rs b/veilid-core/src/rpc_processor/rpc_error.rs index 93ddaa34..eb6dda2d 100644 --- a/veilid-core/src/rpc_processor/rpc_error.rs +++ b/veilid-core/src/rpc_processor/rpc_error.rs @@ -62,6 +62,9 @@ impl RPCError { pub fn map_ignore(message: M) -> impl FnOnce(X) -> Self { move |x| Self::Ignore(format!("{}: {}", message.to_string(), x.to_string())) } + pub fn else_ignore(message: M) -> impl FnOnce() -> Self { + move || Self::Ignore(message.to_string()) + } } impl From for VeilidAPIError { diff --git a/veilid-core/src/rpc_processor/rpc_status.rs b/veilid-core/src/rpc_processor/rpc_status.rs index 11c500ed..54f8d2c2 100644 --- a/veilid-core/src/rpc_processor/rpc_status.rs +++ b/veilid-core/src/rpc_processor/rpc_status.rs @@ -1,10 +1,16 @@ use super::*; -#[derive(Clone, Debug, PartialEq, PartialOrd, Ord, Eq, Hash, Default)] +#[derive(Copy, Clone, Debug, PartialEq, PartialOrd, Ord, Eq, Hash, Default)] pub struct SenderInfo { pub socket_address: SocketAddress, } +#[derive(Copy, Clone, Debug, PartialEq, PartialOrd, Ord, Eq, Hash, Default)] +pub struct StatusResult { + pub opt_sender_info: Option, + pub opt_previous_sender_info: Option, +} + impl RPCProcessor { // Send StatusQ RPC request, receive StatusA answer // Can be sent via relays or routes, but will have less information via routes @@ -19,7 +25,7 @@ impl RPCProcessor { pub async fn rpc_call_status( self, dest: Destination, - ) -> RPCNetworkResult>> { + ) -> RPCNetworkResult> { let _guard = self .unlocked_inner .startup_lock @@ -105,6 +111,7 @@ impl RPCProcessor { // Don't need to validate these addresses for the current routing domain // the address itself is irrelevant, and the remote node can lie anyway let mut opt_sender_info = None; + let mut opt_previous_sender_info = None; match dest { Destination::Direct { node: target, @@ -120,24 +127,23 @@ impl RPCProcessor { { // Directly requested status that actually gets sent directly and not over a relay will tell us what our IP address appears as // If this changes, we'd want to know about that to reset the networking stack - match routing_domain { - RoutingDomain::PublicInternet => self - .network_manager() - .report_public_internet_socket_address( - sender_info.socket_address, - send_data_method.unique_flow.flow, - target.unfiltered(), - ), - RoutingDomain::LocalNetwork => { - self.network_manager().report_local_network_socket_address( - sender_info.socket_address, - send_data_method.unique_flow.flow, - target.unfiltered(), - ) - } - } + opt_previous_sender_info = target.report_sender_info( + routing_domain, + send_data_method.unique_flow.flow.protocol_type(), + send_data_method.unique_flow.flow.address_type(), + sender_info, + ); }; - opt_sender_info = Some(sender_info.clone()); + opt_sender_info = Some(sender_info); + + // Report ping status results to network manager + self.network_manager().report_socket_address_change( + routing_domain, + sender_info.socket_address, + opt_previous_sender_info.map(|s| s.socket_address), + send_data_method.unique_flow.flow, + target.unfiltered(), + ); } } } @@ -156,7 +162,10 @@ impl RPCProcessor { Ok(NetworkResult::value(Answer::new( latency, reply_private_route, - opt_sender_info, + StatusResult { + opt_sender_info, + opt_previous_sender_info, + }, ))) } diff --git a/veilid-core/src/storage_manager/get_value.rs b/veilid-core/src/storage_manager/get_value.rs index 0c276d20..c466af4e 100644 --- a/veilid-core/src/storage_manager/get_value.rs +++ b/veilid-core/src/storage_manager/get_value.rs @@ -272,7 
+272,7 @@ impl StorageManager { kind, value_nodes: ctx.value_nodes.clone(), }; - log_network_result!(debug "GetValue Fanout: {:?}", fanout_result); + log_dht!(debug "GetValue Fanout: {:?}", fanout_result); if let Err(e) = out_tx.send(Ok(OutboundGetValueResult { fanout_result, diff --git a/veilid-core/src/storage_manager/inspect_value.rs b/veilid-core/src/storage_manager/inspect_value.rs index fc54330d..42eeb0b0 100644 --- a/veilid-core/src/storage_manager/inspect_value.rs +++ b/veilid-core/src/storage_manager/inspect_value.rs @@ -300,7 +300,7 @@ impl StorageManager { fanout_results.push(fanout_result); } - log_network_result!(debug "InspectValue Fanout ({:?}):\n{}", kind, debug_fanout_results(&fanout_results)); + log_dht!(debug "InspectValue Fanout ({:?}):\n{}", kind, debug_fanout_results(&fanout_results)); Ok(OutboundInspectValueResult { fanout_results, diff --git a/veilid-core/src/storage_manager/mod.rs b/veilid-core/src/storage_manager/mod.rs index 6de9facf..621f9981 100644 --- a/veilid-core/src/storage_manager/mod.rs +++ b/veilid-core/src/storage_manager/mod.rs @@ -207,12 +207,20 @@ impl StorageManager { } /// Get the set of nodes in our active watches - pub async fn get_active_watch_nodes(&self) -> Vec { + pub async fn get_active_watch_nodes(&self) -> Vec { let inner = self.inner.lock().await; inner .opened_records .values() - .filter_map(|v| v.active_watch().map(|aw| aw.watch_node)) + .filter_map(|v| { + v.active_watch().map(|aw| { + Destination::direct( + aw.watch_node + .routing_domain_filtered(RoutingDomain::PublicInternet), + ) + .with_safety(v.safety_selection()) + }) + }) .collect() } diff --git a/veilid-core/src/storage_manager/record_store/mod.rs b/veilid-core/src/storage_manager/record_store/mod.rs index 87f7d26b..6944c016 100644 --- a/veilid-core/src/storage_manager/record_store/mod.rs +++ b/veilid-core/src/storage_manager/record_store/mod.rs @@ -1268,7 +1268,7 @@ where out += &format!( " {} age={} len={} subkeys={}\n", rik.key, - debug_duration(get_timestamp() - rec.last_touched().as_u64()), + display_duration(get_timestamp() - rec.last_touched().as_u64()), rec.record_data_size(), rec.stored_subkeys(), ); diff --git a/veilid-core/src/storage_manager/set_value.rs b/veilid-core/src/storage_manager/set_value.rs index 0472031c..69bc968f 100644 --- a/veilid-core/src/storage_manager/set_value.rs +++ b/veilid-core/src/storage_manager/set_value.rs @@ -88,7 +88,7 @@ impl StorageManager { let context = context.clone(); let descriptor = descriptor.clone(); async move { - let send_descriptor = true; // xxx check if next_node needs the descriptor or not + let send_descriptor = true; // xxx check if next_node needs the descriptor or not, see issue #203 // get most recent value to send let value = { @@ -274,7 +274,7 @@ impl StorageManager { kind, value_nodes: ctx.value_nodes.clone(), }; - log_network_result!(debug "SetValue Fanout: {:?}", fanout_result); + log_dht!(debug "SetValue Fanout: {:?}", fanout_result); if let Err(e) = out_tx.send(Ok(OutboundSetValueResult { fanout_result, diff --git a/veilid-core/src/storage_manager/storage_manager_inner.rs b/veilid-core/src/storage_manager/storage_manager_inner.rs index 93934739..aac4da59 100644 --- a/veilid-core/src/storage_manager/storage_manager_inner.rs +++ b/veilid-core/src/storage_manager/storage_manager_inner.rs @@ -743,6 +743,7 @@ impl StorageManagerInner { receiver: flume::Receiver, handler: impl FnMut(T) -> SendPinBoxFuture + Send + 'static, ) -> bool { - self.deferred_result_processor.add(receiver, handler) + 
self.deferred_result_processor + .add(receiver.into_stream(), handler) } } diff --git a/veilid-core/src/storage_manager/tasks/offline_subkey_writes.rs b/veilid-core/src/storage_manager/tasks/offline_subkey_writes.rs index cf330fe1..e225ac52 100644 --- a/veilid-core/src/storage_manager/tasks/offline_subkey_writes.rs +++ b/veilid-core/src/storage_manager/tasks/offline_subkey_writes.rs @@ -204,9 +204,7 @@ impl StorageManager { } } std::collections::hash_map::Entry::Vacant(_) => { - panic!( - "offline write work items should always be on offline_subkey_writes entries that exist" - ) + warn!("offline write work items should always be on offline_subkey_writes entries that exist: ignoring key {}", result.key) } } diff --git a/veilid-core/src/storage_manager/watch_value.rs b/veilid-core/src/storage_manager/watch_value.rs index 5412560d..561d0ad8 100644 --- a/veilid-core/src/storage_manager/watch_value.rs +++ b/veilid-core/src/storage_manager/watch_value.rs @@ -62,7 +62,7 @@ impl StorageManager { )?; if wva.answer.accepted { - log_dht!(debug "WatchValue canceled: id={} expiration_ts={} ({})", wva.answer.watch_id, debug_ts(wva.answer.expiration_ts.as_u64()), watch_node); + log_dht!(debug "WatchValue canceled: id={} expiration_ts={} ({})", wva.answer.watch_id, display_ts(wva.answer.expiration_ts.as_u64()), watch_node); Ok(Some(OutboundWatchValueResult { expiration_ts: wva.answer.expiration_ts, watch_id: wva.answer.watch_id, @@ -127,9 +127,9 @@ impl StorageManager { if wva.answer.accepted { if watch_id != wva.answer.watch_id { - log_dht!(debug "WatchValue changed: id={}->{} expiration_ts={} ({})", watch_id, wva.answer.watch_id, debug_ts(wva.answer.expiration_ts.as_u64()), watch_node); + log_dht!(debug "WatchValue changed: id={}->{} expiration_ts={} ({})", watch_id, wva.answer.watch_id, display_ts(wva.answer.expiration_ts.as_u64()), watch_node); } else { - log_dht!(debug "WatchValue renewed: id={} expiration_ts={} ({})", watch_id, debug_ts(wva.answer.expiration_ts.as_u64()), watch_node); + log_dht!(debug "WatchValue renewed: id={} expiration_ts={} ({})", watch_id, display_ts(wva.answer.expiration_ts.as_u64()), watch_node); } Ok(Some(OutboundWatchValueResult { @@ -280,7 +280,7 @@ impl StorageManager { let mut done = false; if wva.answer.expiration_ts.as_u64() > 0 { // If the expiration time is greater than zero this watch is active - log_dht!(debug "Watch created: id={} expiration_ts={} ({})", wva.answer.watch_id, debug_ts(wva.answer.expiration_ts.as_u64()), next_node); + log_dht!(debug "Watch created: id={} expiration_ts={} ({})", wva.answer.watch_id, display_ts(wva.answer.expiration_ts.as_u64()), next_node); done = true; } else { // If the returned expiration time is zero, this watch was cancelled or rejected diff --git a/veilid-core/src/veilid_api/debug.rs b/veilid-core/src/veilid_api/debug.rs index 359449c2..1d399438 100644 --- a/veilid-core/src/veilid_api/debug.rs +++ b/veilid-core/src/veilid_api/debug.rs @@ -330,6 +330,17 @@ fn get_filtered_node_ref( routing_table: RoutingTable, ) -> impl FnOnce(&str) -> Option { move |text| { + // Safety selection + let (text, seq) = if let Some((first, second)) = text.split_once('+') { + let seq = get_sequencing(second)?; + (first, Some(seq)) + } else { + (text, None) + }; + if text.is_empty() { + return None; + } + let (text, mods) = text .split_once('/') .map(|x| (x.0, Some(x.1))) @@ -342,10 +353,15 @@ fn get_filtered_node_ref( } else { return None; }; - if let Some(mods) = mods { - Some(get_node_ref_modifiers(nr)(mods)?) 
+ let nr = if let Some(mods) = mods { + get_node_ref_modifiers(nr)(mods)? } else { - Some(nr.default_filtered()) + nr.default_filtered() + }; + if let Some(seq) = seq { + Some(nr.sequencing_clone(seq)) + } else { + Some(nr) } } } @@ -610,9 +626,12 @@ impl VeilidAPI { let mut min_state = BucketEntryState::Unreliable; let mut capabilities = vec![]; + let mut fastest = false; for arg in args { if let Some(ms) = get_bucket_entry_state(&arg) { min_state = ms; + } else if arg == "fastest" { + fastest = true; } else { for cap in arg.split(',') { if let Ok(capfcc) = FourCC::from_str(cap) { @@ -626,7 +645,10 @@ impl VeilidAPI { // Dump routing table entries let routing_table = self.network_manager()?.routing_table(); - Ok(routing_table.debug_info_entries(min_state, capabilities)) + Ok(match fastest { + true => routing_table.debug_info_entries_fastest(min_state, capabilities, 100000), + false => routing_table.debug_info_entries(min_state, capabilities), + }) } async fn debug_entry(&self, args: String) -> VeilidAPIResult { @@ -656,7 +678,8 @@ impl VeilidAPI { "debug_relay", "node_id", get_node_ref(routing_table), - )?; + ) + .ok(); let routing_domain = get_debug_argument_at( &args, @@ -716,7 +739,7 @@ impl VeilidAPI { // Dump connection table let connman = connection_manager.debug_print().await; - Ok(format!("{}\n\n{}\n\n{}\n\n", nodeinfo, peertable, connman)) + Ok(format!("{}\n{}\n{}\n", nodeinfo, peertable, connman)) } async fn debug_nodeid(&self, _args: String) -> VeilidAPIResult { @@ -1746,7 +1769,7 @@ impl VeilidAPI { if ts.as_u64() == 0 { return Ok("Failed to watch value".to_owned()); } - Ok(format!("Success: expiration={:?}", debug_ts(ts.as_u64()))) + Ok(format!("Success: expiration={:?}", display_ts(ts.as_u64()))) } async fn debug_record_cancel(&self, args: Vec) -> VeilidAPIResult { @@ -1986,12 +2009,12 @@ Routing: buckets [dead|reliable] - Display the routing table bucket statistics (default is only non-dead nodes) entries [dead|reliable] [] - Display the index of nodes in the routing table entry - Display all the details about a particular node in the routing table - contact [] - Explain what mechanism would be used to contact a particular node + contact [+][] - Explain what mechanism would be used to contact a particular node resolve - Search the network for a particular node or private route relay [public|local] - Change the relay in use for this node punish list - List all punishments this node has assigned to other nodes / networks clear - Clear all punishments from this node - route allocate [ord|*ord] [rel] [] [in|out] - Allocate a route + route allocate [] [rel] [] [in|out] - Allocate a route release - Release a route publish [full] - Publish a route 'blob' that can be imported on another machine unpublish - Mark a route as 'no longer published' @@ -2000,14 +2023,14 @@ Routing: import - Import a remote route blob generated by another node's 'publish' command. test - Test an allocated or imported remote route -Utilities: - config [insecure] [configkey [new value]] - Display or temporarily change the node config +Utilities: + config [insecure] [configkey [new value]] - Display or temporarily change the node config (most values should not be changed this way, careful!) 
txtrecord - Generate a TXT record for making this node into a bootstrap node capable of DNS bootstrap keypair [cryptokind] - Generate and display a random public/private keypair purge - Throw away the node's routing table, connections, or routes attach - Attach the node to the network if it is detached - detach - Detach the node from the network if it is attached + detach - Detach the node from the network if it is attached restart network - Restart the low level network RPC Operations: @@ -2016,7 +2039,7 @@ RPC Operations: appcall - Send a 'App Call' RPC question to a destination node and display the answer appreply [#id] - Reply to an 'App Call' RPC received by this node -DHT Operations: +DHT Operations: record list - display the dht records in the store purge [bytes] - clear all dht records optionally down to some total size create [ []] - create a new dht record @@ -2042,21 +2065,24 @@ TableDB Operations: * direct: [+][] * relay: @[+][] * private: #[+] + is: + * prefer_ordered: ord + * ensure_ordered: *ord is: - * unsafe: -[ord|*ord] - * safe: [route][,ord|*ord][,rel][,] + * unsafe: - + * safe: [route][,][,rel][,] is: [/][/][/] is: udp|tcp|ws|wss is: ipv4|ipv6 is: public|local is: VLD0 - is: - * a single-quoted json dht schema, or + is: + * a single-quoted json dht schema, or * an integer number for a DFLT schema subkey count. default is '{"kind":"DFLT","o_cnt":1}' is: local, syncget, syncset, updateget, updateset is: a number: 2 - is: + is: * a number: 2 * a comma-separated inclusive range list: 1..=3,5..=8 is: @@ -2243,3 +2269,29 @@ TableDB Operations: Ok((key, rc)) } } + +const DEFAULT_INDENT: usize = 4; +pub fn indent_string(s: &S) -> String { + indent_by(DEFAULT_INDENT, s.to_string()) +} +pub fn indent_all_string(s: &S) -> String { + indent_all_by(DEFAULT_INDENT, s.to_string()) +} + +pub trait ToMultilineString { + fn to_multiline_string(&self) -> String; +} + +impl ToMultilineString for Vec +where + T: fmt::Display, +{ + fn to_multiline_string(&self) -> String { + let mut out = String::new(); + for x in self { + out += &x.to_string(); + out += "\n"; + } + out + } +} diff --git a/veilid-core/src/veilid_api/routing_context.rs b/veilid-core/src/veilid_api/routing_context.rs index 39303f33..7eee4269 100644 --- a/veilid-core/src/veilid_api/routing_context.rs +++ b/veilid-core/src/veilid_api/routing_context.rs @@ -377,7 +377,7 @@ impl RoutingContext { count: u32, ) -> VeilidAPIResult { event!(target: "veilid_api", Level::DEBUG, - "RoutingContext::watch_dht_values(self: {:?}, key: {:?}, subkeys: {:?}, expiration: {:?}, count: {:?})", self, key, subkeys, expiration, count); + "RoutingContext::watch_dht_values(self: {:?}, key: {:?}, subkeys: {:?}, expiration: {}, count: {})", self, key, subkeys, expiration, count); Crypto::validate_crypto_kind(key.kind)?; let storage_manager = self.api.storage_manager()?; diff --git a/veilid-core/src/veilid_api/tests/fixtures.rs b/veilid-core/src/veilid_api/tests/fixtures.rs index 853a0415..2b93e3ae 100644 --- a/veilid-core/src/veilid_api/tests/fixtures.rs +++ b/veilid-core/src/veilid_api/tests/fixtures.rs @@ -26,6 +26,21 @@ pub fn fix_transferstatsdownup() -> TransferStatsDownUp { } } +pub fn fix_answerstats() -> AnswerStats { + AnswerStats { + span: TimestampDuration::new_secs(10), + questions: 10, + answers: 8, + lost_answers: 0, + consecutive_answers_maximum: 1, + consecutive_answers_average: 2, + consecutive_answers_minimum: 3, + consecutive_lost_answers_maximum: 4, + consecutive_lost_answers_average: 5, + consecutive_lost_answers_minimum: 6, + } +} 
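The indentation helpers added to debug.rs above are what the new `fmt::Display` impls later in this diff lean on to nest sub-structures. A small sketch of how they compose, assuming the `indent` crate's `indent_all_by` that the helper delegates to:

```rust
use indent::indent_all_by; // the function indent_all_string() wraps in the diff above

const DEFAULT_INDENT: usize = 4;

fn indent_all_string<S: std::fmt::Display>(s: &S) -> String {
    indent_all_by(DEFAULT_INDENT, s.to_string())
}

fn main() {
    // A nested stats blob is rendered the way the new Display impls do it:
    // "<label>:\n" followed by the child's Display output, every line indented.
    let child = "span: 10s\nreliable: 5s"; // stand-in for a nested stats struct
    println!("state:\n{}", indent_all_string(&child));
    // state:
    //     span: 10s
    //     reliable: 5s
}
```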
+ pub fn fix_rpcstats() -> RPCStats { RPCStats { messages_sent: 1_000_000, @@ -36,6 +51,26 @@ pub fn fix_rpcstats() -> RPCStats { first_consecutive_seen_ts: Some(Timestamp::from(1685569111851)), recent_lost_answers: 5, failed_to_send: 3, + answer: fix_answerstats(), + } +} + +pub fn fix_statestats() -> StateStats { + StateStats { + span: TimestampDuration::new_secs(10), + reliable: TimestampDuration::new_secs(5), + unreliable: TimestampDuration::new_secs(5), + dead: TimestampDuration::new_secs(0), + punished: TimestampDuration::new_secs(0), + reason: StateReasonStats { + can_not_send: TimestampDuration::new_secs(1), + too_many_lost_answers: TimestampDuration::new_secs(2), + no_ping_response: TimestampDuration::new_secs(3), + failed_to_send: TimestampDuration::new_secs(4), + lost_answers: TimestampDuration::new_secs(5), + not_seen_consecutively: TimestampDuration::new_secs(6), + in_unreliable_ping_span: TimestampDuration::new_secs(7), + }, } } @@ -45,6 +80,7 @@ pub fn fix_peerstats() -> PeerStats { rpc_stats: fix_rpcstats(), latency: Some(fix_latencystats()), transfer: fix_transferstatsdownup(), + state: fix_statestats(), } } diff --git a/veilid-core/src/veilid_api/types/stats.rs b/veilid-core/src/veilid_api/types/stats.rs index c5d4ea55..ca856de4 100644 --- a/veilid-core/src/veilid_api/types/stats.rs +++ b/veilid-core/src/veilid_api/types/stats.rs @@ -1,22 +1,54 @@ use super::*; +/// Measurement of communications latency to this node over all RPC questions #[derive(Clone, Debug, Default, PartialEq, Eq, Serialize, Deserialize, JsonSchema)] #[cfg_attr(target_arch = "wasm32", derive(Tsify))] pub struct LatencyStats { - pub fastest: TimestampDuration, // fastest latency in the ROLLING_LATENCIES_SIZE last latencies - pub average: TimestampDuration, // average latency over the ROLLING_LATENCIES_SIZE last latencies - pub slowest: TimestampDuration, // slowest latency in the ROLLING_LATENCIES_SIZE last latencies + /// fastest latency in the ROLLING_LATENCIES_SIZE last latencies + pub fastest: TimestampDuration, + /// average latency over the ROLLING_LATENCIES_SIZE last latencies + pub average: TimestampDuration, + /// slowest latency in the ROLLING_LATENCIES_SIZE last latencies + pub slowest: TimestampDuration, } +impl fmt::Display for LatencyStats { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!( + f, + "{} slow / {} avg / {} fast", + self.slowest, self.average, self.fastest + )?; + Ok(()) + } +} + +/// Measurement of how much data has transferred to or from this node over a time span #[derive(Clone, Debug, Default, PartialEq, Eq, Serialize, Deserialize, JsonSchema)] #[cfg_attr(target_arch = "wasm32", derive(Tsify))] pub struct TransferStats { - pub total: ByteCount, // total amount transferred ever - pub maximum: ByteCount, // maximum rate over the ROLLING_TRANSFERS_SIZE last amounts - pub average: ByteCount, // average rate over the ROLLING_TRANSFERS_SIZE last amounts - pub minimum: ByteCount, // minimum rate over the ROLLING_TRANSFERS_SIZE last amounts + /// total amount transferred ever + pub total: ByteCount, + /// maximum rate over the ROLLING_TRANSFERS_SIZE last amounts + pub maximum: ByteCount, + /// average rate over the ROLLING_TRANSFERS_SIZE last amounts + pub average: ByteCount, + /// minimum rate over the ROLLING_TRANSFERS_SIZE last amounts + pub minimum: ByteCount, } +impl fmt::Display for TransferStats { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!( + f, + "{} min / {} avg / {} max / {} total", + self.minimum, self.average, 
self.maximum, self.total + )?; + Ok(()) + } +} + +/// Transfer statistics from a node to our own (down) and #[derive(Clone, Debug, Default, PartialEq, Eq, Serialize, Deserialize, JsonSchema)] #[cfg_attr(target_arch = "wasm32", derive(Tsify))] pub struct TransferStatsDownUp { @@ -24,24 +56,243 @@ pub struct TransferStatsDownUp { pub up: TransferStats, } +impl fmt::Display for TransferStatsDownUp { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + writeln!(f, "down: {}", self.down)?; + writeln!(f, "up: {}", self.up)?; + Ok(()) + } +} + +/// Measurement of what states the node has been in over a time span +#[derive(Clone, Debug, Default, PartialEq, Eq, Serialize, Deserialize, JsonSchema)] +#[cfg_attr(target_arch = "wasm32", derive(Tsify))] +pub struct StateStats { + /// total amount of time measured + pub span: TimestampDuration, + /// amount of time spent in a reliable state + pub reliable: TimestampDuration, + /// amount of time spent in an unreliable state + pub unreliable: TimestampDuration, + /// amount of time spent in a dead state + pub dead: TimestampDuration, + /// amount of time spent in a punished state + pub punished: TimestampDuration, + /// state reason stats for this peer + #[serde(default)] + pub reason: StateReasonStats, +} + +impl fmt::Display for StateStats { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + writeln!(f, "span: {}", self.span)?; + writeln!(f, "reliable: {}", self.reliable)?; + writeln!(f, "unreliable: {}", self.unreliable)?; + writeln!(f, "dead: {}", self.dead)?; + writeln!(f, "punished: {}", self.punished)?; + write!(f, "reason:\n{}", indent_all_string(&self.reason))?; + Ok(()) + } +} + +/// Measurement of what state reasons the node has been in over a time span +#[derive(Clone, Debug, Default, PartialEq, Eq, Serialize, Deserialize, JsonSchema)] +#[cfg_attr(target_arch = "wasm32", derive(Tsify))] +pub struct StateReasonStats { + /// time spent dead due to being unable to send + pub can_not_send: TimestampDuration, + /// time spent dead because of too many lost answers + pub too_many_lost_answers: TimestampDuration, + /// time spent dead because of no ping response + pub no_ping_response: TimestampDuration, + /// time spent unreliable because of failures to send + pub failed_to_send: TimestampDuration, + /// time spent unreliable because of lost answers + pub lost_answers: TimestampDuration, + /// time spent unreliable because of not being seen consecutively + pub not_seen_consecutively: TimestampDuration, + /// time spent unreliable because we are in the unreliable ping span + pub in_unreliable_ping_span: TimestampDuration, +} + +impl fmt::Display for StateReasonStats { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + writeln!(f, "(dead) can_not_send: {}", self.can_not_send)?; + writeln!(f, "(dead) lost_answers: {}", self.too_many_lost_answers)?; + writeln!(f, "(dead) no_ping_response: {}", self.no_ping_response)?; + writeln!(f, "(urel) failed_to_send: {}", self.failed_to_send)?; + writeln!(f, "(urel) lost_answers: {}", self.lost_answers)?; + writeln!( + f, + "(urel) not_consecutive: {}", + self.not_seen_consecutively + )?; + writeln!( + f, + "(urel) unreliable_ping: {}", + self.in_unreliable_ping_span + )?; + writeln!(f, "(urel) can_not_send: {}", self.can_not_send)?; + Ok(()) + } +} + +/// Measurement of round-trip RPC question/answer performance +#[derive(Clone, Debug, Default, PartialEq, Eq, Serialize, Deserialize, JsonSchema)] +#[cfg_attr(target_arch = "wasm32", derive(Tsify))] +pub struct AnswerStats { + /// 
total amount of time measured + pub span: TimestampDuration, + /// number of questions sent in this span + pub questions: u32, + /// number of answers received in this span + pub answers: u32, + /// number of lost answers in this span + pub lost_answers: u32, + /// maximum number of received answers before a lost answer in this span + pub consecutive_answers_maximum: u32, + /// average number of received answers before a lost answer in this span + pub consecutive_answers_average: u32, + /// minimum number of received answers before a lost answer in this span + pub consecutive_answers_minimum: u32, + /// maximum number of timeouts before a received answer in this span + pub consecutive_lost_answers_maximum: u32, + /// average number of timeouts before a received answer in this span + pub consecutive_lost_answers_average: u32, + /// minimum number of timeouts before a received answer in this span + pub consecutive_lost_answers_minimum: u32, +} + +impl fmt::Display for AnswerStats { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + writeln!(f, "span: {}", self.span)?; + writeln!( + f, + "questions/answers/lost: {} / {} / {}", + self.questions, self.answers, self.lost_answers + )?; + writeln!( + f, + "consecutive answers min/avg/max: {} / {} / {}", + self.consecutive_answers_minimum, + self.consecutive_answers_average, + self.consecutive_answers_maximum + )?; + writeln!( + f, + "consecutive lost min/avg/max: {} / {} / {}", + self.consecutive_lost_answers_minimum, + self.consecutive_lost_answers_average, + self.consecutive_lost_answers_maximum + )?; + + Ok(()) + } +} + +/// Statistics for RPC operations performed on a node #[derive(Clone, Debug, Default, PartialEq, Eq, Serialize, Deserialize, JsonSchema)] #[cfg_attr(target_arch = "wasm32", derive(Tsify))] pub struct RPCStats { - pub messages_sent: u32, // number of rpcs that have been sent in the total_time range - pub messages_rcvd: u32, // number of rpcs that have been received in the total_time range - pub questions_in_flight: u32, // number of questions issued that have yet to be answered - pub last_question_ts: Option, // when the peer was last questioned (either successfully or not) and we wanted an answer - pub last_seen_ts: Option, // when the peer was last seen for any reason, including when we first attempted to reach out to it - pub first_consecutive_seen_ts: Option, // the timestamp of the first consecutive proof-of-life for this node (an answer or received question) - pub recent_lost_answers: u32, // number of answers that have been lost since we lost reliability - pub failed_to_send: u32, // number of messages that have failed to send or connections dropped since we last successfully sent one + /// number of rpcs that have been sent in the total entry time range + pub messages_sent: u32, + /// number of rpcs that have been received in the total entry time range + pub messages_rcvd: u32, + /// number of questions issued that have yet to be answered + pub questions_in_flight: u32, + /// when the peer was last questioned (either successfully or not) and we wanted an answer + pub last_question_ts: Option, + /// when the peer was last seen for any reason, including when we first attempted to reach out to it + pub last_seen_ts: Option, + /// the timestamp of the first consecutive proof-of-life for this node (an answer or received question) + pub first_consecutive_seen_ts: Option, + /// number of answers that have been lost consecutively + pub recent_lost_answers: u32, + /// number of messages that have failed to send or 
connections dropped since we last successfully sent one + pub failed_to_send: u32, + /// rpc answer stats for this peer + #[serde(default)] + pub answer: AnswerStats, } +impl fmt::Display for RPCStats { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + writeln!( + f, + "# sent/received/in-flight: {} / {} / {}", + self.messages_sent, self.messages_rcvd, self.questions_in_flight + )?; + writeln!( + f, + "# recently-lost/failed-to-send: {} / {}", + self.recent_lost_answers, self.failed_to_send + )?; + writeln!( + f, + "last_question: {}", + if let Some(ts) = &self.last_question_ts { + ts.to_string() + } else { + "None".to_owned() + } + )?; + writeln!( + f, + "last_seen: {}", + if let Some(ts) = &self.last_seen_ts { + ts.to_string() + } else { + "None".to_owned() + } + )?; + writeln!( + f, + "first_consecutive: {}", + if let Some(ts) = &self.first_consecutive_seen_ts { + ts.to_string() + } else { + "None".to_owned() + } + )?; + + write!(f, "answers:\n{}", indent_all_string(&self.answer))?; + + Ok(()) + } +} + +/// Statistics for a peer in the routing table #[derive(Clone, Debug, Default, PartialEq, Eq, Serialize, Deserialize, JsonSchema)] #[cfg_attr(target_arch = "wasm32", derive(Tsify))] pub struct PeerStats { - pub time_added: Timestamp, // when the peer was added to the routing table - pub rpc_stats: RPCStats, // information about RPCs - pub latency: Option, // latencies for communications with the peer - pub transfer: TransferStatsDownUp, // Stats for communications with the peer + /// when the peer was added to the routing table + pub time_added: Timestamp, + #[serde(default)] + /// information about RPCs + pub rpc_stats: RPCStats, + #[serde(default)] + /// latency stats for this peer + pub latency: Option, + /// transfer stats for this peer + #[serde(default)] + pub transfer: TransferStatsDownUp, + /// state stats for this peer + #[serde(default)] + pub state: StateStats, +} + +impl fmt::Display for PeerStats { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + writeln!(f, "time_added: {}", self.time_added)?; + write!(f, "rpc_stats:\n{}", indent_all_string(&self.rpc_stats))?; + if let Some(ls) = &self.latency { + writeln!(f, "latency: {}", ls)?; + } else { + writeln!(f, "latency: None")?; + } + write!(f, "transfer:\n{}", indent_all_string(&self.transfer))?; + write!(f, "state:\n{}", indent_all_string(&self.state))?; + + Ok(()) + } } diff --git a/veilid-core/src/veilid_api/types/timestamp.rs b/veilid-core/src/veilid_api/types/timestamp.rs index 59d24b0e..61df8b7b 100644 --- a/veilid-core/src/veilid_api/types/timestamp.rs +++ b/veilid-core/src/veilid_api/types/timestamp.rs @@ -2,11 +2,11 @@ use super::*; aligned_u64_type!(Timestamp); -aligned_u64_type_default_display_impl!(Timestamp); +aligned_u64_type_default_debug_impl!(Timestamp); -impl fmt::Debug for Timestamp { +impl fmt::Display for Timestamp { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "{}", debug_ts(self.as_u64())) + write!(f, "{}", display_ts(self.as_u64())) } } diff --git a/veilid-core/src/veilid_api/types/timestamp_duration.rs b/veilid-core/src/veilid_api/types/timestamp_duration.rs index cf6834b1..bc7be0c2 100644 --- a/veilid-core/src/veilid_api/types/timestamp_duration.rs +++ b/veilid-core/src/veilid_api/types/timestamp_duration.rs @@ -2,20 +2,20 @@ use super::*; aligned_u64_type!(TimestampDuration); -aligned_u64_type_default_display_impl!(TimestampDuration); +aligned_u64_type_default_debug_impl!(TimestampDuration); aligned_u64_type_default_math_impl!(TimestampDuration); 
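The Timestamp/TimestampDuration change here swaps which trait carries the human-readable rendering: `{}` now goes through `display_ts`/`display_duration`, while `{:?}` falls back to the plain numeric form, which is what lets the new stats `Display` impls print timestamps directly. A self-contained toy sketch of that pattern; `Ts` and its placeholder formatter are invented for illustration and are not the real `aligned_u64_type!` machinery:

```rust
use std::fmt;

// Toy stand-in for the aligned-u64 Timestamp newtype, showing the intent of the
// Display/Debug swap: Display is the human-readable form, Debug is the raw one.
struct Ts(u64);

fn display_ts(us: u64) -> String {
    // placeholder formatting; the real display_ts() renders a calendar date/time
    format!("{}.{:06}s", us / 1_000_000, us % 1_000_000)
}

impl fmt::Display for Ts {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "{}", display_ts(self.0))
    }
}

impl fmt::Debug for Ts {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // raw value, in the spirit of aligned_u64_type_default_debug_impl!
        write!(f, "Ts({})", self.0)
    }
}

fn main() {
    let ts = Ts(1_685_569_111_851);
    println!("{}", ts);   // human-readable, used by the new stats Display impls
    println!("{:?}", ts); // raw count, for logs that need the exact value
}
```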
-impl fmt::Debug for TimestampDuration { +impl fmt::Display for TimestampDuration { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "{}", debug_duration(self.as_u64())) + write!(f, "{}", display_duration(self.as_u64())) } } impl TimestampDuration { - pub fn new_secs(secs: N) -> Self { - TimestampDuration::new(secs.to_u64().unwrap() * 1_000_000u64) + pub const fn new_secs(secs: u32) -> Self { + TimestampDuration::new(secs as u64 * 1_000_000u64) } - pub fn new_ms(ms: N) -> Self { - TimestampDuration::new(ms.to_u64().unwrap() * 1_000u64) + pub const fn new_ms(ms: u64) -> Self { + TimestampDuration::new(ms * 1_000u64) } } diff --git a/veilid-core/src/veilid_config.rs b/veilid-core/src/veilid_config.rs index db26b6fb..9eecae67 100644 --- a/veilid-core/src/veilid_config.rs +++ b/veilid-core/src/veilid_config.rs @@ -293,31 +293,27 @@ pub struct VeilidConfigTLS { impl Default for VeilidConfigTLS { fn default() -> Self { - let certificate_path = get_default_ssl_directory("certs/server.crt"); - let private_key_path = get_default_ssl_directory("keys/server.key"); Self { - certificate_path, - private_key_path, + certificate_path: "".to_string(), + private_key_path: "".to_string(), connection_initial_timeout_ms: 2000, } } } #[cfg_attr(target_arch = "wasm32", allow(unused_variables))] -pub fn get_default_ssl_directory(sub_path: &str) -> String { +pub fn get_default_ssl_directory( + program_name: &str, + organization: &str, + qualifier: &str, + sub_path: &str, +) -> String { cfg_if::cfg_if! { if #[cfg(target_arch = "wasm32")] { "".to_owned() } else { use std::path::PathBuf; - #[cfg(unix)] - { - let default_path = PathBuf::from("/etc/veilid-server/ssl").join(sub_path); - if default_path.exists() { - return default_path.to_string_lossy().into(); - } - } - ProjectDirs::from("org", "Veilid", "Veilid") + ProjectDirs::from(qualifier, organization, program_name) .map(|dirs| dirs.data_local_dir().join("ssl").join(sub_path)) .unwrap_or_else(|| PathBuf::from("./ssl").join(sub_path)) .to_string_lossy() @@ -535,37 +531,26 @@ impl Default for VeilidConfigNetwork { } } -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, JsonSchema)] +#[derive(Debug, Default, Clone, PartialEq, Eq, Serialize, Deserialize, JsonSchema)] #[cfg_attr(target_arch = "wasm32", derive(Tsify))] pub struct VeilidConfigTableStore { pub directory: String, pub delete: bool, } -impl Default for VeilidConfigTableStore { - fn default() -> Self { - Self { - directory: get_default_store_path("table_store"), - delete: false, - } - } -} - #[cfg_attr(target_arch = "wasm32", allow(unused_variables))] -fn get_default_store_path(store_type: &str) -> String { +fn get_default_store_path( + program_name: &str, + organization: &str, + qualifier: &str, + store_type: &str, +) -> String { cfg_if::cfg_if! 
{ if #[cfg(target_arch = "wasm32")] { "".to_owned() } else { use std::path::PathBuf; - #[cfg(unix)] - { - let globalpath = PathBuf::from(format!("/var/db/veilid-server/{}", store_type)); - if globalpath.exists() { - return globalpath.to_string_lossy().into(); - } - } - ProjectDirs::from("org", "Veilid", "Veilid") + ProjectDirs::from(qualifier, organization, program_name) .map(|dirs| dirs.data_local_dir().to_path_buf()) .unwrap_or_else(|| PathBuf::from("./")) .join(store_type) @@ -585,7 +570,7 @@ pub struct VeilidConfigBlockStore { impl Default for VeilidConfigBlockStore { fn default() -> Self { Self { - directory: get_default_store_path("block_store"), + directory: "".to_string(), delete: false, } } @@ -608,7 +593,7 @@ impl Default for VeilidConfigProtectedStore { Self { allow_insecure_fallback: false, always_use_insecure_storage: false, - directory: get_default_store_path("protected_store"), + directory: "".to_string(), delete: false, device_encryption_key_password: "".to_owned(), new_device_encryption_key_password: None, @@ -744,6 +729,81 @@ pub struct VeilidConfigInner { pub network: VeilidConfigNetwork, } +impl VeilidConfigInner { + /// Create a new 'VeilidConfigInner' for use with `setup_from_config` + /// Should match the application bundle name if used elsewhere in the format: + /// `qualifier.organization.program_name` - for example `org.veilid.veilidchat` + /// + /// The 'bundle name' will be used when choosing the default storage location for the + /// application in a platform-dependent fashion, unless 'storage_directory' is + /// specified to override this location + /// + /// * `program_name` - Pick a program name and do not change it from release to release, + /// see `VeilidConfigInner::program_name` for details. + /// * `organization_name` - Similar to program_name, but for the organization publishing this app + /// * `qualifier` - Suffix for the application bundle name + /// * `storage_directory` - Override for the path where veilid-core stores its content + /// such as the table store, protected store, and block store + /// * `config_directory` - Override for the path where veilid-core can retrieve extra configuration files + /// such as certificates and keys + pub fn new( + program_name: &str, + organization: &str, + qualifier: &str, + storage_directory: Option<&str>, + config_directory: Option<&str>, + ) -> Self { + let mut out = Self { + program_name: program_name.to_owned(), + ..Default::default() + }; + + if let Some(storage_directory) = storage_directory { + out.protected_store.directory = (std::path::PathBuf::from(storage_directory) + .join("protected_store")) + .to_string_lossy() + .to_string(); + out.table_store.directory = (std::path::PathBuf::from(storage_directory) + .join("table_store")) + .to_string_lossy() + .to_string(); + out.block_store.directory = (std::path::PathBuf::from(storage_directory) + .join("block_store")) + .to_string_lossy() + .to_string(); + } else { + out.protected_store.directory = + get_default_store_path(program_name, organization, qualifier, "protected_store"); + out.table_store.directory = + get_default_store_path(program_name, organization, qualifier, "table_store"); + out.block_store.directory = + get_default_store_path(program_name, organization, qualifier, "block_store"); + } + + if let Some(config_directory) = config_directory { + out.network.tls.certificate_path = (std::path::PathBuf::from(config_directory) + .join("ssl/certs/server.crt")) + .to_string_lossy() + .to_string(); + out.network.tls.private_key_path = 
(std::path::PathBuf::from(config_directory) + .join("ssl/keys/server.key")) + .to_string_lossy() + .to_string(); + } else { + out.network.tls.certificate_path = get_default_ssl_directory( + program_name, + organization, + qualifier, + "certs/server.crt", + ); + out.network.tls.private_key_path = + get_default_ssl_directory(program_name, organization, qualifier, "keys/server.key"); + } + + out + } +} + /// The configuration built for each Veilid node during API startup #[derive(Clone)] pub struct VeilidConfig { diff --git a/veilid-flutter/example/pubspec.lock b/veilid-flutter/example/pubspec.lock index 76c2c073..30e5f619 100644 --- a/veilid-flutter/example/pubspec.lock +++ b/veilid-flutter/example/pubspec.lock @@ -283,10 +283,10 @@ packages: dependency: transitive description: name: path_provider_android - sha256: "6f01f8e37ec30b07bc424b4deabac37cacb1bc7e2e515ad74486039918a37eb7" + sha256: c464428172cb986b758c6d1724c603097febb8fb855aa265aeecc9280c294d4a url: "https://pub.dev" source: hosted - version: "2.2.10" + version: "2.2.12" path_provider_foundation: dependency: transitive description: @@ -478,10 +478,10 @@ packages: dependency: transitive description: name: xdg_directories - sha256: faea9dee56b520b55a566385b84f2e8de55e7496104adada9962e0bd11bcff1d + sha256: "7a3f37b05d989967cdddcbb571f1ea834867ae2faa29725fd085180e0883aa15" url: "https://pub.dev" source: hosted - version: "1.0.4" + version: "1.1.0" xterm: dependency: "direct main" description: @@ -499,5 +499,5 @@ packages: source: hosted version: "0.0.6" sdks: - dart: ">=3.4.0 <4.0.0" - flutter: ">=3.22.0" + dart: ">=3.5.0 <4.0.0" + flutter: ">=3.24.0" diff --git a/veilid-flutter/lib/routing_context.freezed.dart b/veilid-flutter/lib/routing_context.freezed.dart index 529a9315..b7df1089 100644 --- a/veilid-flutter/lib/routing_context.freezed.dart +++ b/veilid-flutter/lib/routing_context.freezed.dart @@ -68,8 +68,13 @@ mixin _$DHTSchema { required TResult orElse(), }) => throw _privateConstructorUsedError; + + /// Serializes this DHTSchema to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of DHTSchema + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $DHTSchemaCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -92,6 +97,8 @@ class _$DHTSchemaCopyWithImpl<$Res, $Val extends DHTSchema> // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of DHTSchema + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -125,6 +132,8 @@ class __$$DHTSchemaDFLTImplCopyWithImpl<$Res> _$DHTSchemaDFLTImpl _value, $Res Function(_$DHTSchemaDFLTImpl) _then) : super(_value, _then); + /// Create a copy of DHTSchema + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -167,11 +176,13 @@ class _$DHTSchemaDFLTImpl implements DHTSchemaDFLT { (identical(other.oCnt, oCnt) || other.oCnt == oCnt)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, oCnt); - @JsonKey(ignore: true) + /// Create a copy of DHTSchema + /// with the given fields replaced by the non-null parameter values. 
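A minimal usage sketch for the `VeilidConfigInner::new` constructor added above. The program/organization/qualifier names are invented for illustration, and it assumes `veilid_core::VeilidConfigInner` is in scope; with both overrides left as `None`, the store directories and TLS paths are derived per-platform via `ProjectDirs`, exactly as in the constructor body:

```rust
use veilid_core::VeilidConfigInner;

fn main() {
    // Sketch only: "org.Example.myapp"-style bundle identity, names are made up.
    let config = VeilidConfigInner::new(
        "myapp",   // program_name
        "Example", // organization
        "org",     // qualifier
        None,      // storage_directory override
        None,      // config_directory override
    );
    // With no overrides these point at the platform-default data directory.
    println!("table store at: {}", config.table_store.directory);
    println!("tls cert at:    {}", config.network.tls.certificate_path);
}
```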
+ @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$DHTSchemaDFLTImplCopyWith<_$DHTSchemaDFLTImpl> get copyWith => @@ -255,8 +266,11 @@ abstract class DHTSchemaDFLT implements DHTSchema { @override int get oCnt; + + /// Create a copy of DHTSchema + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$DHTSchemaDFLTImplCopyWith<_$DHTSchemaDFLTImpl> get copyWith => throw _privateConstructorUsedError; } @@ -280,6 +294,8 @@ class __$$DHTSchemaSMPLImplCopyWithImpl<$Res> _$DHTSchemaSMPLImpl _value, $Res Function(_$DHTSchemaSMPLImpl) _then) : super(_value, _then); + /// Create a copy of DHTSchema + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -339,12 +355,14 @@ class _$DHTSchemaSMPLImpl implements DHTSchemaSMPL { const DeepCollectionEquality().equals(other._members, _members)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash( runtimeType, oCnt, const DeepCollectionEquality().hash(_members)); - @JsonKey(ignore: true) + /// Create a copy of DHTSchema + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$DHTSchemaSMPLImplCopyWith<_$DHTSchemaSMPLImpl> get copyWith => @@ -431,8 +449,11 @@ abstract class DHTSchemaSMPL implements DHTSchema { @override int get oCnt; List get members; + + /// Create a copy of DHTSchema + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$DHTSchemaSMPLImplCopyWith<_$DHTSchemaSMPLImpl> get copyWith => throw _privateConstructorUsedError; } @@ -446,8 +467,12 @@ mixin _$DHTSchemaMember { FixedEncodedString43 get mKey => throw _privateConstructorUsedError; int get mCnt => throw _privateConstructorUsedError; + /// Serializes this DHTSchemaMember to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of DHTSchemaMember + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $DHTSchemaMemberCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -471,6 +496,8 @@ class _$DHTSchemaMemberCopyWithImpl<$Res, $Val extends DHTSchemaMember> // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of DHTSchemaMember + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -509,6 +536,8 @@ class __$$DHTSchemaMemberImplCopyWithImpl<$Res> _$DHTSchemaMemberImpl _value, $Res Function(_$DHTSchemaMemberImpl) _then) : super(_value, _then); + /// Create a copy of DHTSchemaMember + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -556,11 +585,13 @@ class _$DHTSchemaMemberImpl implements _DHTSchemaMember { (identical(other.mCnt, mCnt) || other.mCnt == mCnt)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, mKey, mCnt); - @JsonKey(ignore: true) + /// Create a copy of DHTSchemaMember + /// with the given fields replaced by the non-null parameter values. 
+ @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$DHTSchemaMemberImplCopyWith<_$DHTSchemaMemberImpl> get copyWith => @@ -587,8 +618,11 @@ abstract class _DHTSchemaMember implements DHTSchemaMember { FixedEncodedString43 get mKey; @override int get mCnt; + + /// Create a copy of DHTSchemaMember + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$DHTSchemaMemberImplCopyWith<_$DHTSchemaMemberImpl> get copyWith => throw _privateConstructorUsedError; } @@ -604,8 +638,12 @@ mixin _$DHTRecordDescriptor { DHTSchema get schema => throw _privateConstructorUsedError; FixedEncodedString43? get ownerSecret => throw _privateConstructorUsedError; + /// Serializes this DHTRecordDescriptor to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of DHTRecordDescriptor + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $DHTRecordDescriptorCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -635,6 +673,8 @@ class _$DHTRecordDescriptorCopyWithImpl<$Res, $Val extends DHTRecordDescriptor> // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of DHTRecordDescriptor + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -663,6 +703,8 @@ class _$DHTRecordDescriptorCopyWithImpl<$Res, $Val extends DHTRecordDescriptor> ) as $Val); } + /// Create a copy of DHTRecordDescriptor + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $DHTSchemaCopyWith<$Res> get schema { @@ -698,6 +740,8 @@ class __$$DHTRecordDescriptorImplCopyWithImpl<$Res> $Res Function(_$DHTRecordDescriptorImpl) _then) : super(_value, _then); + /// Create a copy of DHTRecordDescriptor + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -765,11 +809,13 @@ class _$DHTRecordDescriptorImpl implements _DHTRecordDescriptor { other.ownerSecret == ownerSecret)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, key, owner, schema, ownerSecret); - @JsonKey(ignore: true) + /// Create a copy of DHTRecordDescriptor + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$DHTRecordDescriptorImplCopyWith<_$DHTRecordDescriptorImpl> get copyWith => @@ -802,8 +848,11 @@ abstract class _DHTRecordDescriptor implements DHTRecordDescriptor { DHTSchema get schema; @override FixedEncodedString43? get ownerSecret; + + /// Create a copy of DHTRecordDescriptor + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$DHTRecordDescriptorImplCopyWith<_$DHTRecordDescriptorImpl> get copyWith => throw _privateConstructorUsedError; } @@ -819,8 +868,12 @@ mixin _$ValueData { Uint8List get data => throw _privateConstructorUsedError; FixedEncodedString43 get writer => throw _privateConstructorUsedError; + /// Serializes this ValueData to a JSON map. 
Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of ValueData + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $ValueDataCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -846,6 +899,8 @@ class _$ValueDataCopyWithImpl<$Res, $Val extends ValueData> // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of ValueData + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -892,6 +947,8 @@ class __$$ValueDataImplCopyWithImpl<$Res> _$ValueDataImpl _value, $Res Function(_$ValueDataImpl) _then) : super(_value, _then); + /// Create a copy of ValueData + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -951,12 +1008,14 @@ class _$ValueDataImpl implements _ValueData { (identical(other.writer, writer) || other.writer == writer)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash( runtimeType, seq, const DeepCollectionEquality().hash(data), writer); - @JsonKey(ignore: true) + /// Create a copy of ValueData + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$ValueDataImplCopyWith<_$ValueDataImpl> get copyWith => @@ -986,8 +1045,11 @@ abstract class _ValueData implements ValueData { Uint8List get data; @override FixedEncodedString43 get writer; + + /// Create a copy of ValueData + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$ValueDataImplCopyWith<_$ValueDataImpl> get copyWith => throw _privateConstructorUsedError; } @@ -1003,8 +1065,12 @@ mixin _$SafetySpec { Sequencing get sequencing => throw _privateConstructorUsedError; String? get preferredRoute => throw _privateConstructorUsedError; + /// Serializes this SafetySpec to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of SafetySpec + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $SafetySpecCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -1032,6 +1098,8 @@ class _$SafetySpecCopyWithImpl<$Res, $Val extends SafetySpec> // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of SafetySpec + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -1084,6 +1152,8 @@ class __$$SafetySpecImplCopyWithImpl<$Res> _$SafetySpecImpl _value, $Res Function(_$SafetySpecImpl) _then) : super(_value, _then); + /// Create a copy of SafetySpec + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -1154,12 +1224,14 @@ class _$SafetySpecImpl implements _SafetySpec { other.preferredRoute == preferredRoute)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, hopCount, stability, sequencing, preferredRoute); - @JsonKey(ignore: true) + /// Create a copy of SafetySpec + /// with the given fields replaced by the non-null parameter values. 
+ @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$SafetySpecImplCopyWith<_$SafetySpecImpl> get copyWith => @@ -1191,8 +1263,11 @@ abstract class _SafetySpec implements SafetySpec { Sequencing get sequencing; @override String? get preferredRoute; + + /// Create a copy of SafetySpec + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$SafetySpecImplCopyWith<_$SafetySpecImpl> get copyWith => throw _privateConstructorUsedError; } @@ -1207,8 +1282,12 @@ mixin _$RouteBlob { @Uint8ListJsonConverter() Uint8List get blob => throw _privateConstructorUsedError; + /// Serializes this RouteBlob to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of RouteBlob + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $RouteBlobCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -1231,6 +1310,8 @@ class _$RouteBlobCopyWithImpl<$Res, $Val extends RouteBlob> // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of RouteBlob + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -1269,6 +1350,8 @@ class __$$RouteBlobImplCopyWithImpl<$Res> _$RouteBlobImpl _value, $Res Function(_$RouteBlobImpl) _then) : super(_value, _then); + /// Create a copy of RouteBlob + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -1317,12 +1400,14 @@ class _$RouteBlobImpl implements _RouteBlob { const DeepCollectionEquality().equals(other.blob, blob)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash( runtimeType, routeId, const DeepCollectionEquality().hash(blob)); - @JsonKey(ignore: true) + /// Create a copy of RouteBlob + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$RouteBlobImplCopyWith<_$RouteBlobImpl> get copyWith => @@ -1350,8 +1435,11 @@ abstract class _RouteBlob implements RouteBlob { @override @Uint8ListJsonConverter() Uint8List get blob; + + /// Create a copy of RouteBlob + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$RouteBlobImplCopyWith<_$RouteBlobImpl> get copyWith => throw _privateConstructorUsedError; } @@ -1368,8 +1456,12 @@ mixin _$DHTRecordReport { List get localSeqs => throw _privateConstructorUsedError; List get networkSeqs => throw _privateConstructorUsedError; + /// Serializes this DHTRecordReport to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of DHTRecordReport + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $DHTRecordReportCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -1397,6 +1489,8 @@ class _$DHTRecordReportCopyWithImpl<$Res, $Val extends DHTRecordReport> // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of DHTRecordReport + /// with the given fields replaced by the non-null parameter values. 
@pragma('vm:prefer-inline') @override $Res call({ @@ -1449,6 +1543,8 @@ class __$$DHTRecordReportImplCopyWithImpl<$Res> _$DHTRecordReportImpl _value, $Res Function(_$DHTRecordReportImpl) _then) : super(_value, _then); + /// Create a copy of DHTRecordReport + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -1545,7 +1641,7 @@ class _$DHTRecordReportImpl implements _DHTRecordReport { .equals(other._networkSeqs, _networkSeqs)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash( runtimeType, @@ -1554,7 +1650,9 @@ class _$DHTRecordReportImpl implements _DHTRecordReport { const DeepCollectionEquality().hash(_localSeqs), const DeepCollectionEquality().hash(_networkSeqs)); - @JsonKey(ignore: true) + /// Create a copy of DHTRecordReport + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$DHTRecordReportImplCopyWith<_$DHTRecordReportImpl> get copyWith => @@ -1587,8 +1685,11 @@ abstract class _DHTRecordReport implements DHTRecordReport { List get localSeqs; @override List get networkSeqs; + + /// Create a copy of DHTRecordReport + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$DHTRecordReportImplCopyWith<_$DHTRecordReportImpl> get copyWith => throw _privateConstructorUsedError; } diff --git a/veilid-flutter/lib/veilid_config.freezed.dart b/veilid-flutter/lib/veilid_config.freezed.dart index ed022547..efab2499 100644 --- a/veilid-flutter/lib/veilid_config.freezed.dart +++ b/veilid-flutter/lib/veilid_config.freezed.dart @@ -25,8 +25,12 @@ mixin _$VeilidFFIConfigLoggingTerminal { VeilidConfigLogLevel get level => throw _privateConstructorUsedError; List get ignoreLogTargets => throw _privateConstructorUsedError; + /// Serializes this VeilidFFIConfigLoggingTerminal to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of VeilidFFIConfigLoggingTerminal + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $VeilidFFIConfigLoggingTerminalCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -56,6 +60,8 @@ class _$VeilidFFIConfigLoggingTerminalCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of VeilidFFIConfigLoggingTerminal + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -105,6 +111,8 @@ class __$$VeilidFFIConfigLoggingTerminalImplCopyWithImpl<$Res> $Res Function(_$VeilidFFIConfigLoggingTerminalImpl) _then) : super(_value, _then); + /// Create a copy of VeilidFFIConfigLoggingTerminal + /// with the given fields replaced by the non-null parameter values. 
@pragma('vm:prefer-inline') @override $Res call({ @@ -184,12 +192,14 @@ class _$VeilidFFIConfigLoggingTerminalImpl .equals(other._ignoreLogTargets, _ignoreLogTargets)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, enabled, level, const DeepCollectionEquality().hash(_ignoreLogTargets)); - @JsonKey(ignore: true) + /// Create a copy of VeilidFFIConfigLoggingTerminal + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$VeilidFFIConfigLoggingTerminalImplCopyWith< @@ -222,8 +232,11 @@ abstract class _VeilidFFIConfigLoggingTerminal VeilidConfigLogLevel get level; @override List get ignoreLogTargets; + + /// Create a copy of VeilidFFIConfigLoggingTerminal + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$VeilidFFIConfigLoggingTerminalImplCopyWith< _$VeilidFFIConfigLoggingTerminalImpl> get copyWith => throw _privateConstructorUsedError; @@ -242,8 +255,12 @@ mixin _$VeilidFFIConfigLoggingOtlp { String get serviceName => throw _privateConstructorUsedError; List get ignoreLogTargets => throw _privateConstructorUsedError; + /// Serializes this VeilidFFIConfigLoggingOtlp to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of VeilidFFIConfigLoggingOtlp + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $VeilidFFIConfigLoggingOtlpCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -274,6 +291,8 @@ class _$VeilidFFIConfigLoggingOtlpCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of VeilidFFIConfigLoggingOtlp + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -335,6 +354,8 @@ class __$$VeilidFFIConfigLoggingOtlpImplCopyWithImpl<$Res> $Res Function(_$VeilidFFIConfigLoggingOtlpImpl) _then) : super(_value, _then); + /// Create a copy of VeilidFFIConfigLoggingOtlp + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -436,12 +457,14 @@ class _$VeilidFFIConfigLoggingOtlpImpl .equals(other._ignoreLogTargets, _ignoreLogTargets)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, enabled, level, grpcEndpoint, serviceName, const DeepCollectionEquality().hash(_ignoreLogTargets)); - @JsonKey(ignore: true) + /// Create a copy of VeilidFFIConfigLoggingOtlp + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$VeilidFFIConfigLoggingOtlpImplCopyWith<_$VeilidFFIConfigLoggingOtlpImpl> @@ -478,8 +501,11 @@ abstract class _VeilidFFIConfigLoggingOtlp String get serviceName; @override List get ignoreLogTargets; + + /// Create a copy of VeilidFFIConfigLoggingOtlp + /// with the given fields replaced by the non-null parameter values. 
@override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$VeilidFFIConfigLoggingOtlpImplCopyWith<_$VeilidFFIConfigLoggingOtlpImpl> get copyWith => throw _privateConstructorUsedError; } @@ -495,8 +521,12 @@ mixin _$VeilidFFIConfigLoggingApi { VeilidConfigLogLevel get level => throw _privateConstructorUsedError; List get ignoreLogTargets => throw _privateConstructorUsedError; + /// Serializes this VeilidFFIConfigLoggingApi to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of VeilidFFIConfigLoggingApi + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $VeilidFFIConfigLoggingApiCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -524,6 +554,8 @@ class _$VeilidFFIConfigLoggingApiCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of VeilidFFIConfigLoggingApi + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -573,6 +605,8 @@ class __$$VeilidFFIConfigLoggingApiImplCopyWithImpl<$Res> $Res Function(_$VeilidFFIConfigLoggingApiImpl) _then) : super(_value, _then); + /// Create a copy of VeilidFFIConfigLoggingApi + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -651,12 +685,14 @@ class _$VeilidFFIConfigLoggingApiImpl .equals(other._ignoreLogTargets, _ignoreLogTargets)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, enabled, level, const DeepCollectionEquality().hash(_ignoreLogTargets)); - @JsonKey(ignore: true) + /// Create a copy of VeilidFFIConfigLoggingApi + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$VeilidFFIConfigLoggingApiImplCopyWith<_$VeilidFFIConfigLoggingApiImpl> @@ -686,8 +722,11 @@ abstract class _VeilidFFIConfigLoggingApi implements VeilidFFIConfigLoggingApi { VeilidConfigLogLevel get level; @override List get ignoreLogTargets; + + /// Create a copy of VeilidFFIConfigLoggingApi + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$VeilidFFIConfigLoggingApiImplCopyWith<_$VeilidFFIConfigLoggingApiImpl> get copyWith => throw _privateConstructorUsedError; } @@ -702,8 +741,12 @@ mixin _$VeilidFFIConfigLoggingFlame { bool get enabled => throw _privateConstructorUsedError; String get path => throw _privateConstructorUsedError; + /// Serializes this VeilidFFIConfigLoggingFlame to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of VeilidFFIConfigLoggingFlame + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $VeilidFFIConfigLoggingFlameCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -730,6 +773,8 @@ class _$VeilidFFIConfigLoggingFlameCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of VeilidFFIConfigLoggingFlame + /// with the given fields replaced by the non-null parameter values. 
@pragma('vm:prefer-inline') @override $Res call({ @@ -771,6 +816,8 @@ class __$$VeilidFFIConfigLoggingFlameImplCopyWithImpl<$Res> $Res Function(_$VeilidFFIConfigLoggingFlameImpl) _then) : super(_value, _then); + /// Create a copy of VeilidFFIConfigLoggingFlame + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -830,11 +877,13 @@ class _$VeilidFFIConfigLoggingFlameImpl (identical(other.path, path) || other.path == path)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, enabled, path); - @JsonKey(ignore: true) + /// Create a copy of VeilidFFIConfigLoggingFlame + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$VeilidFFIConfigLoggingFlameImplCopyWith<_$VeilidFFIConfigLoggingFlameImpl> @@ -862,8 +911,11 @@ abstract class _VeilidFFIConfigLoggingFlame bool get enabled; @override String get path; + + /// Create a copy of VeilidFFIConfigLoggingFlame + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$VeilidFFIConfigLoggingFlameImplCopyWith<_$VeilidFFIConfigLoggingFlameImpl> get copyWith => throw _privateConstructorUsedError; } @@ -881,8 +933,12 @@ mixin _$VeilidFFIConfigLogging { VeilidFFIConfigLoggingApi get api => throw _privateConstructorUsedError; VeilidFFIConfigLoggingFlame get flame => throw _privateConstructorUsedError; + /// Serializes this VeilidFFIConfigLogging to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of VeilidFFIConfigLogging + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $VeilidFFIConfigLoggingCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -916,6 +972,8 @@ class _$VeilidFFIConfigLoggingCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of VeilidFFIConfigLogging + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -944,6 +1002,8 @@ class _$VeilidFFIConfigLoggingCopyWithImpl<$Res, ) as $Val); } + /// Create a copy of VeilidFFIConfigLogging + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $VeilidFFIConfigLoggingTerminalCopyWith<$Res> get terminal { @@ -953,6 +1013,8 @@ class _$VeilidFFIConfigLoggingCopyWithImpl<$Res, }); } + /// Create a copy of VeilidFFIConfigLogging + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $VeilidFFIConfigLoggingOtlpCopyWith<$Res> get otlp { @@ -961,6 +1023,8 @@ class _$VeilidFFIConfigLoggingCopyWithImpl<$Res, }); } + /// Create a copy of VeilidFFIConfigLogging + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $VeilidFFIConfigLoggingApiCopyWith<$Res> get api { @@ -969,6 +1033,8 @@ class _$VeilidFFIConfigLoggingCopyWithImpl<$Res, }); } + /// Create a copy of VeilidFFIConfigLogging + /// with the given fields replaced by the non-null parameter values. 
@override @pragma('vm:prefer-inline') $VeilidFFIConfigLoggingFlameCopyWith<$Res> get flame { @@ -1013,6 +1079,8 @@ class __$$VeilidFFIConfigLoggingImplCopyWithImpl<$Res> $Res Function(_$VeilidFFIConfigLoggingImpl) _then) : super(_value, _then); + /// Create a copy of VeilidFFIConfigLogging + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -1093,11 +1161,13 @@ class _$VeilidFFIConfigLoggingImpl (identical(other.flame, flame) || other.flame == flame)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, terminal, otlp, api, flame); - @JsonKey(ignore: true) + /// Create a copy of VeilidFFIConfigLogging + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$VeilidFFIConfigLoggingImplCopyWith<_$VeilidFFIConfigLoggingImpl> @@ -1131,8 +1201,11 @@ abstract class _VeilidFFIConfigLogging implements VeilidFFIConfigLogging { VeilidFFIConfigLoggingApi get api; @override VeilidFFIConfigLoggingFlame get flame; + + /// Create a copy of VeilidFFIConfigLogging + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$VeilidFFIConfigLoggingImplCopyWith<_$VeilidFFIConfigLoggingImpl> get copyWith => throw _privateConstructorUsedError; } @@ -1145,8 +1218,12 @@ VeilidFFIConfig _$VeilidFFIConfigFromJson(Map json) { mixin _$VeilidFFIConfig { VeilidFFIConfigLogging get logging => throw _privateConstructorUsedError; + /// Serializes this VeilidFFIConfig to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of VeilidFFIConfig + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $VeilidFFIConfigCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -1172,6 +1249,8 @@ class _$VeilidFFIConfigCopyWithImpl<$Res, $Val extends VeilidFFIConfig> // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of VeilidFFIConfig + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -1185,6 +1264,8 @@ class _$VeilidFFIConfigCopyWithImpl<$Res, $Val extends VeilidFFIConfig> ) as $Val); } + /// Create a copy of VeilidFFIConfig + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $VeilidFFIConfigLoggingCopyWith<$Res> get logging { @@ -1216,6 +1297,8 @@ class __$$VeilidFFIConfigImplCopyWithImpl<$Res> _$VeilidFFIConfigImpl _value, $Res Function(_$VeilidFFIConfigImpl) _then) : super(_value, _then); + /// Create a copy of VeilidFFIConfig + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -1264,11 +1347,13 @@ class _$VeilidFFIConfigImpl (identical(other.logging, logging) || other.logging == logging)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, logging); - @JsonKey(ignore: true) + /// Create a copy of VeilidFFIConfig + /// with the given fields replaced by the non-null parameter values. 
+ @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$VeilidFFIConfigImplCopyWith<_$VeilidFFIConfigImpl> get copyWith => @@ -1292,8 +1377,11 @@ abstract class _VeilidFFIConfig implements VeilidFFIConfig { @override VeilidFFIConfigLogging get logging; + + /// Create a copy of VeilidFFIConfig + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$VeilidFFIConfigImplCopyWith<_$VeilidFFIConfigImpl> get copyWith => throw _privateConstructorUsedError; } @@ -1311,8 +1399,12 @@ mixin _$VeilidWASMConfigLoggingPerformance { bool get logsInConsole => throw _privateConstructorUsedError; List get ignoreLogTargets => throw _privateConstructorUsedError; + /// Serializes this VeilidWASMConfigLoggingPerformance to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of VeilidWASMConfigLoggingPerformance + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $VeilidWASMConfigLoggingPerformanceCopyWith< VeilidWASMConfigLoggingPerformance> get copyWith => throw _privateConstructorUsedError; @@ -1345,6 +1437,8 @@ class _$VeilidWASMConfigLoggingPerformanceCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of VeilidWASMConfigLoggingPerformance + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -1406,6 +1500,8 @@ class __$$VeilidWASMConfigLoggingPerformanceImplCopyWithImpl<$Res> $Res Function(_$VeilidWASMConfigLoggingPerformanceImpl) _then) : super(_value, _then); + /// Create a copy of VeilidWASMConfigLoggingPerformance + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -1507,12 +1603,14 @@ class _$VeilidWASMConfigLoggingPerformanceImpl .equals(other._ignoreLogTargets, _ignoreLogTargets)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, enabled, level, logsInTimings, logsInConsole, const DeepCollectionEquality().hash(_ignoreLogTargets)); - @JsonKey(ignore: true) + /// Create a copy of VeilidWASMConfigLoggingPerformance + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$VeilidWASMConfigLoggingPerformanceImplCopyWith< @@ -1552,8 +1650,11 @@ abstract class _VeilidWASMConfigLoggingPerformance bool get logsInConsole; @override List get ignoreLogTargets; + + /// Create a copy of VeilidWASMConfigLoggingPerformance + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$VeilidWASMConfigLoggingPerformanceImplCopyWith< _$VeilidWASMConfigLoggingPerformanceImpl> get copyWith => throw _privateConstructorUsedError; @@ -1570,8 +1671,12 @@ mixin _$VeilidWASMConfigLoggingApi { VeilidConfigLogLevel get level => throw _privateConstructorUsedError; List get ignoreLogTargets => throw _privateConstructorUsedError; + /// Serializes this VeilidWASMConfigLoggingApi to a JSON map. 
Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of VeilidWASMConfigLoggingApi + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $VeilidWASMConfigLoggingApiCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -1600,6 +1705,8 @@ class _$VeilidWASMConfigLoggingApiCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of VeilidWASMConfigLoggingApi + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -1649,6 +1756,8 @@ class __$$VeilidWASMConfigLoggingApiImplCopyWithImpl<$Res> $Res Function(_$VeilidWASMConfigLoggingApiImpl) _then) : super(_value, _then); + /// Create a copy of VeilidWASMConfigLoggingApi + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -1728,12 +1837,14 @@ class _$VeilidWASMConfigLoggingApiImpl .equals(other._ignoreLogTargets, _ignoreLogTargets)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, enabled, level, const DeepCollectionEquality().hash(_ignoreLogTargets)); - @JsonKey(ignore: true) + /// Create a copy of VeilidWASMConfigLoggingApi + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$VeilidWASMConfigLoggingApiImplCopyWith<_$VeilidWASMConfigLoggingApiImpl> @@ -1764,8 +1875,11 @@ abstract class _VeilidWASMConfigLoggingApi VeilidConfigLogLevel get level; @override List get ignoreLogTargets; + + /// Create a copy of VeilidWASMConfigLoggingApi + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$VeilidWASMConfigLoggingApiImplCopyWith<_$VeilidWASMConfigLoggingApiImpl> get copyWith => throw _privateConstructorUsedError; } @@ -1781,8 +1895,12 @@ mixin _$VeilidWASMConfigLogging { throw _privateConstructorUsedError; VeilidWASMConfigLoggingApi get api => throw _privateConstructorUsedError; + /// Serializes this VeilidWASMConfigLogging to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of VeilidWASMConfigLogging + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $VeilidWASMConfigLoggingCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -1812,6 +1930,8 @@ class _$VeilidWASMConfigLoggingCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of VeilidWASMConfigLogging + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -1830,6 +1950,8 @@ class _$VeilidWASMConfigLoggingCopyWithImpl<$Res, ) as $Val); } + /// Create a copy of VeilidWASMConfigLogging + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $VeilidWASMConfigLoggingPerformanceCopyWith<$Res> get performance { @@ -1839,6 +1961,8 @@ class _$VeilidWASMConfigLoggingCopyWithImpl<$Res, }); } + /// Create a copy of VeilidWASMConfigLogging + /// with the given fields replaced by the non-null parameter values. 
@override @pragma('vm:prefer-inline') $VeilidWASMConfigLoggingApiCopyWith<$Res> get api { @@ -1877,6 +2001,8 @@ class __$$VeilidWASMConfigLoggingImplCopyWithImpl<$Res> $Res Function(_$VeilidWASMConfigLoggingImpl) _then) : super(_value, _then); + /// Create a copy of VeilidWASMConfigLogging + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -1936,11 +2062,13 @@ class _$VeilidWASMConfigLoggingImpl (identical(other.api, api) || other.api == api)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, performance, api); - @JsonKey(ignore: true) + /// Create a copy of VeilidWASMConfigLogging + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$VeilidWASMConfigLoggingImplCopyWith<_$VeilidWASMConfigLoggingImpl> @@ -1968,8 +2096,11 @@ abstract class _VeilidWASMConfigLogging implements VeilidWASMConfigLogging { VeilidWASMConfigLoggingPerformance get performance; @override VeilidWASMConfigLoggingApi get api; + + /// Create a copy of VeilidWASMConfigLogging + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$VeilidWASMConfigLoggingImplCopyWith<_$VeilidWASMConfigLoggingImpl> get copyWith => throw _privateConstructorUsedError; } @@ -1982,8 +2113,12 @@ VeilidWASMConfig _$VeilidWASMConfigFromJson(Map json) { mixin _$VeilidWASMConfig { VeilidWASMConfigLogging get logging => throw _privateConstructorUsedError; + /// Serializes this VeilidWASMConfig to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of VeilidWASMConfig + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $VeilidWASMConfigCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -2009,6 +2144,8 @@ class _$VeilidWASMConfigCopyWithImpl<$Res, $Val extends VeilidWASMConfig> // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of VeilidWASMConfig + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -2022,6 +2159,8 @@ class _$VeilidWASMConfigCopyWithImpl<$Res, $Val extends VeilidWASMConfig> ) as $Val); } + /// Create a copy of VeilidWASMConfig + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $VeilidWASMConfigLoggingCopyWith<$Res> get logging { @@ -2053,6 +2192,8 @@ class __$$VeilidWASMConfigImplCopyWithImpl<$Res> $Res Function(_$VeilidWASMConfigImpl) _then) : super(_value, _then); + /// Create a copy of VeilidWASMConfig + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -2101,11 +2242,13 @@ class _$VeilidWASMConfigImpl (identical(other.logging, logging) || other.logging == logging)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, logging); - @JsonKey(ignore: true) + /// Create a copy of VeilidWASMConfig + /// with the given fields replaced by the non-null parameter values. 
+ @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$VeilidWASMConfigImplCopyWith<_$VeilidWASMConfigImpl> get copyWith => @@ -2130,8 +2273,11 @@ abstract class _VeilidWASMConfig implements VeilidWASMConfig { @override VeilidWASMConfigLogging get logging; + + /// Create a copy of VeilidWASMConfig + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$VeilidWASMConfigImplCopyWith<_$VeilidWASMConfigImpl> get copyWith => throw _privateConstructorUsedError; } @@ -2147,8 +2293,12 @@ mixin _$VeilidConfigHTTPS { String get path => throw _privateConstructorUsedError; String? get url => throw _privateConstructorUsedError; + /// Serializes this VeilidConfigHTTPS to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of VeilidConfigHTTPS + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $VeilidConfigHTTPSCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -2172,6 +2322,8 @@ class _$VeilidConfigHTTPSCopyWithImpl<$Res, $Val extends VeilidConfigHTTPS> // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of VeilidConfigHTTPS + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -2220,6 +2372,8 @@ class __$$VeilidConfigHTTPSImplCopyWithImpl<$Res> $Res Function(_$VeilidConfigHTTPSImpl) _then) : super(_value, _then); + /// Create a copy of VeilidConfigHTTPS + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -2300,12 +2454,14 @@ class _$VeilidConfigHTTPSImpl (identical(other.url, url) || other.url == url)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, enabled, listenAddress, path, url); - @JsonKey(ignore: true) + /// Create a copy of VeilidConfigHTTPS + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$VeilidConfigHTTPSImplCopyWith<_$VeilidConfigHTTPSImpl> get copyWith => @@ -2338,8 +2494,11 @@ abstract class _VeilidConfigHTTPS implements VeilidConfigHTTPS { String get path; @override String? get url; + + /// Create a copy of VeilidConfigHTTPS + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$VeilidConfigHTTPSImplCopyWith<_$VeilidConfigHTTPSImpl> get copyWith => throw _privateConstructorUsedError; } @@ -2355,8 +2514,12 @@ mixin _$VeilidConfigHTTP { String get path => throw _privateConstructorUsedError; String? get url => throw _privateConstructorUsedError; + /// Serializes this VeilidConfigHTTP to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of VeilidConfigHTTP + /// with the given fields replaced by the non-null parameter values. 
+ @JsonKey(includeFromJson: false, includeToJson: false) $VeilidConfigHTTPCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -2380,6 +2543,8 @@ class _$VeilidConfigHTTPCopyWithImpl<$Res, $Val extends VeilidConfigHTTP> // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of VeilidConfigHTTP + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -2428,6 +2593,8 @@ class __$$VeilidConfigHTTPImplCopyWithImpl<$Res> $Res Function(_$VeilidConfigHTTPImpl) _then) : super(_value, _then); + /// Create a copy of VeilidConfigHTTP + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -2508,12 +2675,14 @@ class _$VeilidConfigHTTPImpl (identical(other.url, url) || other.url == url)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, enabled, listenAddress, path, url); - @JsonKey(ignore: true) + /// Create a copy of VeilidConfigHTTP + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$VeilidConfigHTTPImplCopyWith<_$VeilidConfigHTTPImpl> get copyWith => @@ -2546,8 +2715,11 @@ abstract class _VeilidConfigHTTP implements VeilidConfigHTTP { String get path; @override String? get url; + + /// Create a copy of VeilidConfigHTTP + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$VeilidConfigHTTPImplCopyWith<_$VeilidConfigHTTPImpl> get copyWith => throw _privateConstructorUsedError; } @@ -2562,8 +2734,12 @@ mixin _$VeilidConfigApplication { VeilidConfigHTTPS get https => throw _privateConstructorUsedError; VeilidConfigHTTP get http => throw _privateConstructorUsedError; + /// Serializes this VeilidConfigApplication to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of VeilidConfigApplication + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $VeilidConfigApplicationCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -2591,6 +2767,8 @@ class _$VeilidConfigApplicationCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of VeilidConfigApplication + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -2609,6 +2787,8 @@ class _$VeilidConfigApplicationCopyWithImpl<$Res, ) as $Val); } + /// Create a copy of VeilidConfigApplication + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $VeilidConfigHTTPSCopyWith<$Res> get https { @@ -2617,6 +2797,8 @@ class _$VeilidConfigApplicationCopyWithImpl<$Res, }); } + /// Create a copy of VeilidConfigApplication + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $VeilidConfigHTTPCopyWith<$Res> get http { @@ -2653,6 +2835,8 @@ class __$$VeilidConfigApplicationImplCopyWithImpl<$Res> $Res Function(_$VeilidConfigApplicationImpl) _then) : super(_value, _then); + /// Create a copy of VeilidConfigApplication + /// with the given fields replaced by the non-null parameter values. 
@pragma('vm:prefer-inline') @override $Res call({ @@ -2711,11 +2895,13 @@ class _$VeilidConfigApplicationImpl (identical(other.http, http) || other.http == http)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, https, http); - @JsonKey(ignore: true) + /// Create a copy of VeilidConfigApplication + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$VeilidConfigApplicationImplCopyWith<_$VeilidConfigApplicationImpl> @@ -2742,8 +2928,11 @@ abstract class _VeilidConfigApplication implements VeilidConfigApplication { VeilidConfigHTTPS get https; @override VeilidConfigHTTP get http; + + /// Create a copy of VeilidConfigApplication + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$VeilidConfigApplicationImplCopyWith<_$VeilidConfigApplicationImpl> get copyWith => throw _privateConstructorUsedError; } @@ -2759,8 +2948,12 @@ mixin _$VeilidConfigUDP { String get listenAddress => throw _privateConstructorUsedError; String? get publicAddress => throw _privateConstructorUsedError; + /// Serializes this VeilidConfigUDP to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of VeilidConfigUDP + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $VeilidConfigUDPCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -2788,6 +2981,8 @@ class _$VeilidConfigUDPCopyWithImpl<$Res, $Val extends VeilidConfigUDP> // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of VeilidConfigUDP + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -2840,6 +3035,8 @@ class __$$VeilidConfigUDPImplCopyWithImpl<$Res> _$VeilidConfigUDPImpl _value, $Res Function(_$VeilidConfigUDPImpl) _then) : super(_value, _then); + /// Create a copy of VeilidConfigUDP + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -2922,12 +3119,14 @@ class _$VeilidConfigUDPImpl other.publicAddress == publicAddress)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash( runtimeType, enabled, socketPoolSize, listenAddress, publicAddress); - @JsonKey(ignore: true) + /// Create a copy of VeilidConfigUDP + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$VeilidConfigUDPImplCopyWith<_$VeilidConfigUDPImpl> get copyWith => @@ -2960,8 +3159,11 @@ abstract class _VeilidConfigUDP implements VeilidConfigUDP { String get listenAddress; @override String? get publicAddress; + + /// Create a copy of VeilidConfigUDP + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$VeilidConfigUDPImplCopyWith<_$VeilidConfigUDPImpl> get copyWith => throw _privateConstructorUsedError; } @@ -2978,8 +3180,12 @@ mixin _$VeilidConfigTCP { String get listenAddress => throw _privateConstructorUsedError; String? 
get publicAddress => throw _privateConstructorUsedError; + /// Serializes this VeilidConfigTCP to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of VeilidConfigTCP + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $VeilidConfigTCPCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -3008,6 +3214,8 @@ class _$VeilidConfigTCPCopyWithImpl<$Res, $Val extends VeilidConfigTCP> // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of VeilidConfigTCP + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -3066,6 +3274,8 @@ class __$$VeilidConfigTCPImplCopyWithImpl<$Res> _$VeilidConfigTCPImpl _value, $Res Function(_$VeilidConfigTCPImpl) _then) : super(_value, _then); + /// Create a copy of VeilidConfigTCP + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -3158,12 +3368,14 @@ class _$VeilidConfigTCPImpl other.publicAddress == publicAddress)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, connect, listen, maxConnections, listenAddress, publicAddress); - @JsonKey(ignore: true) + /// Create a copy of VeilidConfigTCP + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$VeilidConfigTCPImplCopyWith<_$VeilidConfigTCPImpl> get copyWith => @@ -3199,8 +3411,11 @@ abstract class _VeilidConfigTCP implements VeilidConfigTCP { String get listenAddress; @override String? get publicAddress; + + /// Create a copy of VeilidConfigTCP + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$VeilidConfigTCPImplCopyWith<_$VeilidConfigTCPImpl> get copyWith => throw _privateConstructorUsedError; } @@ -3218,8 +3433,12 @@ mixin _$VeilidConfigWS { String get path => throw _privateConstructorUsedError; String? get url => throw _privateConstructorUsedError; + /// Serializes this VeilidConfigWS to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of VeilidConfigWS + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $VeilidConfigWSCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -3249,6 +3468,8 @@ class _$VeilidConfigWSCopyWithImpl<$Res, $Val extends VeilidConfigWS> // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of VeilidConfigWS + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -3313,6 +3534,8 @@ class __$$VeilidConfigWSImplCopyWithImpl<$Res> _$VeilidConfigWSImpl _value, $Res Function(_$VeilidConfigWSImpl) _then) : super(_value, _then); + /// Create a copy of VeilidConfigWS + /// with the given fields replaced by the non-null parameter values. 
@pragma('vm:prefer-inline') @override $Res call({ @@ -3414,12 +3637,14 @@ class _$VeilidConfigWSImpl (identical(other.url, url) || other.url == url)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash( runtimeType, connect, listen, maxConnections, listenAddress, path, url); - @JsonKey(ignore: true) + /// Create a copy of VeilidConfigWS + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$VeilidConfigWSImplCopyWith<_$VeilidConfigWSImpl> get copyWith => @@ -3458,8 +3683,11 @@ abstract class _VeilidConfigWS implements VeilidConfigWS { String get path; @override String? get url; + + /// Create a copy of VeilidConfigWS + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$VeilidConfigWSImplCopyWith<_$VeilidConfigWSImpl> get copyWith => throw _privateConstructorUsedError; } @@ -3477,8 +3705,12 @@ mixin _$VeilidConfigWSS { String get path => throw _privateConstructorUsedError; String? get url => throw _privateConstructorUsedError; + /// Serializes this VeilidConfigWSS to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of VeilidConfigWSS + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $VeilidConfigWSSCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -3508,6 +3740,8 @@ class _$VeilidConfigWSSCopyWithImpl<$Res, $Val extends VeilidConfigWSS> // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of VeilidConfigWSS + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -3572,6 +3806,8 @@ class __$$VeilidConfigWSSImplCopyWithImpl<$Res> _$VeilidConfigWSSImpl _value, $Res Function(_$VeilidConfigWSSImpl) _then) : super(_value, _then); + /// Create a copy of VeilidConfigWSS + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -3673,12 +3909,14 @@ class _$VeilidConfigWSSImpl (identical(other.url, url) || other.url == url)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash( runtimeType, connect, listen, maxConnections, listenAddress, path, url); - @JsonKey(ignore: true) + /// Create a copy of VeilidConfigWSS + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$VeilidConfigWSSImplCopyWith<_$VeilidConfigWSSImpl> get copyWith => @@ -3717,8 +3955,11 @@ abstract class _VeilidConfigWSS implements VeilidConfigWSS { String get path; @override String? get url; + + /// Create a copy of VeilidConfigWSS + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$VeilidConfigWSSImplCopyWith<_$VeilidConfigWSSImpl> get copyWith => throw _privateConstructorUsedError; } @@ -3734,8 +3975,12 @@ mixin _$VeilidConfigProtocol { VeilidConfigWS get ws => throw _privateConstructorUsedError; VeilidConfigWSS get wss => throw _privateConstructorUsedError; + /// Serializes this VeilidConfigProtocol to a JSON map. 
Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of VeilidConfigProtocol + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $VeilidConfigProtocolCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -3769,6 +4014,8 @@ class _$VeilidConfigProtocolCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of VeilidConfigProtocol + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -3797,6 +4044,8 @@ class _$VeilidConfigProtocolCopyWithImpl<$Res, ) as $Val); } + /// Create a copy of VeilidConfigProtocol + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $VeilidConfigUDPCopyWith<$Res> get udp { @@ -3805,6 +4054,8 @@ class _$VeilidConfigProtocolCopyWithImpl<$Res, }); } + /// Create a copy of VeilidConfigProtocol + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $VeilidConfigTCPCopyWith<$Res> get tcp { @@ -3813,6 +4064,8 @@ class _$VeilidConfigProtocolCopyWithImpl<$Res, }); } + /// Create a copy of VeilidConfigProtocol + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $VeilidConfigWSCopyWith<$Res> get ws { @@ -3821,6 +4074,8 @@ class _$VeilidConfigProtocolCopyWithImpl<$Res, }); } + /// Create a copy of VeilidConfigProtocol + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $VeilidConfigWSSCopyWith<$Res> get wss { @@ -3862,6 +4117,8 @@ class __$$VeilidConfigProtocolImplCopyWithImpl<$Res> $Res Function(_$VeilidConfigProtocolImpl) _then) : super(_value, _then); + /// Create a copy of VeilidConfigProtocol + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -3941,11 +4198,13 @@ class _$VeilidConfigProtocolImpl (identical(other.wss, wss) || other.wss == wss)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, udp, tcp, ws, wss); - @JsonKey(ignore: true) + /// Create a copy of VeilidConfigProtocol + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$VeilidConfigProtocolImplCopyWith<_$VeilidConfigProtocolImpl> @@ -3979,8 +4238,11 @@ abstract class _VeilidConfigProtocol implements VeilidConfigProtocol { VeilidConfigWS get ws; @override VeilidConfigWSS get wss; + + /// Create a copy of VeilidConfigProtocol + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$VeilidConfigProtocolImplCopyWith<_$VeilidConfigProtocolImpl> get copyWith => throw _privateConstructorUsedError; } @@ -3995,8 +4257,12 @@ mixin _$VeilidConfigTLS { String get privateKeyPath => throw _privateConstructorUsedError; int get connectionInitialTimeoutMs => throw _privateConstructorUsedError; + /// Serializes this VeilidConfigTLS to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of VeilidConfigTLS + /// with the given fields replaced by the non-null parameter values. 
+ @JsonKey(includeFromJson: false, includeToJson: false) $VeilidConfigTLSCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -4023,6 +4289,8 @@ class _$VeilidConfigTLSCopyWithImpl<$Res, $Val extends VeilidConfigTLS> // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of VeilidConfigTLS + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -4069,6 +4337,8 @@ class __$$VeilidConfigTLSImplCopyWithImpl<$Res> _$VeilidConfigTLSImpl _value, $Res Function(_$VeilidConfigTLSImpl) _then) : super(_value, _then); + /// Create a copy of VeilidConfigTLS + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -4144,12 +4414,14 @@ class _$VeilidConfigTLSImpl connectionInitialTimeoutMs)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash( runtimeType, certificatePath, privateKeyPath, connectionInitialTimeoutMs); - @JsonKey(ignore: true) + /// Create a copy of VeilidConfigTLS + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$VeilidConfigTLSImplCopyWith<_$VeilidConfigTLSImpl> get copyWith => @@ -4179,8 +4451,11 @@ abstract class _VeilidConfigTLS implements VeilidConfigTLS { String get privateKeyPath; @override int get connectionInitialTimeoutMs; + + /// Create a copy of VeilidConfigTLS + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$VeilidConfigTLSImplCopyWith<_$VeilidConfigTLSImpl> get copyWith => throw _privateConstructorUsedError; } @@ -4214,8 +4489,12 @@ mixin _$VeilidConfigDHT { int get memberWatchLimit => throw _privateConstructorUsedError; int get maxWatchExpirationMs => throw _privateConstructorUsedError; + /// Serializes this VeilidConfigDHT to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of VeilidConfigDHT + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $VeilidConfigDHTCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -4261,6 +4540,8 @@ class _$VeilidConfigDHTCopyWithImpl<$Res, $Val extends VeilidConfigDHT> // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of VeilidConfigDHT + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -4421,6 +4702,8 @@ class __$$VeilidConfigDHTImplCopyWithImpl<$Res> _$VeilidConfigDHTImpl _value, $Res Function(_$VeilidConfigDHTImpl) _then) : super(_value, _then); + /// Create a copy of VeilidConfigDHT + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -4712,7 +4995,7 @@ class _$VeilidConfigDHTImpl other.maxWatchExpirationMs == maxWatchExpirationMs)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hashAll([ runtimeType, @@ -4740,7 +5023,9 @@ class _$VeilidConfigDHTImpl maxWatchExpirationMs ]); - @JsonKey(ignore: true) + /// Create a copy of VeilidConfigDHT + /// with the given fields replaced by the non-null parameter values. 
+ @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$VeilidConfigDHTImplCopyWith<_$VeilidConfigDHTImpl> get copyWith => @@ -4827,8 +5112,11 @@ abstract class _VeilidConfigDHT implements VeilidConfigDHT { int get memberWatchLimit; @override int get maxWatchExpirationMs; + + /// Create a copy of VeilidConfigDHT + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$VeilidConfigDHTImplCopyWith<_$VeilidConfigDHTImpl> get copyWith => throw _privateConstructorUsedError; } @@ -4847,8 +5135,12 @@ mixin _$VeilidConfigRPC { int? get maxTimestampBehindMs => throw _privateConstructorUsedError; int? get maxTimestampAheadMs => throw _privateConstructorUsedError; + /// Serializes this VeilidConfigRPC to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of VeilidConfigRPC + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $VeilidConfigRPCCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -4879,6 +5171,8 @@ class _$VeilidConfigRPCCopyWithImpl<$Res, $Val extends VeilidConfigRPC> // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of VeilidConfigRPC + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -4949,6 +5243,8 @@ class __$$VeilidConfigRPCImplCopyWithImpl<$Res> _$VeilidConfigRPCImpl _value, $Res Function(_$VeilidConfigRPCImpl) _then) : super(_value, _then); + /// Create a copy of VeilidConfigRPC + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -5065,7 +5361,7 @@ class _$VeilidConfigRPCImpl other.maxTimestampAheadMs == maxTimestampAheadMs)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash( runtimeType, @@ -5077,7 +5373,9 @@ class _$VeilidConfigRPCImpl maxTimestampBehindMs, maxTimestampAheadMs); - @JsonKey(ignore: true) + /// Create a copy of VeilidConfigRPC + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$VeilidConfigRPCImplCopyWith<_$VeilidConfigRPCImpl> get copyWith => @@ -5119,8 +5417,11 @@ abstract class _VeilidConfigRPC implements VeilidConfigRPC { int? get maxTimestampBehindMs; @override int? get maxTimestampAheadMs; + + /// Create a copy of VeilidConfigRPC + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$VeilidConfigRPCImplCopyWith<_$VeilidConfigRPCImpl> get copyWith => throw _privateConstructorUsedError; } @@ -5143,8 +5444,12 @@ mixin _$VeilidConfigRoutingTable { int get limitAttachedGood => throw _privateConstructorUsedError; int get limitAttachedWeak => throw _privateConstructorUsedError; + /// Serializes this VeilidConfigRoutingTable to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of VeilidConfigRoutingTable + /// with the given fields replaced by the non-null parameter values. 
+ @JsonKey(includeFromJson: false, includeToJson: false) $VeilidConfigRoutingTableCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -5177,6 +5482,8 @@ class _$VeilidConfigRoutingTableCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of VeilidConfigRoutingTable + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -5256,6 +5563,8 @@ class __$$VeilidConfigRoutingTableImplCopyWithImpl<$Res> $Res Function(_$VeilidConfigRoutingTableImpl) _then) : super(_value, _then); + /// Create a copy of VeilidConfigRoutingTable + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -5403,7 +5712,7 @@ class _$VeilidConfigRoutingTableImpl other.limitAttachedWeak == limitAttachedWeak)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash( runtimeType, @@ -5416,7 +5725,9 @@ class _$VeilidConfigRoutingTableImpl limitAttachedGood, limitAttachedWeak); - @JsonKey(ignore: true) + /// Create a copy of VeilidConfigRoutingTable + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$VeilidConfigRoutingTableImplCopyWith<_$VeilidConfigRoutingTableImpl> @@ -5461,8 +5772,11 @@ abstract class _VeilidConfigRoutingTable implements VeilidConfigRoutingTable { int get limitAttachedGood; @override int get limitAttachedWeak; + + /// Create a copy of VeilidConfigRoutingTable + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$VeilidConfigRoutingTableImplCopyWith<_$VeilidConfigRoutingTableImpl> get copyWith => throw _privateConstructorUsedError; } @@ -5494,8 +5808,12 @@ mixin _$VeilidConfigNetwork { VeilidConfigProtocol get protocol => throw _privateConstructorUsedError; String? get networkKeyPassword => throw _privateConstructorUsedError; + /// Serializes this VeilidConfigNetwork to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of VeilidConfigNetwork + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $VeilidConfigNetworkCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -5545,6 +5863,8 @@ class _$VeilidConfigNetworkCopyWithImpl<$Res, $Val extends VeilidConfigNetwork> // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of VeilidConfigNetwork + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -5648,6 +5968,8 @@ class _$VeilidConfigNetworkCopyWithImpl<$Res, $Val extends VeilidConfigNetwork> ) as $Val); } + /// Create a copy of VeilidConfigNetwork + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $VeilidConfigRoutingTableCopyWith<$Res> get routingTable { @@ -5657,6 +5979,8 @@ class _$VeilidConfigNetworkCopyWithImpl<$Res, $Val extends VeilidConfigNetwork> }); } + /// Create a copy of VeilidConfigNetwork + /// with the given fields replaced by the non-null parameter values. 
@override @pragma('vm:prefer-inline') $VeilidConfigRPCCopyWith<$Res> get rpc { @@ -5665,6 +5989,8 @@ class _$VeilidConfigNetworkCopyWithImpl<$Res, $Val extends VeilidConfigNetwork> }); } + /// Create a copy of VeilidConfigNetwork + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $VeilidConfigDHTCopyWith<$Res> get dht { @@ -5673,6 +5999,8 @@ class _$VeilidConfigNetworkCopyWithImpl<$Res, $Val extends VeilidConfigNetwork> }); } + /// Create a copy of VeilidConfigNetwork + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $VeilidConfigTLSCopyWith<$Res> get tls { @@ -5681,6 +6009,8 @@ class _$VeilidConfigNetworkCopyWithImpl<$Res, $Val extends VeilidConfigNetwork> }); } + /// Create a copy of VeilidConfigNetwork + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $VeilidConfigApplicationCopyWith<$Res> get application { @@ -5689,6 +6019,8 @@ class _$VeilidConfigNetworkCopyWithImpl<$Res, $Val extends VeilidConfigNetwork> }); } + /// Create a copy of VeilidConfigNetwork + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $VeilidConfigProtocolCopyWith<$Res> get protocol { @@ -5749,6 +6081,8 @@ class __$$VeilidConfigNetworkImplCopyWithImpl<$Res> $Res Function(_$VeilidConfigNetworkImpl) _then) : super(_value, _then); + /// Create a copy of VeilidConfigNetwork + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -6008,7 +6342,7 @@ class _$VeilidConfigNetworkImpl other.networkKeyPassword == networkKeyPassword)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hashAll([ runtimeType, @@ -6033,7 +6367,9 @@ class _$VeilidConfigNetworkImpl networkKeyPassword ]); - @JsonKey(ignore: true) + /// Create a copy of VeilidConfigNetwork + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$VeilidConfigNetworkImplCopyWith<_$VeilidConfigNetworkImpl> get copyWith => @@ -6111,8 +6447,11 @@ abstract class _VeilidConfigNetwork implements VeilidConfigNetwork { VeilidConfigProtocol get protocol; @override String? get networkKeyPassword; + + /// Create a copy of VeilidConfigNetwork + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$VeilidConfigNetworkImplCopyWith<_$VeilidConfigNetworkImpl> get copyWith => throw _privateConstructorUsedError; } @@ -6127,8 +6466,12 @@ mixin _$VeilidConfigTableStore { String get directory => throw _privateConstructorUsedError; bool get delete => throw _privateConstructorUsedError; + /// Serializes this VeilidConfigTableStore to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of VeilidConfigTableStore + /// with the given fields replaced by the non-null parameter values. 
+ @JsonKey(includeFromJson: false, includeToJson: false) $VeilidConfigTableStoreCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -6153,6 +6496,8 @@ class _$VeilidConfigTableStoreCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of VeilidConfigTableStore + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -6194,6 +6539,8 @@ class __$$VeilidConfigTableStoreImplCopyWithImpl<$Res> $Res Function(_$VeilidConfigTableStoreImpl) _then) : super(_value, _then); + /// Create a copy of VeilidConfigTableStore + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -6253,11 +6600,13 @@ class _$VeilidConfigTableStoreImpl (identical(other.delete, delete) || other.delete == delete)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, directory, delete); - @JsonKey(ignore: true) + /// Create a copy of VeilidConfigTableStore + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$VeilidConfigTableStoreImplCopyWith<_$VeilidConfigTableStoreImpl> @@ -6284,8 +6633,11 @@ abstract class _VeilidConfigTableStore implements VeilidConfigTableStore { String get directory; @override bool get delete; + + /// Create a copy of VeilidConfigTableStore + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$VeilidConfigTableStoreImplCopyWith<_$VeilidConfigTableStoreImpl> get copyWith => throw _privateConstructorUsedError; } @@ -6300,8 +6652,12 @@ mixin _$VeilidConfigBlockStore { String get directory => throw _privateConstructorUsedError; bool get delete => throw _privateConstructorUsedError; + /// Serializes this VeilidConfigBlockStore to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of VeilidConfigBlockStore + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $VeilidConfigBlockStoreCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -6326,6 +6682,8 @@ class _$VeilidConfigBlockStoreCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of VeilidConfigBlockStore + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -6367,6 +6725,8 @@ class __$$VeilidConfigBlockStoreImplCopyWithImpl<$Res> $Res Function(_$VeilidConfigBlockStoreImpl) _then) : super(_value, _then); + /// Create a copy of VeilidConfigBlockStore + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -6426,11 +6786,13 @@ class _$VeilidConfigBlockStoreImpl (identical(other.delete, delete) || other.delete == delete)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, directory, delete); - @JsonKey(ignore: true) + /// Create a copy of VeilidConfigBlockStore + /// with the given fields replaced by the non-null parameter values. 
+ @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$VeilidConfigBlockStoreImplCopyWith<_$VeilidConfigBlockStoreImpl> @@ -6457,8 +6819,11 @@ abstract class _VeilidConfigBlockStore implements VeilidConfigBlockStore { String get directory; @override bool get delete; + + /// Create a copy of VeilidConfigBlockStore + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$VeilidConfigBlockStoreImplCopyWith<_$VeilidConfigBlockStoreImpl> get copyWith => throw _privateConstructorUsedError; } @@ -6478,8 +6843,12 @@ mixin _$VeilidConfigProtectedStore { String? get newDeviceEncryptionKeyPassword => throw _privateConstructorUsedError; + /// Serializes this VeilidConfigProtectedStore to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of VeilidConfigProtectedStore + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $VeilidConfigProtectedStoreCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -6511,6 +6880,8 @@ class _$VeilidConfigProtectedStoreCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of VeilidConfigProtectedStore + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -6578,6 +6949,8 @@ class __$$VeilidConfigProtectedStoreImplCopyWithImpl<$Res> $Res Function(_$VeilidConfigProtectedStoreImpl) _then) : super(_value, _then); + /// Create a copy of VeilidConfigProtectedStore + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -6691,7 +7064,7 @@ class _$VeilidConfigProtectedStoreImpl newDeviceEncryptionKeyPassword)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash( runtimeType, @@ -6702,7 +7075,9 @@ class _$VeilidConfigProtectedStoreImpl deviceEncryptionKeyPassword, newDeviceEncryptionKeyPassword); - @JsonKey(ignore: true) + /// Create a copy of VeilidConfigProtectedStore + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$VeilidConfigProtectedStoreImplCopyWith<_$VeilidConfigProtectedStoreImpl> @@ -6743,8 +7118,11 @@ abstract class _VeilidConfigProtectedStore String get deviceEncryptionKeyPassword; @override String? get newDeviceEncryptionKeyPassword; + + /// Create a copy of VeilidConfigProtectedStore + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$VeilidConfigProtectedStoreImplCopyWith<_$VeilidConfigProtectedStoreImpl> get copyWith => throw _privateConstructorUsedError; } @@ -6758,8 +7136,12 @@ VeilidConfigCapabilities _$VeilidConfigCapabilitiesFromJson( mixin _$VeilidConfigCapabilities { List get disable => throw _privateConstructorUsedError; + /// Serializes this VeilidConfigCapabilities to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of VeilidConfigCapabilities + /// with the given fields replaced by the non-null parameter values. 
+ @JsonKey(includeFromJson: false, includeToJson: false) $VeilidConfigCapabilitiesCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -6784,6 +7166,8 @@ class _$VeilidConfigCapabilitiesCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of VeilidConfigCapabilities + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -6820,6 +7204,8 @@ class __$$VeilidConfigCapabilitiesImplCopyWithImpl<$Res> $Res Function(_$VeilidConfigCapabilitiesImpl) _then) : super(_value, _then); + /// Create a copy of VeilidConfigCapabilities + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -6874,12 +7260,14 @@ class _$VeilidConfigCapabilitiesImpl const DeepCollectionEquality().equals(other._disable, _disable)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, const DeepCollectionEquality().hash(_disable)); - @JsonKey(ignore: true) + /// Create a copy of VeilidConfigCapabilities + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$VeilidConfigCapabilitiesImplCopyWith<_$VeilidConfigCapabilitiesImpl> @@ -6903,8 +7291,11 @@ abstract class _VeilidConfigCapabilities implements VeilidConfigCapabilities { @override List get disable; + + /// Create a copy of VeilidConfigCapabilities + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$VeilidConfigCapabilitiesImplCopyWith<_$VeilidConfigCapabilitiesImpl> get copyWith => throw _privateConstructorUsedError; } @@ -6925,8 +7316,12 @@ mixin _$VeilidConfig { VeilidConfigBlockStore get blockStore => throw _privateConstructorUsedError; VeilidConfigNetwork get network => throw _privateConstructorUsedError; + /// Serializes this VeilidConfig to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of VeilidConfig + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $VeilidConfigCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -6963,6 +7358,8 @@ class _$VeilidConfigCopyWithImpl<$Res, $Val extends VeilidConfig> // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of VeilidConfig + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -7006,6 +7403,8 @@ class _$VeilidConfigCopyWithImpl<$Res, $Val extends VeilidConfig> ) as $Val); } + /// Create a copy of VeilidConfig + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $VeilidConfigCapabilitiesCopyWith<$Res> get capabilities { @@ -7015,6 +7414,8 @@ class _$VeilidConfigCopyWithImpl<$Res, $Val extends VeilidConfig> }); } + /// Create a copy of VeilidConfig + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $VeilidConfigProtectedStoreCopyWith<$Res> get protectedStore { @@ -7024,6 +7425,8 @@ class _$VeilidConfigCopyWithImpl<$Res, $Val extends VeilidConfig> }); } + /// Create a copy of VeilidConfig + /// with the given fields replaced by the non-null parameter values. 
@override @pragma('vm:prefer-inline') $VeilidConfigTableStoreCopyWith<$Res> get tableStore { @@ -7032,6 +7435,8 @@ class _$VeilidConfigCopyWithImpl<$Res, $Val extends VeilidConfig> }); } + /// Create a copy of VeilidConfig + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $VeilidConfigBlockStoreCopyWith<$Res> get blockStore { @@ -7040,6 +7445,8 @@ class _$VeilidConfigCopyWithImpl<$Res, $Val extends VeilidConfig> }); } + /// Create a copy of VeilidConfig + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $VeilidConfigNetworkCopyWith<$Res> get network { @@ -7086,6 +7493,8 @@ class __$$VeilidConfigImplCopyWithImpl<$Res> _$VeilidConfigImpl _value, $Res Function(_$VeilidConfigImpl) _then) : super(_value, _then); + /// Create a copy of VeilidConfig + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -7199,12 +7608,14 @@ class _$VeilidConfigImpl with DiagnosticableTreeMixin implements _VeilidConfig { (identical(other.network, network) || other.network == network)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, programName, namespace, capabilities, protectedStore, tableStore, blockStore, network); - @JsonKey(ignore: true) + /// Create a copy of VeilidConfig + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$VeilidConfigImplCopyWith<_$VeilidConfigImpl> get copyWith => @@ -7245,8 +7656,11 @@ abstract class _VeilidConfig implements VeilidConfig { VeilidConfigBlockStore get blockStore; @override VeilidConfigNetwork get network; + + /// Create a copy of VeilidConfig + /// with the given fields replaced by the non-null parameter values. 
   @override
-  @JsonKey(ignore: true)
+  @JsonKey(includeFromJson: false, includeToJson: false)
   _$$VeilidConfigImplCopyWith<_$VeilidConfigImpl> get copyWith =>
       throw _privateConstructorUsedError;
 }
diff --git a/veilid-flutter/lib/veilid_state.dart b/veilid-flutter/lib/veilid_state.dart
index 3aa8f468..7094cdb1 100644
--- a/veilid-flutter/lib/veilid_state.dart
+++ b/veilid-flutter/lib/veilid_state.dart
@@ -87,17 +87,74 @@ class TransferStatsDownUp with _$TransferStatsDownUp {
 
 ////////////
 
+@freezed
+class StateStats with _$StateStats {
+  const factory StateStats({
+    required TimestampDuration span,
+    required TimestampDuration reliable,
+    required TimestampDuration unreliable,
+    required TimestampDuration dead,
+    required TimestampDuration punished,
+    required StateReasonStats reason,
+  }) = _StateStats;
+
+  factory StateStats.fromJson(dynamic json) =>
+      _$StateStatsFromJson(json as Map<String, dynamic>);
+}
+
+////////////
+
+@freezed
+class StateReasonStats with _$StateReasonStats {
+  const factory StateReasonStats({
+    required TimestampDuration canNotSend,
+    required TimestampDuration tooManyLostAnswers,
+    required TimestampDuration noPingResponse,
+    required TimestampDuration failedToSend,
+    required TimestampDuration lostAnswers,
+    required TimestampDuration notSeenConsecutively,
+    required TimestampDuration inUnreliablePingSpan,
+  }) = _StateReasonStats;
+
+  factory StateReasonStats.fromJson(dynamic json) =>
+      _$StateReasonStatsFromJson(json as Map<String, dynamic>);
+}
+
+////////////
+
+@freezed
+class AnswerStats with _$AnswerStats {
+  const factory AnswerStats({
+    required TimestampDuration span,
+    required int questions,
+    required int answers,
+    required int lostAnswers,
+    required int consecutiveAnswersMaximum,
+    required int consecutiveAnswersAverage,
+    required int consecutiveAnswersMinimum,
+    required int consecutiveLostAnswersMaximum,
+    required int consecutiveLostAnswersAverage,
+    required int consecutiveLostAnswersMinimum,
+  }) = _AnswerStats;
+
+  factory AnswerStats.fromJson(dynamic json) =>
+      _$AnswerStatsFromJson(json as Map<String, dynamic>);
+}
+
+////////////
+
 @freezed
 class RPCStats with _$RPCStats {
   const factory RPCStats({
     required int messagesSent,
     required int messagesRcvd,
     required int questionsInFlight,
-    required Timestamp? lastQuestion,
+    required Timestamp? lastQuestionTs,
     required Timestamp? lastSeenTs,
     required Timestamp? firstConsecutiveSeenTs,
     required int recentLostAnswers,
     required int failedToSend,
+    required AnswerStats answer,
   }) = _RPCStats;
 
   factory RPCStats.fromJson(dynamic json) =>
@@ -112,6 +169,7 @@ class PeerStats with _$PeerStats {
     required Timestamp timeAdded,
     required RPCStats rpcStats,
     required TransferStatsDownUp transfer,
+    required StateStats state,
     LatencyStats? latency,
   }) = _PeerStats;
 
diff --git a/veilid-flutter/lib/veilid_state.freezed.dart b/veilid-flutter/lib/veilid_state.freezed.dart
index cd23c8d4..0ccb7178 100644
--- a/veilid-flutter/lib/veilid_state.freezed.dart
+++ b/veilid-flutter/lib/veilid_state.freezed.dart
@@ -24,8 +24,12 @@ mixin _$LatencyStats {
   TimestampDuration get average => throw _privateConstructorUsedError;
   TimestampDuration get slowest => throw _privateConstructorUsedError;
 
+  /// Serializes this LatencyStats to a JSON map.
   Map<String, dynamic> toJson() => throw _privateConstructorUsedError;
-  @JsonKey(ignore: true)
+
+  /// Create a copy of LatencyStats
+  /// with the given fields replaced by the non-null parameter values.
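// NOTE (illustrative sketch, not part of the patch): a minimal example of
// consuming the new per-peer statistics declared in veilid_state.dart above.
// Assumes `import 'package:veilid/veilid.dart';` for PeerStats and friends;
// the helper name `summarizePeerStats` is hypothetical. Only fields defined by
// the new StateStats/StateReasonStats/AnswerStats/RPCStats/PeerStats shapes
// are used.
String summarizePeerStats(PeerStats stats) {
  final state = stats.state;
  final answer = stats.rpcStats.answer;
  return 'reliable ${state.reliable} of ${state.span}, '
      'punished ${state.punished}, '
      'answers ${answer.answers}/${answer.questions} '
      '(lost ${answer.lostAnswers}, '
      'best run ${answer.consecutiveAnswersMaximum}), '
      'no-ping-response time ${state.reason.noPingResponse}';
}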
+ @JsonKey(includeFromJson: false, includeToJson: false) $LatencyStatsCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -52,6 +56,8 @@ class _$LatencyStatsCopyWithImpl<$Res, $Val extends LatencyStats> // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of LatencyStats + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -98,6 +104,8 @@ class __$$LatencyStatsImplCopyWithImpl<$Res> _$LatencyStatsImpl _value, $Res Function(_$LatencyStatsImpl) _then) : super(_value, _then); + /// Create a copy of LatencyStats + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -153,11 +161,13 @@ class _$LatencyStatsImpl implements _LatencyStats { (identical(other.slowest, slowest) || other.slowest == slowest)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, fastest, average, slowest); - @JsonKey(ignore: true) + /// Create a copy of LatencyStats + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$LatencyStatsImplCopyWith<_$LatencyStatsImpl> get copyWith => @@ -186,8 +196,11 @@ abstract class _LatencyStats implements LatencyStats { TimestampDuration get average; @override TimestampDuration get slowest; + + /// Create a copy of LatencyStats + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$LatencyStatsImplCopyWith<_$LatencyStatsImpl> get copyWith => throw _privateConstructorUsedError; } @@ -203,8 +216,12 @@ mixin _$TransferStats { BigInt get average => throw _privateConstructorUsedError; BigInt get minimum => throw _privateConstructorUsedError; + /// Serializes this TransferStats to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of TransferStats + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $TransferStatsCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -228,6 +245,8 @@ class _$TransferStatsCopyWithImpl<$Res, $Val extends TransferStats> // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of TransferStats + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -276,6 +295,8 @@ class __$$TransferStatsImplCopyWithImpl<$Res> _$TransferStatsImpl _value, $Res Function(_$TransferStatsImpl) _then) : super(_value, _then); + /// Create a copy of TransferStats + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -342,12 +363,14 @@ class _$TransferStatsImpl implements _TransferStats { (identical(other.minimum, minimum) || other.minimum == minimum)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, total, maximum, average, minimum); - @JsonKey(ignore: true) + /// Create a copy of TransferStats + /// with the given fields replaced by the non-null parameter values. 
+ @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$TransferStatsImplCopyWith<_$TransferStatsImpl> get copyWith => @@ -379,8 +402,11 @@ abstract class _TransferStats implements TransferStats { BigInt get average; @override BigInt get minimum; + + /// Create a copy of TransferStats + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$TransferStatsImplCopyWith<_$TransferStatsImpl> get copyWith => throw _privateConstructorUsedError; } @@ -394,8 +420,12 @@ mixin _$TransferStatsDownUp { TransferStats get down => throw _privateConstructorUsedError; TransferStats get up => throw _privateConstructorUsedError; + /// Serializes this TransferStatsDownUp to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of TransferStatsDownUp + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $TransferStatsDownUpCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -422,6 +452,8 @@ class _$TransferStatsDownUpCopyWithImpl<$Res, $Val extends TransferStatsDownUp> // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of TransferStatsDownUp + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -440,6 +472,8 @@ class _$TransferStatsDownUpCopyWithImpl<$Res, $Val extends TransferStatsDownUp> ) as $Val); } + /// Create a copy of TransferStatsDownUp + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $TransferStatsCopyWith<$Res> get down { @@ -448,6 +482,8 @@ class _$TransferStatsDownUpCopyWithImpl<$Res, $Val extends TransferStatsDownUp> }); } + /// Create a copy of TransferStatsDownUp + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $TransferStatsCopyWith<$Res> get up { @@ -481,6 +517,8 @@ class __$$TransferStatsDownUpImplCopyWithImpl<$Res> $Res Function(_$TransferStatsDownUpImpl) _then) : super(_value, _then); + /// Create a copy of TransferStatsDownUp + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -527,11 +565,13 @@ class _$TransferStatsDownUpImpl implements _TransferStatsDownUp { (identical(other.up, up) || other.up == up)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, down, up); - @JsonKey(ignore: true) + /// Create a copy of TransferStatsDownUp + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$TransferStatsDownUpImplCopyWith<_$TransferStatsDownUpImpl> get copyWith => @@ -558,12 +598,941 @@ abstract class _TransferStatsDownUp implements TransferStatsDownUp { TransferStats get down; @override TransferStats get up; + + /// Create a copy of TransferStatsDownUp + /// with the given fields replaced by the non-null parameter values. 
   @override
-  @JsonKey(ignore: true)
+  @JsonKey(includeFromJson: false, includeToJson: false)
   _$$TransferStatsDownUpImplCopyWith<_$TransferStatsDownUpImpl> get copyWith =>
       throw _privateConstructorUsedError;
 }
 
+StateStats _$StateStatsFromJson(Map<String, dynamic> json) {
+  return _StateStats.fromJson(json);
+}
+
+/// @nodoc
+mixin _$StateStats {
+  TimestampDuration get span => throw _privateConstructorUsedError;
+  TimestampDuration get reliable => throw _privateConstructorUsedError;
+  TimestampDuration get unreliable => throw _privateConstructorUsedError;
+  TimestampDuration get dead => throw _privateConstructorUsedError;
+  TimestampDuration get punished => throw _privateConstructorUsedError;
+  StateReasonStats get reason => throw _privateConstructorUsedError;
+
+  /// Serializes this StateStats to a JSON map.
+  Map<String, dynamic> toJson() => throw _privateConstructorUsedError;
+
+  /// Create a copy of StateStats
+  /// with the given fields replaced by the non-null parameter values.
+  @JsonKey(includeFromJson: false, includeToJson: false)
+  $StateStatsCopyWith<StateStats> get copyWith =>
+      throw _privateConstructorUsedError;
+}
+
+/// @nodoc
+abstract class $StateStatsCopyWith<$Res> {
+  factory $StateStatsCopyWith(
+          StateStats value, $Res Function(StateStats) then) =
+      _$StateStatsCopyWithImpl<$Res, StateStats>;
+  @useResult
+  $Res call(
+      {TimestampDuration span,
+      TimestampDuration reliable,
+      TimestampDuration unreliable,
+      TimestampDuration dead,
+      TimestampDuration punished,
+      StateReasonStats reason});
+
+  $StateReasonStatsCopyWith<$Res> get reason;
+}
+
+/// @nodoc
+class _$StateStatsCopyWithImpl<$Res, $Val extends StateStats>
+    implements $StateStatsCopyWith<$Res> {
+  _$StateStatsCopyWithImpl(this._value, this._then);
+
+  // ignore: unused_field
+  final $Val _value;
+  // ignore: unused_field
+  final $Res Function($Val) _then;
+
+  /// Create a copy of StateStats
+  /// with the given fields replaced by the non-null parameter values.
+  @pragma('vm:prefer-inline')
+  @override
+  $Res call({
+    Object? span = null,
+    Object? reliable = null,
+    Object? unreliable = null,
+    Object? dead = null,
+    Object? punished = null,
+    Object? reason = null,
+  }) {
+    return _then(_value.copyWith(
+      span: null == span
+          ? _value.span
+          : span // ignore: cast_nullable_to_non_nullable
+              as TimestampDuration,
+      reliable: null == reliable
+          ? _value.reliable
+          : reliable // ignore: cast_nullable_to_non_nullable
+              as TimestampDuration,
+      unreliable: null == unreliable
+          ? _value.unreliable
+          : unreliable // ignore: cast_nullable_to_non_nullable
+              as TimestampDuration,
+      dead: null == dead
+          ? _value.dead
+          : dead // ignore: cast_nullable_to_non_nullable
+              as TimestampDuration,
+      punished: null == punished
+          ? _value.punished
+          : punished // ignore: cast_nullable_to_non_nullable
+              as TimestampDuration,
+      reason: null == reason
+          ? _value.reason
+          : reason // ignore: cast_nullable_to_non_nullable
+              as StateReasonStats,
+    ) as $Val);
+  }
+
+  /// Create a copy of StateStats
+  /// with the given fields replaced by the non-null parameter values.
+ @override + @pragma('vm:prefer-inline') + $StateReasonStatsCopyWith<$Res> get reason { + return $StateReasonStatsCopyWith<$Res>(_value.reason, (value) { + return _then(_value.copyWith(reason: value) as $Val); + }); + } +} + +/// @nodoc +abstract class _$$StateStatsImplCopyWith<$Res> + implements $StateStatsCopyWith<$Res> { + factory _$$StateStatsImplCopyWith( + _$StateStatsImpl value, $Res Function(_$StateStatsImpl) then) = + __$$StateStatsImplCopyWithImpl<$Res>; + @override + @useResult + $Res call( + {TimestampDuration span, + TimestampDuration reliable, + TimestampDuration unreliable, + TimestampDuration dead, + TimestampDuration punished, + StateReasonStats reason}); + + @override + $StateReasonStatsCopyWith<$Res> get reason; +} + +/// @nodoc +class __$$StateStatsImplCopyWithImpl<$Res> + extends _$StateStatsCopyWithImpl<$Res, _$StateStatsImpl> + implements _$$StateStatsImplCopyWith<$Res> { + __$$StateStatsImplCopyWithImpl( + _$StateStatsImpl _value, $Res Function(_$StateStatsImpl) _then) + : super(_value, _then); + + /// Create a copy of StateStats + /// with the given fields replaced by the non-null parameter values. + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? span = null, + Object? reliable = null, + Object? unreliable = null, + Object? dead = null, + Object? punished = null, + Object? reason = null, + }) { + return _then(_$StateStatsImpl( + span: null == span + ? _value.span + : span // ignore: cast_nullable_to_non_nullable + as TimestampDuration, + reliable: null == reliable + ? _value.reliable + : reliable // ignore: cast_nullable_to_non_nullable + as TimestampDuration, + unreliable: null == unreliable + ? _value.unreliable + : unreliable // ignore: cast_nullable_to_non_nullable + as TimestampDuration, + dead: null == dead + ? _value.dead + : dead // ignore: cast_nullable_to_non_nullable + as TimestampDuration, + punished: null == punished + ? _value.punished + : punished // ignore: cast_nullable_to_non_nullable + as TimestampDuration, + reason: null == reason + ? 
_value.reason + : reason // ignore: cast_nullable_to_non_nullable + as StateReasonStats, + )); + } +} + +/// @nodoc +@JsonSerializable() +class _$StateStatsImpl implements _StateStats { + const _$StateStatsImpl( + {required this.span, + required this.reliable, + required this.unreliable, + required this.dead, + required this.punished, + required this.reason}); + + factory _$StateStatsImpl.fromJson(Map json) => + _$$StateStatsImplFromJson(json); + + @override + final TimestampDuration span; + @override + final TimestampDuration reliable; + @override + final TimestampDuration unreliable; + @override + final TimestampDuration dead; + @override + final TimestampDuration punished; + @override + final StateReasonStats reason; + + @override + String toString() { + return 'StateStats(span: $span, reliable: $reliable, unreliable: $unreliable, dead: $dead, punished: $punished, reason: $reason)'; + } + + @override + bool operator ==(Object other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$StateStatsImpl && + (identical(other.span, span) || other.span == span) && + (identical(other.reliable, reliable) || + other.reliable == reliable) && + (identical(other.unreliable, unreliable) || + other.unreliable == unreliable) && + (identical(other.dead, dead) || other.dead == dead) && + (identical(other.punished, punished) || + other.punished == punished) && + (identical(other.reason, reason) || other.reason == reason)); + } + + @JsonKey(includeFromJson: false, includeToJson: false) + @override + int get hashCode => Object.hash( + runtimeType, span, reliable, unreliable, dead, punished, reason); + + /// Create a copy of StateStats + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) + @override + @pragma('vm:prefer-inline') + _$$StateStatsImplCopyWith<_$StateStatsImpl> get copyWith => + __$$StateStatsImplCopyWithImpl<_$StateStatsImpl>(this, _$identity); + + @override + Map toJson() { + return _$$StateStatsImplToJson( + this, + ); + } +} + +abstract class _StateStats implements StateStats { + const factory _StateStats( + {required final TimestampDuration span, + required final TimestampDuration reliable, + required final TimestampDuration unreliable, + required final TimestampDuration dead, + required final TimestampDuration punished, + required final StateReasonStats reason}) = _$StateStatsImpl; + + factory _StateStats.fromJson(Map json) = + _$StateStatsImpl.fromJson; + + @override + TimestampDuration get span; + @override + TimestampDuration get reliable; + @override + TimestampDuration get unreliable; + @override + TimestampDuration get dead; + @override + TimestampDuration get punished; + @override + StateReasonStats get reason; + + /// Create a copy of StateStats + /// with the given fields replaced by the non-null parameter values. 
+ @override + @JsonKey(includeFromJson: false, includeToJson: false) + _$$StateStatsImplCopyWith<_$StateStatsImpl> get copyWith => + throw _privateConstructorUsedError; +} + +StateReasonStats _$StateReasonStatsFromJson(Map json) { + return _StateReasonStats.fromJson(json); +} + +/// @nodoc +mixin _$StateReasonStats { + TimestampDuration get canNotSend => throw _privateConstructorUsedError; + TimestampDuration get tooManyLostAnswers => + throw _privateConstructorUsedError; + TimestampDuration get noPingResponse => throw _privateConstructorUsedError; + TimestampDuration get failedToSend => throw _privateConstructorUsedError; + TimestampDuration get lostAnswers => throw _privateConstructorUsedError; + TimestampDuration get notSeenConsecutively => + throw _privateConstructorUsedError; + TimestampDuration get inUnreliablePingSpan => + throw _privateConstructorUsedError; + + /// Serializes this StateReasonStats to a JSON map. + Map toJson() => throw _privateConstructorUsedError; + + /// Create a copy of StateReasonStats + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) + $StateReasonStatsCopyWith get copyWith => + throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class $StateReasonStatsCopyWith<$Res> { + factory $StateReasonStatsCopyWith( + StateReasonStats value, $Res Function(StateReasonStats) then) = + _$StateReasonStatsCopyWithImpl<$Res, StateReasonStats>; + @useResult + $Res call( + {TimestampDuration canNotSend, + TimestampDuration tooManyLostAnswers, + TimestampDuration noPingResponse, + TimestampDuration failedToSend, + TimestampDuration lostAnswers, + TimestampDuration notSeenConsecutively, + TimestampDuration inUnreliablePingSpan}); +} + +/// @nodoc +class _$StateReasonStatsCopyWithImpl<$Res, $Val extends StateReasonStats> + implements $StateReasonStatsCopyWith<$Res> { + _$StateReasonStatsCopyWithImpl(this._value, this._then); + + // ignore: unused_field + final $Val _value; + // ignore: unused_field + final $Res Function($Val) _then; + + /// Create a copy of StateReasonStats + /// with the given fields replaced by the non-null parameter values. + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? canNotSend = null, + Object? tooManyLostAnswers = null, + Object? noPingResponse = null, + Object? failedToSend = null, + Object? lostAnswers = null, + Object? notSeenConsecutively = null, + Object? inUnreliablePingSpan = null, + }) { + return _then(_value.copyWith( + canNotSend: null == canNotSend + ? _value.canNotSend + : canNotSend // ignore: cast_nullable_to_non_nullable + as TimestampDuration, + tooManyLostAnswers: null == tooManyLostAnswers + ? _value.tooManyLostAnswers + : tooManyLostAnswers // ignore: cast_nullable_to_non_nullable + as TimestampDuration, + noPingResponse: null == noPingResponse + ? _value.noPingResponse + : noPingResponse // ignore: cast_nullable_to_non_nullable + as TimestampDuration, + failedToSend: null == failedToSend + ? _value.failedToSend + : failedToSend // ignore: cast_nullable_to_non_nullable + as TimestampDuration, + lostAnswers: null == lostAnswers + ? _value.lostAnswers + : lostAnswers // ignore: cast_nullable_to_non_nullable + as TimestampDuration, + notSeenConsecutively: null == notSeenConsecutively + ? _value.notSeenConsecutively + : notSeenConsecutively // ignore: cast_nullable_to_non_nullable + as TimestampDuration, + inUnreliablePingSpan: null == inUnreliablePingSpan + ? 
_value.inUnreliablePingSpan + : inUnreliablePingSpan // ignore: cast_nullable_to_non_nullable + as TimestampDuration, + ) as $Val); + } +} + +/// @nodoc +abstract class _$$StateReasonStatsImplCopyWith<$Res> + implements $StateReasonStatsCopyWith<$Res> { + factory _$$StateReasonStatsImplCopyWith(_$StateReasonStatsImpl value, + $Res Function(_$StateReasonStatsImpl) then) = + __$$StateReasonStatsImplCopyWithImpl<$Res>; + @override + @useResult + $Res call( + {TimestampDuration canNotSend, + TimestampDuration tooManyLostAnswers, + TimestampDuration noPingResponse, + TimestampDuration failedToSend, + TimestampDuration lostAnswers, + TimestampDuration notSeenConsecutively, + TimestampDuration inUnreliablePingSpan}); +} + +/// @nodoc +class __$$StateReasonStatsImplCopyWithImpl<$Res> + extends _$StateReasonStatsCopyWithImpl<$Res, _$StateReasonStatsImpl> + implements _$$StateReasonStatsImplCopyWith<$Res> { + __$$StateReasonStatsImplCopyWithImpl(_$StateReasonStatsImpl _value, + $Res Function(_$StateReasonStatsImpl) _then) + : super(_value, _then); + + /// Create a copy of StateReasonStats + /// with the given fields replaced by the non-null parameter values. + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? canNotSend = null, + Object? tooManyLostAnswers = null, + Object? noPingResponse = null, + Object? failedToSend = null, + Object? lostAnswers = null, + Object? notSeenConsecutively = null, + Object? inUnreliablePingSpan = null, + }) { + return _then(_$StateReasonStatsImpl( + canNotSend: null == canNotSend + ? _value.canNotSend + : canNotSend // ignore: cast_nullable_to_non_nullable + as TimestampDuration, + tooManyLostAnswers: null == tooManyLostAnswers + ? _value.tooManyLostAnswers + : tooManyLostAnswers // ignore: cast_nullable_to_non_nullable + as TimestampDuration, + noPingResponse: null == noPingResponse + ? _value.noPingResponse + : noPingResponse // ignore: cast_nullable_to_non_nullable + as TimestampDuration, + failedToSend: null == failedToSend + ? _value.failedToSend + : failedToSend // ignore: cast_nullable_to_non_nullable + as TimestampDuration, + lostAnswers: null == lostAnswers + ? _value.lostAnswers + : lostAnswers // ignore: cast_nullable_to_non_nullable + as TimestampDuration, + notSeenConsecutively: null == notSeenConsecutively + ? _value.notSeenConsecutively + : notSeenConsecutively // ignore: cast_nullable_to_non_nullable + as TimestampDuration, + inUnreliablePingSpan: null == inUnreliablePingSpan + ? 
_value.inUnreliablePingSpan + : inUnreliablePingSpan // ignore: cast_nullable_to_non_nullable + as TimestampDuration, + )); + } +} + +/// @nodoc +@JsonSerializable() +class _$StateReasonStatsImpl implements _StateReasonStats { + const _$StateReasonStatsImpl( + {required this.canNotSend, + required this.tooManyLostAnswers, + required this.noPingResponse, + required this.failedToSend, + required this.lostAnswers, + required this.notSeenConsecutively, + required this.inUnreliablePingSpan}); + + factory _$StateReasonStatsImpl.fromJson(Map json) => + _$$StateReasonStatsImplFromJson(json); + + @override + final TimestampDuration canNotSend; + @override + final TimestampDuration tooManyLostAnswers; + @override + final TimestampDuration noPingResponse; + @override + final TimestampDuration failedToSend; + @override + final TimestampDuration lostAnswers; + @override + final TimestampDuration notSeenConsecutively; + @override + final TimestampDuration inUnreliablePingSpan; + + @override + String toString() { + return 'StateReasonStats(canNotSend: $canNotSend, tooManyLostAnswers: $tooManyLostAnswers, noPingResponse: $noPingResponse, failedToSend: $failedToSend, lostAnswers: $lostAnswers, notSeenConsecutively: $notSeenConsecutively, inUnreliablePingSpan: $inUnreliablePingSpan)'; + } + + @override + bool operator ==(Object other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$StateReasonStatsImpl && + (identical(other.canNotSend, canNotSend) || + other.canNotSend == canNotSend) && + (identical(other.tooManyLostAnswers, tooManyLostAnswers) || + other.tooManyLostAnswers == tooManyLostAnswers) && + (identical(other.noPingResponse, noPingResponse) || + other.noPingResponse == noPingResponse) && + (identical(other.failedToSend, failedToSend) || + other.failedToSend == failedToSend) && + (identical(other.lostAnswers, lostAnswers) || + other.lostAnswers == lostAnswers) && + (identical(other.notSeenConsecutively, notSeenConsecutively) || + other.notSeenConsecutively == notSeenConsecutively) && + (identical(other.inUnreliablePingSpan, inUnreliablePingSpan) || + other.inUnreliablePingSpan == inUnreliablePingSpan)); + } + + @JsonKey(includeFromJson: false, includeToJson: false) + @override + int get hashCode => Object.hash( + runtimeType, + canNotSend, + tooManyLostAnswers, + noPingResponse, + failedToSend, + lostAnswers, + notSeenConsecutively, + inUnreliablePingSpan); + + /// Create a copy of StateReasonStats + /// with the given fields replaced by the non-null parameter values. 
+ @JsonKey(includeFromJson: false, includeToJson: false) + @override + @pragma('vm:prefer-inline') + _$$StateReasonStatsImplCopyWith<_$StateReasonStatsImpl> get copyWith => + __$$StateReasonStatsImplCopyWithImpl<_$StateReasonStatsImpl>( + this, _$identity); + + @override + Map toJson() { + return _$$StateReasonStatsImplToJson( + this, + ); + } +} + +abstract class _StateReasonStats implements StateReasonStats { + const factory _StateReasonStats( + {required final TimestampDuration canNotSend, + required final TimestampDuration tooManyLostAnswers, + required final TimestampDuration noPingResponse, + required final TimestampDuration failedToSend, + required final TimestampDuration lostAnswers, + required final TimestampDuration notSeenConsecutively, + required final TimestampDuration inUnreliablePingSpan}) = + _$StateReasonStatsImpl; + + factory _StateReasonStats.fromJson(Map json) = + _$StateReasonStatsImpl.fromJson; + + @override + TimestampDuration get canNotSend; + @override + TimestampDuration get tooManyLostAnswers; + @override + TimestampDuration get noPingResponse; + @override + TimestampDuration get failedToSend; + @override + TimestampDuration get lostAnswers; + @override + TimestampDuration get notSeenConsecutively; + @override + TimestampDuration get inUnreliablePingSpan; + + /// Create a copy of StateReasonStats + /// with the given fields replaced by the non-null parameter values. + @override + @JsonKey(includeFromJson: false, includeToJson: false) + _$$StateReasonStatsImplCopyWith<_$StateReasonStatsImpl> get copyWith => + throw _privateConstructorUsedError; +} + +AnswerStats _$AnswerStatsFromJson(Map json) { + return _AnswerStats.fromJson(json); +} + +/// @nodoc +mixin _$AnswerStats { + TimestampDuration get span => throw _privateConstructorUsedError; + int get questions => throw _privateConstructorUsedError; + int get answers => throw _privateConstructorUsedError; + int get lostAnswers => throw _privateConstructorUsedError; + int get consecutiveAnswersMaximum => throw _privateConstructorUsedError; + int get consecutiveAnswersAverage => throw _privateConstructorUsedError; + int get consecutiveAnswersMinimum => throw _privateConstructorUsedError; + int get consecutiveLostAnswersMaximum => throw _privateConstructorUsedError; + int get consecutiveLostAnswersAverage => throw _privateConstructorUsedError; + int get consecutiveLostAnswersMinimum => throw _privateConstructorUsedError; + + /// Serializes this AnswerStats to a JSON map. + Map toJson() => throw _privateConstructorUsedError; + + /// Create a copy of AnswerStats + /// with the given fields replaced by the non-null parameter values. 
+ @JsonKey(includeFromJson: false, includeToJson: false) + $AnswerStatsCopyWith get copyWith => + throw _privateConstructorUsedError; +} + +/// @nodoc +abstract class $AnswerStatsCopyWith<$Res> { + factory $AnswerStatsCopyWith( + AnswerStats value, $Res Function(AnswerStats) then) = + _$AnswerStatsCopyWithImpl<$Res, AnswerStats>; + @useResult + $Res call( + {TimestampDuration span, + int questions, + int answers, + int lostAnswers, + int consecutiveAnswersMaximum, + int consecutiveAnswersAverage, + int consecutiveAnswersMinimum, + int consecutiveLostAnswersMaximum, + int consecutiveLostAnswersAverage, + int consecutiveLostAnswersMinimum}); +} + +/// @nodoc +class _$AnswerStatsCopyWithImpl<$Res, $Val extends AnswerStats> + implements $AnswerStatsCopyWith<$Res> { + _$AnswerStatsCopyWithImpl(this._value, this._then); + + // ignore: unused_field + final $Val _value; + // ignore: unused_field + final $Res Function($Val) _then; + + /// Create a copy of AnswerStats + /// with the given fields replaced by the non-null parameter values. + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? span = null, + Object? questions = null, + Object? answers = null, + Object? lostAnswers = null, + Object? consecutiveAnswersMaximum = null, + Object? consecutiveAnswersAverage = null, + Object? consecutiveAnswersMinimum = null, + Object? consecutiveLostAnswersMaximum = null, + Object? consecutiveLostAnswersAverage = null, + Object? consecutiveLostAnswersMinimum = null, + }) { + return _then(_value.copyWith( + span: null == span + ? _value.span + : span // ignore: cast_nullable_to_non_nullable + as TimestampDuration, + questions: null == questions + ? _value.questions + : questions // ignore: cast_nullable_to_non_nullable + as int, + answers: null == answers + ? _value.answers + : answers // ignore: cast_nullable_to_non_nullable + as int, + lostAnswers: null == lostAnswers + ? _value.lostAnswers + : lostAnswers // ignore: cast_nullable_to_non_nullable + as int, + consecutiveAnswersMaximum: null == consecutiveAnswersMaximum + ? _value.consecutiveAnswersMaximum + : consecutiveAnswersMaximum // ignore: cast_nullable_to_non_nullable + as int, + consecutiveAnswersAverage: null == consecutiveAnswersAverage + ? _value.consecutiveAnswersAverage + : consecutiveAnswersAverage // ignore: cast_nullable_to_non_nullable + as int, + consecutiveAnswersMinimum: null == consecutiveAnswersMinimum + ? _value.consecutiveAnswersMinimum + : consecutiveAnswersMinimum // ignore: cast_nullable_to_non_nullable + as int, + consecutiveLostAnswersMaximum: null == consecutiveLostAnswersMaximum + ? _value.consecutiveLostAnswersMaximum + : consecutiveLostAnswersMaximum // ignore: cast_nullable_to_non_nullable + as int, + consecutiveLostAnswersAverage: null == consecutiveLostAnswersAverage + ? _value.consecutiveLostAnswersAverage + : consecutiveLostAnswersAverage // ignore: cast_nullable_to_non_nullable + as int, + consecutiveLostAnswersMinimum: null == consecutiveLostAnswersMinimum + ? 
_value.consecutiveLostAnswersMinimum + : consecutiveLostAnswersMinimum // ignore: cast_nullable_to_non_nullable + as int, + ) as $Val); + } +} + +/// @nodoc +abstract class _$$AnswerStatsImplCopyWith<$Res> + implements $AnswerStatsCopyWith<$Res> { + factory _$$AnswerStatsImplCopyWith( + _$AnswerStatsImpl value, $Res Function(_$AnswerStatsImpl) then) = + __$$AnswerStatsImplCopyWithImpl<$Res>; + @override + @useResult + $Res call( + {TimestampDuration span, + int questions, + int answers, + int lostAnswers, + int consecutiveAnswersMaximum, + int consecutiveAnswersAverage, + int consecutiveAnswersMinimum, + int consecutiveLostAnswersMaximum, + int consecutiveLostAnswersAverage, + int consecutiveLostAnswersMinimum}); +} + +/// @nodoc +class __$$AnswerStatsImplCopyWithImpl<$Res> + extends _$AnswerStatsCopyWithImpl<$Res, _$AnswerStatsImpl> + implements _$$AnswerStatsImplCopyWith<$Res> { + __$$AnswerStatsImplCopyWithImpl( + _$AnswerStatsImpl _value, $Res Function(_$AnswerStatsImpl) _then) + : super(_value, _then); + + /// Create a copy of AnswerStats + /// with the given fields replaced by the non-null parameter values. + @pragma('vm:prefer-inline') + @override + $Res call({ + Object? span = null, + Object? questions = null, + Object? answers = null, + Object? lostAnswers = null, + Object? consecutiveAnswersMaximum = null, + Object? consecutiveAnswersAverage = null, + Object? consecutiveAnswersMinimum = null, + Object? consecutiveLostAnswersMaximum = null, + Object? consecutiveLostAnswersAverage = null, + Object? consecutiveLostAnswersMinimum = null, + }) { + return _then(_$AnswerStatsImpl( + span: null == span + ? _value.span + : span // ignore: cast_nullable_to_non_nullable + as TimestampDuration, + questions: null == questions + ? _value.questions + : questions // ignore: cast_nullable_to_non_nullable + as int, + answers: null == answers + ? _value.answers + : answers // ignore: cast_nullable_to_non_nullable + as int, + lostAnswers: null == lostAnswers + ? _value.lostAnswers + : lostAnswers // ignore: cast_nullable_to_non_nullable + as int, + consecutiveAnswersMaximum: null == consecutiveAnswersMaximum + ? _value.consecutiveAnswersMaximum + : consecutiveAnswersMaximum // ignore: cast_nullable_to_non_nullable + as int, + consecutiveAnswersAverage: null == consecutiveAnswersAverage + ? _value.consecutiveAnswersAverage + : consecutiveAnswersAverage // ignore: cast_nullable_to_non_nullable + as int, + consecutiveAnswersMinimum: null == consecutiveAnswersMinimum + ? _value.consecutiveAnswersMinimum + : consecutiveAnswersMinimum // ignore: cast_nullable_to_non_nullable + as int, + consecutiveLostAnswersMaximum: null == consecutiveLostAnswersMaximum + ? _value.consecutiveLostAnswersMaximum + : consecutiveLostAnswersMaximum // ignore: cast_nullable_to_non_nullable + as int, + consecutiveLostAnswersAverage: null == consecutiveLostAnswersAverage + ? _value.consecutiveLostAnswersAverage + : consecutiveLostAnswersAverage // ignore: cast_nullable_to_non_nullable + as int, + consecutiveLostAnswersMinimum: null == consecutiveLostAnswersMinimum + ? 
_value.consecutiveLostAnswersMinimum + : consecutiveLostAnswersMinimum // ignore: cast_nullable_to_non_nullable + as int, + )); + } +} + +/// @nodoc +@JsonSerializable() +class _$AnswerStatsImpl implements _AnswerStats { + const _$AnswerStatsImpl( + {required this.span, + required this.questions, + required this.answers, + required this.lostAnswers, + required this.consecutiveAnswersMaximum, + required this.consecutiveAnswersAverage, + required this.consecutiveAnswersMinimum, + required this.consecutiveLostAnswersMaximum, + required this.consecutiveLostAnswersAverage, + required this.consecutiveLostAnswersMinimum}); + + factory _$AnswerStatsImpl.fromJson(Map json) => + _$$AnswerStatsImplFromJson(json); + + @override + final TimestampDuration span; + @override + final int questions; + @override + final int answers; + @override + final int lostAnswers; + @override + final int consecutiveAnswersMaximum; + @override + final int consecutiveAnswersAverage; + @override + final int consecutiveAnswersMinimum; + @override + final int consecutiveLostAnswersMaximum; + @override + final int consecutiveLostAnswersAverage; + @override + final int consecutiveLostAnswersMinimum; + + @override + String toString() { + return 'AnswerStats(span: $span, questions: $questions, answers: $answers, lostAnswers: $lostAnswers, consecutiveAnswersMaximum: $consecutiveAnswersMaximum, consecutiveAnswersAverage: $consecutiveAnswersAverage, consecutiveAnswersMinimum: $consecutiveAnswersMinimum, consecutiveLostAnswersMaximum: $consecutiveLostAnswersMaximum, consecutiveLostAnswersAverage: $consecutiveLostAnswersAverage, consecutiveLostAnswersMinimum: $consecutiveLostAnswersMinimum)'; + } + + @override + bool operator ==(Object other) { + return identical(this, other) || + (other.runtimeType == runtimeType && + other is _$AnswerStatsImpl && + (identical(other.span, span) || other.span == span) && + (identical(other.questions, questions) || + other.questions == questions) && + (identical(other.answers, answers) || other.answers == answers) && + (identical(other.lostAnswers, lostAnswers) || + other.lostAnswers == lostAnswers) && + (identical(other.consecutiveAnswersMaximum, + consecutiveAnswersMaximum) || + other.consecutiveAnswersMaximum == consecutiveAnswersMaximum) && + (identical(other.consecutiveAnswersAverage, + consecutiveAnswersAverage) || + other.consecutiveAnswersAverage == consecutiveAnswersAverage) && + (identical(other.consecutiveAnswersMinimum, + consecutiveAnswersMinimum) || + other.consecutiveAnswersMinimum == consecutiveAnswersMinimum) && + (identical(other.consecutiveLostAnswersMaximum, + consecutiveLostAnswersMaximum) || + other.consecutiveLostAnswersMaximum == + consecutiveLostAnswersMaximum) && + (identical(other.consecutiveLostAnswersAverage, + consecutiveLostAnswersAverage) || + other.consecutiveLostAnswersAverage == + consecutiveLostAnswersAverage) && + (identical(other.consecutiveLostAnswersMinimum, + consecutiveLostAnswersMinimum) || + other.consecutiveLostAnswersMinimum == + consecutiveLostAnswersMinimum)); + } + + @JsonKey(includeFromJson: false, includeToJson: false) + @override + int get hashCode => Object.hash( + runtimeType, + span, + questions, + answers, + lostAnswers, + consecutiveAnswersMaximum, + consecutiveAnswersAverage, + consecutiveAnswersMinimum, + consecutiveLostAnswersMaximum, + consecutiveLostAnswersAverage, + consecutiveLostAnswersMinimum); + + /// Create a copy of AnswerStats + /// with the given fields replaced by the non-null parameter values. 
+ @JsonKey(includeFromJson: false, includeToJson: false) + @override + @pragma('vm:prefer-inline') + _$$AnswerStatsImplCopyWith<_$AnswerStatsImpl> get copyWith => + __$$AnswerStatsImplCopyWithImpl<_$AnswerStatsImpl>(this, _$identity); + + @override + Map toJson() { + return _$$AnswerStatsImplToJson( + this, + ); + } +} + +abstract class _AnswerStats implements AnswerStats { + const factory _AnswerStats( + {required final TimestampDuration span, + required final int questions, + required final int answers, + required final int lostAnswers, + required final int consecutiveAnswersMaximum, + required final int consecutiveAnswersAverage, + required final int consecutiveAnswersMinimum, + required final int consecutiveLostAnswersMaximum, + required final int consecutiveLostAnswersAverage, + required final int consecutiveLostAnswersMinimum}) = _$AnswerStatsImpl; + + factory _AnswerStats.fromJson(Map json) = + _$AnswerStatsImpl.fromJson; + + @override + TimestampDuration get span; + @override + int get questions; + @override + int get answers; + @override + int get lostAnswers; + @override + int get consecutiveAnswersMaximum; + @override + int get consecutiveAnswersAverage; + @override + int get consecutiveAnswersMinimum; + @override + int get consecutiveLostAnswersMaximum; + @override + int get consecutiveLostAnswersAverage; + @override + int get consecutiveLostAnswersMinimum; + + /// Create a copy of AnswerStats + /// with the given fields replaced by the non-null parameter values. + @override + @JsonKey(includeFromJson: false, includeToJson: false) + _$$AnswerStatsImplCopyWith<_$AnswerStatsImpl> get copyWith => + throw _privateConstructorUsedError; +} + RPCStats _$RPCStatsFromJson(Map json) { return _RPCStats.fromJson(json); } @@ -573,14 +1542,19 @@ mixin _$RPCStats { int get messagesSent => throw _privateConstructorUsedError; int get messagesRcvd => throw _privateConstructorUsedError; int get questionsInFlight => throw _privateConstructorUsedError; - Timestamp? get lastQuestion => throw _privateConstructorUsedError; + Timestamp? get lastQuestionTs => throw _privateConstructorUsedError; Timestamp? get lastSeenTs => throw _privateConstructorUsedError; Timestamp? get firstConsecutiveSeenTs => throw _privateConstructorUsedError; int get recentLostAnswers => throw _privateConstructorUsedError; int get failedToSend => throw _privateConstructorUsedError; + AnswerStats get answer => throw _privateConstructorUsedError; + /// Serializes this RPCStats to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of RPCStats + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $RPCStatsCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -594,11 +1568,14 @@ abstract class $RPCStatsCopyWith<$Res> { {int messagesSent, int messagesRcvd, int questionsInFlight, - Timestamp? lastQuestion, + Timestamp? lastQuestionTs, Timestamp? lastSeenTs, Timestamp? firstConsecutiveSeenTs, int recentLostAnswers, - int failedToSend}); + int failedToSend, + AnswerStats answer}); + + $AnswerStatsCopyWith<$Res> get answer; } /// @nodoc @@ -611,17 +1588,20 @@ class _$RPCStatsCopyWithImpl<$Res, $Val extends RPCStats> // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of RPCStats + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ Object? messagesSent = null, Object? messagesRcvd = null, Object? 
questionsInFlight = null, - Object? lastQuestion = freezed, + Object? lastQuestionTs = freezed, Object? lastSeenTs = freezed, Object? firstConsecutiveSeenTs = freezed, Object? recentLostAnswers = null, Object? failedToSend = null, + Object? answer = null, }) { return _then(_value.copyWith( messagesSent: null == messagesSent @@ -636,9 +1616,9 @@ class _$RPCStatsCopyWithImpl<$Res, $Val extends RPCStats> ? _value.questionsInFlight : questionsInFlight // ignore: cast_nullable_to_non_nullable as int, - lastQuestion: freezed == lastQuestion - ? _value.lastQuestion - : lastQuestion // ignore: cast_nullable_to_non_nullable + lastQuestionTs: freezed == lastQuestionTs + ? _value.lastQuestionTs + : lastQuestionTs // ignore: cast_nullable_to_non_nullable as Timestamp?, lastSeenTs: freezed == lastSeenTs ? _value.lastSeenTs @@ -656,8 +1636,22 @@ class _$RPCStatsCopyWithImpl<$Res, $Val extends RPCStats> ? _value.failedToSend : failedToSend // ignore: cast_nullable_to_non_nullable as int, + answer: null == answer + ? _value.answer + : answer // ignore: cast_nullable_to_non_nullable + as AnswerStats, ) as $Val); } + + /// Create a copy of RPCStats + /// with the given fields replaced by the non-null parameter values. + @override + @pragma('vm:prefer-inline') + $AnswerStatsCopyWith<$Res> get answer { + return $AnswerStatsCopyWith<$Res>(_value.answer, (value) { + return _then(_value.copyWith(answer: value) as $Val); + }); + } } /// @nodoc @@ -672,11 +1666,15 @@ abstract class _$$RPCStatsImplCopyWith<$Res> {int messagesSent, int messagesRcvd, int questionsInFlight, - Timestamp? lastQuestion, + Timestamp? lastQuestionTs, Timestamp? lastSeenTs, Timestamp? firstConsecutiveSeenTs, int recentLostAnswers, - int failedToSend}); + int failedToSend, + AnswerStats answer}); + + @override + $AnswerStatsCopyWith<$Res> get answer; } /// @nodoc @@ -687,17 +1685,20 @@ class __$$RPCStatsImplCopyWithImpl<$Res> _$RPCStatsImpl _value, $Res Function(_$RPCStatsImpl) _then) : super(_value, _then); + /// Create a copy of RPCStats + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ Object? messagesSent = null, Object? messagesRcvd = null, Object? questionsInFlight = null, - Object? lastQuestion = freezed, + Object? lastQuestionTs = freezed, Object? lastSeenTs = freezed, Object? firstConsecutiveSeenTs = freezed, Object? recentLostAnswers = null, Object? failedToSend = null, + Object? answer = null, }) { return _then(_$RPCStatsImpl( messagesSent: null == messagesSent @@ -712,9 +1713,9 @@ class __$$RPCStatsImplCopyWithImpl<$Res> ? _value.questionsInFlight : questionsInFlight // ignore: cast_nullable_to_non_nullable as int, - lastQuestion: freezed == lastQuestion - ? _value.lastQuestion - : lastQuestion // ignore: cast_nullable_to_non_nullable + lastQuestionTs: freezed == lastQuestionTs + ? _value.lastQuestionTs + : lastQuestionTs // ignore: cast_nullable_to_non_nullable as Timestamp?, lastSeenTs: freezed == lastSeenTs ? _value.lastSeenTs @@ -732,6 +1733,10 @@ class __$$RPCStatsImplCopyWithImpl<$Res> ? _value.failedToSend : failedToSend // ignore: cast_nullable_to_non_nullable as int, + answer: null == answer + ? 
_value.answer + : answer // ignore: cast_nullable_to_non_nullable + as AnswerStats, )); } } @@ -743,11 +1748,12 @@ class _$RPCStatsImpl implements _RPCStats { {required this.messagesSent, required this.messagesRcvd, required this.questionsInFlight, - required this.lastQuestion, + required this.lastQuestionTs, required this.lastSeenTs, required this.firstConsecutiveSeenTs, required this.recentLostAnswers, - required this.failedToSend}); + required this.failedToSend, + required this.answer}); factory _$RPCStatsImpl.fromJson(Map json) => _$$RPCStatsImplFromJson(json); @@ -759,7 +1765,7 @@ class _$RPCStatsImpl implements _RPCStats { @override final int questionsInFlight; @override - final Timestamp? lastQuestion; + final Timestamp? lastQuestionTs; @override final Timestamp? lastSeenTs; @override @@ -768,10 +1774,12 @@ class _$RPCStatsImpl implements _RPCStats { final int recentLostAnswers; @override final int failedToSend; + @override + final AnswerStats answer; @override String toString() { - return 'RPCStats(messagesSent: $messagesSent, messagesRcvd: $messagesRcvd, questionsInFlight: $questionsInFlight, lastQuestion: $lastQuestion, lastSeenTs: $lastSeenTs, firstConsecutiveSeenTs: $firstConsecutiveSeenTs, recentLostAnswers: $recentLostAnswers, failedToSend: $failedToSend)'; + return 'RPCStats(messagesSent: $messagesSent, messagesRcvd: $messagesRcvd, questionsInFlight: $questionsInFlight, lastQuestionTs: $lastQuestionTs, lastSeenTs: $lastSeenTs, firstConsecutiveSeenTs: $firstConsecutiveSeenTs, recentLostAnswers: $recentLostAnswers, failedToSend: $failedToSend, answer: $answer)'; } @override @@ -785,8 +1793,8 @@ class _$RPCStatsImpl implements _RPCStats { other.messagesRcvd == messagesRcvd) && (identical(other.questionsInFlight, questionsInFlight) || other.questionsInFlight == questionsInFlight) && - (identical(other.lastQuestion, lastQuestion) || - other.lastQuestion == lastQuestion) && + (identical(other.lastQuestionTs, lastQuestionTs) || + other.lastQuestionTs == lastQuestionTs) && (identical(other.lastSeenTs, lastSeenTs) || other.lastSeenTs == lastSeenTs) && (identical(other.firstConsecutiveSeenTs, firstConsecutiveSeenTs) || @@ -794,23 +1802,27 @@ class _$RPCStatsImpl implements _RPCStats { (identical(other.recentLostAnswers, recentLostAnswers) || other.recentLostAnswers == recentLostAnswers) && (identical(other.failedToSend, failedToSend) || - other.failedToSend == failedToSend)); + other.failedToSend == failedToSend) && + (identical(other.answer, answer) || other.answer == answer)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash( runtimeType, messagesSent, messagesRcvd, questionsInFlight, - lastQuestion, + lastQuestionTs, lastSeenTs, firstConsecutiveSeenTs, recentLostAnswers, - failedToSend); + failedToSend, + answer); - @JsonKey(ignore: true) + /// Create a copy of RPCStats + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$RPCStatsImplCopyWith<_$RPCStatsImpl> get copyWith => @@ -829,11 +1841,12 @@ abstract class _RPCStats implements RPCStats { {required final int messagesSent, required final int messagesRcvd, required final int questionsInFlight, - required final Timestamp? lastQuestion, + required final Timestamp? lastQuestionTs, required final Timestamp? lastSeenTs, required final Timestamp? 
firstConsecutiveSeenTs, required final int recentLostAnswers, - required final int failedToSend}) = _$RPCStatsImpl; + required final int failedToSend, + required final AnswerStats answer}) = _$RPCStatsImpl; factory _RPCStats.fromJson(Map json) = _$RPCStatsImpl.fromJson; @@ -845,7 +1858,7 @@ abstract class _RPCStats implements RPCStats { @override int get questionsInFlight; @override - Timestamp? get lastQuestion; + Timestamp? get lastQuestionTs; @override Timestamp? get lastSeenTs; @override @@ -855,7 +1868,12 @@ abstract class _RPCStats implements RPCStats { @override int get failedToSend; @override - @JsonKey(ignore: true) + AnswerStats get answer; + + /// Create a copy of RPCStats + /// with the given fields replaced by the non-null parameter values. + @override + @JsonKey(includeFromJson: false, includeToJson: false) _$$RPCStatsImplCopyWith<_$RPCStatsImpl> get copyWith => throw _privateConstructorUsedError; } @@ -869,10 +1887,15 @@ mixin _$PeerStats { Timestamp get timeAdded => throw _privateConstructorUsedError; RPCStats get rpcStats => throw _privateConstructorUsedError; TransferStatsDownUp get transfer => throw _privateConstructorUsedError; + StateStats get state => throw _privateConstructorUsedError; LatencyStats? get latency => throw _privateConstructorUsedError; + /// Serializes this PeerStats to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of PeerStats + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $PeerStatsCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -886,10 +1909,12 @@ abstract class $PeerStatsCopyWith<$Res> { {Timestamp timeAdded, RPCStats rpcStats, TransferStatsDownUp transfer, + StateStats state, LatencyStats? latency}); $RPCStatsCopyWith<$Res> get rpcStats; $TransferStatsDownUpCopyWith<$Res> get transfer; + $StateStatsCopyWith<$Res> get state; $LatencyStatsCopyWith<$Res>? get latency; } @@ -903,12 +1928,15 @@ class _$PeerStatsCopyWithImpl<$Res, $Val extends PeerStats> // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of PeerStats + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ Object? timeAdded = null, Object? rpcStats = null, Object? transfer = null, + Object? state = null, Object? latency = freezed, }) { return _then(_value.copyWith( @@ -924,6 +1952,10 @@ class _$PeerStatsCopyWithImpl<$Res, $Val extends PeerStats> ? _value.transfer : transfer // ignore: cast_nullable_to_non_nullable as TransferStatsDownUp, + state: null == state + ? _value.state + : state // ignore: cast_nullable_to_non_nullable + as StateStats, latency: freezed == latency ? _value.latency : latency // ignore: cast_nullable_to_non_nullable @@ -931,6 +1963,8 @@ class _$PeerStatsCopyWithImpl<$Res, $Val extends PeerStats> ) as $Val); } + /// Create a copy of PeerStats + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $RPCStatsCopyWith<$Res> get rpcStats { @@ -939,6 +1973,8 @@ class _$PeerStatsCopyWithImpl<$Res, $Val extends PeerStats> }); } + /// Create a copy of PeerStats + /// with the given fields replaced by the non-null parameter values. 
@override @pragma('vm:prefer-inline') $TransferStatsDownUpCopyWith<$Res> get transfer { @@ -947,6 +1983,18 @@ class _$PeerStatsCopyWithImpl<$Res, $Val extends PeerStats> }); } + /// Create a copy of PeerStats + /// with the given fields replaced by the non-null parameter values. + @override + @pragma('vm:prefer-inline') + $StateStatsCopyWith<$Res> get state { + return $StateStatsCopyWith<$Res>(_value.state, (value) { + return _then(_value.copyWith(state: value) as $Val); + }); + } + + /// Create a copy of PeerStats + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $LatencyStatsCopyWith<$Res>? get latency { @@ -972,6 +2020,7 @@ abstract class _$$PeerStatsImplCopyWith<$Res> {Timestamp timeAdded, RPCStats rpcStats, TransferStatsDownUp transfer, + StateStats state, LatencyStats? latency}); @override @@ -979,6 +2028,8 @@ abstract class _$$PeerStatsImplCopyWith<$Res> @override $TransferStatsDownUpCopyWith<$Res> get transfer; @override + $StateStatsCopyWith<$Res> get state; + @override $LatencyStatsCopyWith<$Res>? get latency; } @@ -990,12 +2041,15 @@ class __$$PeerStatsImplCopyWithImpl<$Res> _$PeerStatsImpl _value, $Res Function(_$PeerStatsImpl) _then) : super(_value, _then); + /// Create a copy of PeerStats + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ Object? timeAdded = null, Object? rpcStats = null, Object? transfer = null, + Object? state = null, Object? latency = freezed, }) { return _then(_$PeerStatsImpl( @@ -1011,6 +2065,10 @@ class __$$PeerStatsImplCopyWithImpl<$Res> ? _value.transfer : transfer // ignore: cast_nullable_to_non_nullable as TransferStatsDownUp, + state: null == state + ? _value.state + : state // ignore: cast_nullable_to_non_nullable + as StateStats, latency: freezed == latency ? _value.latency : latency // ignore: cast_nullable_to_non_nullable @@ -1026,6 +2084,7 @@ class _$PeerStatsImpl implements _PeerStats { {required this.timeAdded, required this.rpcStats, required this.transfer, + required this.state, this.latency}); factory _$PeerStatsImpl.fromJson(Map json) => @@ -1038,11 +2097,13 @@ class _$PeerStatsImpl implements _PeerStats { @override final TransferStatsDownUp transfer; @override + final StateStats state; + @override final LatencyStats? latency; @override String toString() { - return 'PeerStats(timeAdded: $timeAdded, rpcStats: $rpcStats, transfer: $transfer, latency: $latency)'; + return 'PeerStats(timeAdded: $timeAdded, rpcStats: $rpcStats, transfer: $transfer, state: $state, latency: $latency)'; } @override @@ -1056,15 +2117,18 @@ class _$PeerStatsImpl implements _PeerStats { other.rpcStats == rpcStats) && (identical(other.transfer, transfer) || other.transfer == transfer) && + (identical(other.state, state) || other.state == state) && (identical(other.latency, latency) || other.latency == latency)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => - Object.hash(runtimeType, timeAdded, rpcStats, transfer, latency); + Object.hash(runtimeType, timeAdded, rpcStats, transfer, state, latency); - @JsonKey(ignore: true) + /// Create a copy of PeerStats + /// with the given fields replaced by the non-null parameter values. 
+ @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$PeerStatsImplCopyWith<_$PeerStatsImpl> get copyWith => @@ -1083,6 +2147,7 @@ abstract class _PeerStats implements PeerStats { {required final Timestamp timeAdded, required final RPCStats rpcStats, required final TransferStatsDownUp transfer, + required final StateStats state, final LatencyStats? latency}) = _$PeerStatsImpl; factory _PeerStats.fromJson(Map json) = @@ -1095,9 +2160,14 @@ abstract class _PeerStats implements PeerStats { @override TransferStatsDownUp get transfer; @override - LatencyStats? get latency; + StateStats get state; @override - @JsonKey(ignore: true) + LatencyStats? get latency; + + /// Create a copy of PeerStats + /// with the given fields replaced by the non-null parameter values. + @override + @JsonKey(includeFromJson: false, includeToJson: false) _$$PeerStatsImplCopyWith<_$PeerStatsImpl> get copyWith => throw _privateConstructorUsedError; } @@ -1113,8 +2183,12 @@ mixin _$PeerTableData { String get peerAddress => throw _privateConstructorUsedError; PeerStats get peerStats => throw _privateConstructorUsedError; + /// Serializes this PeerTableData to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of PeerTableData + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $PeerTableDataCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -1143,6 +2217,8 @@ class _$PeerTableDataCopyWithImpl<$Res, $Val extends PeerTableData> // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of PeerTableData + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -1166,6 +2242,8 @@ class _$PeerTableDataCopyWithImpl<$Res, $Val extends PeerTableData> ) as $Val); } + /// Create a copy of PeerTableData + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $PeerStatsCopyWith<$Res> get peerStats { @@ -1200,6 +2278,8 @@ class __$$PeerTableDataImplCopyWithImpl<$Res> _$PeerTableDataImpl _value, $Res Function(_$PeerTableDataImpl) _then) : super(_value, _then); + /// Create a copy of PeerTableData + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -1266,12 +2346,14 @@ class _$PeerTableDataImpl implements _PeerTableData { other.peerStats == peerStats)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, const DeepCollectionEquality().hash(_nodeIds), peerAddress, peerStats); - @JsonKey(ignore: true) + /// Create a copy of PeerTableData + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$PeerTableDataImplCopyWith<_$PeerTableDataImpl> get copyWith => @@ -1300,8 +2382,11 @@ abstract class _PeerTableData implements PeerTableData { String get peerAddress; @override PeerStats get peerStats; + + /// Create a copy of PeerTableData + /// with the given fields replaced by the non-null parameter values. 
@override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$PeerTableDataImplCopyWith<_$PeerTableDataImpl> get copyWith => throw _privateConstructorUsedError; } @@ -1458,6 +2543,8 @@ mixin _$VeilidUpdate { required TResult orElse(), }) => throw _privateConstructorUsedError; + + /// Serializes this VeilidUpdate to a JSON map. Map toJson() => throw _privateConstructorUsedError; } @@ -1477,6 +2564,9 @@ class _$VeilidUpdateCopyWithImpl<$Res, $Val extends VeilidUpdate> final $Val _value; // ignore: unused_field final $Res Function($Val) _then; + + /// Create a copy of VeilidUpdate + /// with the given fields replaced by the non-null parameter values. } /// @nodoc @@ -1496,6 +2586,8 @@ class __$$VeilidLogImplCopyWithImpl<$Res> _$VeilidLogImpl _value, $Res Function(_$VeilidLogImpl) _then) : super(_value, _then); + /// Create a copy of VeilidUpdate + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -1560,11 +2652,13 @@ class _$VeilidLogImpl implements VeilidLog { other.backtrace == backtrace)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, logLevel, message, backtrace); - @JsonKey(ignore: true) + /// Create a copy of VeilidUpdate + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$VeilidLogImplCopyWith<_$VeilidLogImpl> get copyWith => @@ -1739,7 +2833,10 @@ abstract class VeilidLog implements VeilidUpdate { VeilidLogLevel get logLevel; String get message; String? get backtrace; - @JsonKey(ignore: true) + + /// Create a copy of VeilidUpdate + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) _$$VeilidLogImplCopyWith<_$VeilidLogImpl> get copyWith => throw _privateConstructorUsedError; } @@ -1764,6 +2861,8 @@ class __$$VeilidAppMessageImplCopyWithImpl<$Res> $Res Function(_$VeilidAppMessageImpl) _then) : super(_value, _then); + /// Create a copy of VeilidUpdate + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -1827,12 +2926,14 @@ class _$VeilidAppMessageImpl implements VeilidAppMessage { (identical(other.routeId, routeId) || other.routeId == routeId)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, const DeepCollectionEquality().hash(message), sender, routeId); - @JsonKey(ignore: true) + /// Create a copy of VeilidUpdate + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$VeilidAppMessageImplCopyWith<_$VeilidAppMessageImpl> get copyWith => @@ -2009,7 +3110,10 @@ abstract class VeilidAppMessage implements VeilidUpdate { Uint8List get message; Typed? get sender; String? get routeId; - @JsonKey(ignore: true) + + /// Create a copy of VeilidUpdate + /// with the given fields replaced by the non-null parameter values. 
+ @JsonKey(includeFromJson: false, includeToJson: false) _$$VeilidAppMessageImplCopyWith<_$VeilidAppMessageImpl> get copyWith => throw _privateConstructorUsedError; } @@ -2035,6 +3139,8 @@ class __$$VeilidAppCallImplCopyWithImpl<$Res> _$VeilidAppCallImpl _value, $Res Function(_$VeilidAppCallImpl) _then) : super(_value, _then); + /// Create a copy of VeilidUpdate + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -2107,12 +3213,14 @@ class _$VeilidAppCallImpl implements VeilidAppCall { (identical(other.routeId, routeId) || other.routeId == routeId)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, const DeepCollectionEquality().hash(message), callId, sender, routeId); - @JsonKey(ignore: true) + /// Create a copy of VeilidUpdate + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$VeilidAppCallImplCopyWith<_$VeilidAppCallImpl> get copyWith => @@ -2290,7 +3398,10 @@ abstract class VeilidAppCall implements VeilidUpdate { String get callId; Typed? get sender; String? get routeId; - @JsonKey(ignore: true) + + /// Create a copy of VeilidUpdate + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) _$$VeilidAppCallImplCopyWith<_$VeilidAppCallImpl> get copyWith => throw _privateConstructorUsedError; } @@ -2317,6 +3428,8 @@ class __$$VeilidUpdateAttachmentImplCopyWithImpl<$Res> $Res Function(_$VeilidUpdateAttachmentImpl) _then) : super(_value, _then); + /// Create a copy of VeilidUpdate + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -2381,12 +3494,14 @@ class _$VeilidUpdateAttachmentImpl implements VeilidUpdateAttachment { other.localNetworkReady == localNetworkReady)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, state, publicInternetReady, localNetworkReady); - @JsonKey(ignore: true) + /// Create a copy of VeilidUpdate + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$VeilidUpdateAttachmentImplCopyWith<_$VeilidUpdateAttachmentImpl> @@ -2562,7 +3677,10 @@ abstract class VeilidUpdateAttachment implements VeilidUpdate { AttachmentState get state; bool get publicInternetReady; bool get localNetworkReady; - @JsonKey(ignore: true) + + /// Create a copy of VeilidUpdate + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) _$$VeilidUpdateAttachmentImplCopyWith<_$VeilidUpdateAttachmentImpl> get copyWith => throw _privateConstructorUsedError; } @@ -2585,6 +3703,8 @@ class __$$VeilidUpdateNetworkImplCopyWithImpl<$Res> $Res Function(_$VeilidUpdateNetworkImpl) _then) : super(_value, _then); + /// Create a copy of VeilidUpdate + /// with the given fields replaced by the non-null parameter values. 
@pragma('vm:prefer-inline') @override $Res call({ @@ -2662,12 +3782,14 @@ class _$VeilidUpdateNetworkImpl implements VeilidUpdateNetwork { const DeepCollectionEquality().equals(other._peers, _peers)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, started, bpsDown, bpsUp, const DeepCollectionEquality().hash(_peers)); - @JsonKey(ignore: true) + /// Create a copy of VeilidUpdate + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$VeilidUpdateNetworkImplCopyWith<_$VeilidUpdateNetworkImpl> get copyWith => @@ -2845,7 +3967,10 @@ abstract class VeilidUpdateNetwork implements VeilidUpdate { BigInt get bpsDown; BigInt get bpsUp; List get peers; - @JsonKey(ignore: true) + + /// Create a copy of VeilidUpdate + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) _$$VeilidUpdateNetworkImplCopyWith<_$VeilidUpdateNetworkImpl> get copyWith => throw _privateConstructorUsedError; } @@ -2869,6 +3994,8 @@ class __$$VeilidUpdateConfigImplCopyWithImpl<$Res> $Res Function(_$VeilidUpdateConfigImpl) _then) : super(_value, _then); + /// Create a copy of VeilidUpdate + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -2882,6 +4009,8 @@ class __$$VeilidUpdateConfigImplCopyWithImpl<$Res> )); } + /// Create a copy of VeilidUpdate + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $VeilidConfigCopyWith<$Res> get config { @@ -2919,11 +4048,13 @@ class _$VeilidUpdateConfigImpl implements VeilidUpdateConfig { (identical(other.config, config) || other.config == config)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, config); - @JsonKey(ignore: true) + /// Create a copy of VeilidUpdate + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$VeilidUpdateConfigImplCopyWith<_$VeilidUpdateConfigImpl> get copyWith => @@ -3095,7 +4226,10 @@ abstract class VeilidUpdateConfig implements VeilidUpdate { _$VeilidUpdateConfigImpl.fromJson; VeilidConfig get config; - @JsonKey(ignore: true) + + /// Create a copy of VeilidUpdate + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) _$$VeilidUpdateConfigImplCopyWith<_$VeilidUpdateConfigImpl> get copyWith => throw _privateConstructorUsedError; } @@ -3119,6 +4253,8 @@ class __$$VeilidUpdateRouteChangeImplCopyWithImpl<$Res> $Res Function(_$VeilidUpdateRouteChangeImpl) _then) : super(_value, _then); + /// Create a copy of VeilidUpdate + /// with the given fields replaced by the non-null parameter values. 
@pragma('vm:prefer-inline') @override $Res call({ @@ -3188,14 +4324,16 @@ class _$VeilidUpdateRouteChangeImpl implements VeilidUpdateRouteChange { .equals(other._deadRemoteRoutes, _deadRemoteRoutes)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash( runtimeType, const DeepCollectionEquality().hash(_deadRoutes), const DeepCollectionEquality().hash(_deadRemoteRoutes)); - @JsonKey(ignore: true) + /// Create a copy of VeilidUpdate + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$VeilidUpdateRouteChangeImplCopyWith<_$VeilidUpdateRouteChangeImpl> @@ -3370,7 +4508,10 @@ abstract class VeilidUpdateRouteChange implements VeilidUpdate { List get deadRoutes; List get deadRemoteRoutes; - @JsonKey(ignore: true) + + /// Create a copy of VeilidUpdate + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) _$$VeilidUpdateRouteChangeImplCopyWith<_$VeilidUpdateRouteChangeImpl> get copyWith => throw _privateConstructorUsedError; } @@ -3400,6 +4541,8 @@ class __$$VeilidUpdateValueChangeImplCopyWithImpl<$Res> $Res Function(_$VeilidUpdateValueChangeImpl) _then) : super(_value, _then); + /// Create a copy of VeilidUpdate + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -3428,6 +4571,8 @@ class __$$VeilidUpdateValueChangeImplCopyWithImpl<$Res> )); } + /// Create a copy of VeilidUpdate + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $ValueDataCopyWith<$Res>? get value { @@ -3490,12 +4635,14 @@ class _$VeilidUpdateValueChangeImpl implements VeilidUpdateValueChange { (identical(other.value, value) || other.value == value)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, key, const DeepCollectionEquality().hash(_subkeys), count, value); - @JsonKey(ignore: true) + /// Create a copy of VeilidUpdate + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$VeilidUpdateValueChangeImplCopyWith<_$VeilidUpdateValueChangeImpl> @@ -3673,7 +4820,10 @@ abstract class VeilidUpdateValueChange implements VeilidUpdate { List get subkeys; int get count; ValueData? get value; - @JsonKey(ignore: true) + + /// Create a copy of VeilidUpdate + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) _$$VeilidUpdateValueChangeImplCopyWith<_$VeilidUpdateValueChangeImpl> get copyWith => throw _privateConstructorUsedError; } @@ -3689,8 +4839,12 @@ mixin _$VeilidStateAttachment { bool get publicInternetReady => throw _privateConstructorUsedError; bool get localNetworkReady => throw _privateConstructorUsedError; + /// Serializes this VeilidStateAttachment to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of VeilidStateAttachment + /// with the given fields replaced by the non-null parameter values. 
+ @JsonKey(includeFromJson: false, includeToJson: false) $VeilidStateAttachmentCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -3718,6 +4872,8 @@ class _$VeilidStateAttachmentCopyWithImpl<$Res, // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of VeilidStateAttachment + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -3766,6 +4922,8 @@ class __$$VeilidStateAttachmentImplCopyWithImpl<$Res> $Res Function(_$VeilidStateAttachmentImpl) _then) : super(_value, _then); + /// Create a copy of VeilidStateAttachment + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -3825,12 +4983,14 @@ class _$VeilidStateAttachmentImpl implements _VeilidStateAttachment { other.localNetworkReady == localNetworkReady)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, state, publicInternetReady, localNetworkReady); - @JsonKey(ignore: true) + /// Create a copy of VeilidStateAttachment + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$VeilidStateAttachmentImplCopyWith<_$VeilidStateAttachmentImpl> @@ -3860,8 +5020,11 @@ abstract class _VeilidStateAttachment implements VeilidStateAttachment { bool get publicInternetReady; @override bool get localNetworkReady; + + /// Create a copy of VeilidStateAttachment + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$VeilidStateAttachmentImplCopyWith<_$VeilidStateAttachmentImpl> get copyWith => throw _privateConstructorUsedError; } @@ -3877,8 +5040,12 @@ mixin _$VeilidStateNetwork { BigInt get bpsUp => throw _privateConstructorUsedError; List get peers => throw _privateConstructorUsedError; + /// Serializes this VeilidStateNetwork to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of VeilidStateNetwork + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $VeilidStateNetworkCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -3903,6 +5070,8 @@ class _$VeilidStateNetworkCopyWithImpl<$Res, $Val extends VeilidStateNetwork> // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of VeilidStateNetwork + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -3952,6 +5121,8 @@ class __$$VeilidStateNetworkImplCopyWithImpl<$Res> $Res Function(_$VeilidStateNetworkImpl) _then) : super(_value, _then); + /// Create a copy of VeilidStateNetwork + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -4024,12 +5195,14 @@ class _$VeilidStateNetworkImpl implements _VeilidStateNetwork { const DeepCollectionEquality().equals(other._peers, _peers)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, started, bpsDown, bpsUp, const DeepCollectionEquality().hash(_peers)); - @JsonKey(ignore: true) + /// Create a copy of VeilidStateNetwork + /// with the given fields replaced by the non-null parameter values. 
+ @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$VeilidStateNetworkImplCopyWith<_$VeilidStateNetworkImpl> get copyWith => @@ -4062,8 +5235,11 @@ abstract class _VeilidStateNetwork implements VeilidStateNetwork { BigInt get bpsUp; @override List get peers; + + /// Create a copy of VeilidStateNetwork + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$VeilidStateNetworkImplCopyWith<_$VeilidStateNetworkImpl> get copyWith => throw _privateConstructorUsedError; } @@ -4076,8 +5252,12 @@ VeilidStateConfig _$VeilidStateConfigFromJson(Map json) { mixin _$VeilidStateConfig { VeilidConfig get config => throw _privateConstructorUsedError; + /// Serializes this VeilidStateConfig to a JSON map. Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of VeilidStateConfig + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $VeilidStateConfigCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -4103,6 +5283,8 @@ class _$VeilidStateConfigCopyWithImpl<$Res, $Val extends VeilidStateConfig> // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of VeilidStateConfig + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -4116,6 +5298,8 @@ class _$VeilidStateConfigCopyWithImpl<$Res, $Val extends VeilidStateConfig> ) as $Val); } + /// Create a copy of VeilidStateConfig + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $VeilidConfigCopyWith<$Res> get config { @@ -4147,6 +5331,8 @@ class __$$VeilidStateConfigImplCopyWithImpl<$Res> $Res Function(_$VeilidStateConfigImpl) _then) : super(_value, _then); + /// Create a copy of VeilidStateConfig + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -4185,11 +5371,13 @@ class _$VeilidStateConfigImpl implements _VeilidStateConfig { (identical(other.config, config) || other.config == config)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, config); - @JsonKey(ignore: true) + /// Create a copy of VeilidStateConfig + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$VeilidStateConfigImplCopyWith<_$VeilidStateConfigImpl> get copyWith => @@ -4213,8 +5401,11 @@ abstract class _VeilidStateConfig implements VeilidStateConfig { @override VeilidConfig get config; + + /// Create a copy of VeilidStateConfig + /// with the given fields replaced by the non-null parameter values. @override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$VeilidStateConfigImplCopyWith<_$VeilidStateConfigImpl> get copyWith => throw _privateConstructorUsedError; } @@ -4229,8 +5420,12 @@ mixin _$VeilidState { VeilidStateNetwork get network => throw _privateConstructorUsedError; VeilidStateConfig get config => throw _privateConstructorUsedError; + /// Serializes this VeilidState to a JSON map. 
Map toJson() => throw _privateConstructorUsedError; - @JsonKey(ignore: true) + + /// Create a copy of VeilidState + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) $VeilidStateCopyWith get copyWith => throw _privateConstructorUsedError; } @@ -4261,6 +5456,8 @@ class _$VeilidStateCopyWithImpl<$Res, $Val extends VeilidState> // ignore: unused_field final $Res Function($Val) _then; + /// Create a copy of VeilidState + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -4284,6 +5481,8 @@ class _$VeilidStateCopyWithImpl<$Res, $Val extends VeilidState> ) as $Val); } + /// Create a copy of VeilidState + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $VeilidStateAttachmentCopyWith<$Res> get attachment { @@ -4292,6 +5491,8 @@ class _$VeilidStateCopyWithImpl<$Res, $Val extends VeilidState> }); } + /// Create a copy of VeilidState + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $VeilidStateNetworkCopyWith<$Res> get network { @@ -4300,6 +5501,8 @@ class _$VeilidStateCopyWithImpl<$Res, $Val extends VeilidState> }); } + /// Create a copy of VeilidState + /// with the given fields replaced by the non-null parameter values. @override @pragma('vm:prefer-inline') $VeilidStateConfigCopyWith<$Res> get config { @@ -4338,6 +5541,8 @@ class __$$VeilidStateImplCopyWithImpl<$Res> _$VeilidStateImpl _value, $Res Function(_$VeilidStateImpl) _then) : super(_value, _then); + /// Create a copy of VeilidState + /// with the given fields replaced by the non-null parameter values. @pragma('vm:prefer-inline') @override $Res call({ @@ -4394,11 +5599,13 @@ class _$VeilidStateImpl implements _VeilidState { (identical(other.config, config) || other.config == config)); } - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) @override int get hashCode => Object.hash(runtimeType, attachment, network, config); - @JsonKey(ignore: true) + /// Create a copy of VeilidState + /// with the given fields replaced by the non-null parameter values. + @JsonKey(includeFromJson: false, includeToJson: false) @override @pragma('vm:prefer-inline') _$$VeilidStateImplCopyWith<_$VeilidStateImpl> get copyWith => @@ -4427,8 +5634,11 @@ abstract class _VeilidState implements VeilidState { VeilidStateNetwork get network; @override VeilidStateConfig get config; + + /// Create a copy of VeilidState + /// with the given fields replaced by the non-null parameter values. 
@override - @JsonKey(ignore: true) + @JsonKey(includeFromJson: false, includeToJson: false) _$$VeilidStateImplCopyWith<_$VeilidStateImpl> get copyWith => throw _privateConstructorUsedError; } diff --git a/veilid-flutter/lib/veilid_state.g.dart b/veilid-flutter/lib/veilid_state.g.dart index b7e42369..10a982ef 100644 --- a/veilid-flutter/lib/veilid_state.g.dart +++ b/veilid-flutter/lib/veilid_state.g.dart @@ -50,14 +50,98 @@ Map _$$TransferStatsDownUpImplToJson( 'up': instance.up.toJson(), }; +_$StateStatsImpl _$$StateStatsImplFromJson(Map json) => + _$StateStatsImpl( + span: TimestampDuration.fromJson(json['span']), + reliable: TimestampDuration.fromJson(json['reliable']), + unreliable: TimestampDuration.fromJson(json['unreliable']), + dead: TimestampDuration.fromJson(json['dead']), + punished: TimestampDuration.fromJson(json['punished']), + reason: StateReasonStats.fromJson(json['reason']), + ); + +Map _$$StateStatsImplToJson(_$StateStatsImpl instance) => + { + 'span': instance.span.toJson(), + 'reliable': instance.reliable.toJson(), + 'unreliable': instance.unreliable.toJson(), + 'dead': instance.dead.toJson(), + 'punished': instance.punished.toJson(), + 'reason': instance.reason.toJson(), + }; + +_$StateReasonStatsImpl _$$StateReasonStatsImplFromJson( + Map json) => + _$StateReasonStatsImpl( + canNotSend: TimestampDuration.fromJson(json['can_not_send']), + tooManyLostAnswers: + TimestampDuration.fromJson(json['too_many_lost_answers']), + noPingResponse: TimestampDuration.fromJson(json['no_ping_response']), + failedToSend: TimestampDuration.fromJson(json['failed_to_send']), + lostAnswers: TimestampDuration.fromJson(json['lost_answers']), + notSeenConsecutively: + TimestampDuration.fromJson(json['not_seen_consecutively']), + inUnreliablePingSpan: + TimestampDuration.fromJson(json['in_unreliable_ping_span']), + ); + +Map _$$StateReasonStatsImplToJson( + _$StateReasonStatsImpl instance) => + { + 'can_not_send': instance.canNotSend.toJson(), + 'too_many_lost_answers': instance.tooManyLostAnswers.toJson(), + 'no_ping_response': instance.noPingResponse.toJson(), + 'failed_to_send': instance.failedToSend.toJson(), + 'lost_answers': instance.lostAnswers.toJson(), + 'not_seen_consecutively': instance.notSeenConsecutively.toJson(), + 'in_unreliable_ping_span': instance.inUnreliablePingSpan.toJson(), + }; + +_$AnswerStatsImpl _$$AnswerStatsImplFromJson(Map json) => + _$AnswerStatsImpl( + span: TimestampDuration.fromJson(json['span']), + questions: (json['questions'] as num).toInt(), + answers: (json['answers'] as num).toInt(), + lostAnswers: (json['lost_answers'] as num).toInt(), + consecutiveAnswersMaximum: + (json['consecutive_answers_maximum'] as num).toInt(), + consecutiveAnswersAverage: + (json['consecutive_answers_average'] as num).toInt(), + consecutiveAnswersMinimum: + (json['consecutive_answers_minimum'] as num).toInt(), + consecutiveLostAnswersMaximum: + (json['consecutive_lost_answers_maximum'] as num).toInt(), + consecutiveLostAnswersAverage: + (json['consecutive_lost_answers_average'] as num).toInt(), + consecutiveLostAnswersMinimum: + (json['consecutive_lost_answers_minimum'] as num).toInt(), + ); + +Map _$$AnswerStatsImplToJson(_$AnswerStatsImpl instance) => + { + 'span': instance.span.toJson(), + 'questions': instance.questions, + 'answers': instance.answers, + 'lost_answers': instance.lostAnswers, + 'consecutive_answers_maximum': instance.consecutiveAnswersMaximum, + 'consecutive_answers_average': instance.consecutiveAnswersAverage, + 'consecutive_answers_minimum': 
instance.consecutiveAnswersMinimum, + 'consecutive_lost_answers_maximum': + instance.consecutiveLostAnswersMaximum, + 'consecutive_lost_answers_average': + instance.consecutiveLostAnswersAverage, + 'consecutive_lost_answers_minimum': + instance.consecutiveLostAnswersMinimum, + }; + _$RPCStatsImpl _$$RPCStatsImplFromJson(Map json) => _$RPCStatsImpl( messagesSent: (json['messages_sent'] as num).toInt(), messagesRcvd: (json['messages_rcvd'] as num).toInt(), questionsInFlight: (json['questions_in_flight'] as num).toInt(), - lastQuestion: json['last_question'] == null + lastQuestionTs: json['last_question_ts'] == null ? null - : Timestamp.fromJson(json['last_question']), + : Timestamp.fromJson(json['last_question_ts']), lastSeenTs: json['last_seen_ts'] == null ? null : Timestamp.fromJson(json['last_seen_ts']), @@ -66,6 +150,7 @@ _$RPCStatsImpl _$$RPCStatsImplFromJson(Map json) => : Timestamp.fromJson(json['first_consecutive_seen_ts']), recentLostAnswers: (json['recent_lost_answers'] as num).toInt(), failedToSend: (json['failed_to_send'] as num).toInt(), + answer: AnswerStats.fromJson(json['answer']), ); Map _$$RPCStatsImplToJson(_$RPCStatsImpl instance) => @@ -73,11 +158,12 @@ Map _$$RPCStatsImplToJson(_$RPCStatsImpl instance) => 'messages_sent': instance.messagesSent, 'messages_rcvd': instance.messagesRcvd, 'questions_in_flight': instance.questionsInFlight, - 'last_question': instance.lastQuestion?.toJson(), + 'last_question_ts': instance.lastQuestionTs?.toJson(), 'last_seen_ts': instance.lastSeenTs?.toJson(), 'first_consecutive_seen_ts': instance.firstConsecutiveSeenTs?.toJson(), 'recent_lost_answers': instance.recentLostAnswers, 'failed_to_send': instance.failedToSend, + 'answer': instance.answer.toJson(), }; _$PeerStatsImpl _$$PeerStatsImplFromJson(Map json) => @@ -85,6 +171,7 @@ _$PeerStatsImpl _$$PeerStatsImplFromJson(Map json) => timeAdded: Timestamp.fromJson(json['time_added']), rpcStats: RPCStats.fromJson(json['rpc_stats']), transfer: TransferStatsDownUp.fromJson(json['transfer']), + state: StateStats.fromJson(json['state']), latency: json['latency'] == null ? 
null : LatencyStats.fromJson(json['latency']), @@ -95,6 +182,7 @@ Map _$$PeerStatsImplToJson(_$PeerStatsImpl instance) => 'time_added': instance.timeAdded.toJson(), 'rpc_stats': instance.rpcStats.toJson(), 'transfer': instance.transfer.toJson(), + 'state': instance.state.toJson(), 'latency': instance.latency?.toJson(), }; diff --git a/veilid-python/pyproject.toml b/veilid-python/pyproject.toml index 737fc5fe..efea2645 100644 --- a/veilid-python/pyproject.toml +++ b/veilid-python/pyproject.toml @@ -9,7 +9,7 @@ readme = "README.md" packages = [{ include = "veilid" }] [tool.poetry.dependencies] -python = "^3.11" +python = "^3.12.5" jsonschema = "^4.17.3" [tool.poetry.group.dev.dependencies] diff --git a/veilid-python/tests/test_dht.py b/veilid-python/tests/test_dht.py index e0c39ae0..0e9b479e 100644 --- a/veilid-python/tests/test_dht.py +++ b/veilid-python/tests/test_dht.py @@ -395,13 +395,17 @@ async def test_dht_integration_writer_reader(): print(f' {n}') print('syncing records to the network') + recleft = len(records) for desc0 in records: while True: rr = await rc0.inspect_dht_record(desc0.key, []) - if len(rr.offline_subkeys) == 0: + left = 0; [left := left + (x[1]-x[0]+1) for x in rr.offline_subkeys] + if left == 0: await rc0.close_dht_record(desc0.key) break - time.sleep(1) + print(f' {recleft} records {left} subkeys left') + time.sleep(0.1) + recleft-=1 # read dht records on server 1 print(f'reading {COUNT} records') @@ -455,19 +459,31 @@ async def test_dht_write_read_local(): print(f' {n}') - print(f'syncing records to the network') - for desc0 in records: - while True: + print('syncing records to the network') + + syncrecords = records.copy() + while len(syncrecords) > 0: + donerecords = set() + subkeysleft = 0 + for desc0 in records: rr = await rc0.inspect_dht_record(desc0.key, []) - if len(rr.offline_subkeys) == 0: - await rc0.close_dht_record(desc0.key) - break - time.sleep(0.1) + left = 0; [left := left + (x[1]-x[0]+1) for x in rr.offline_subkeys] + if left == 0: + donerecords.add(desc0) + else: + subkeysleft += left + syncrecords = [x for x in syncrecords if x not in donerecords] + print(f' {len(syncrecords)} records {subkeysleft} subkeys left') + time.sleep(1) + + await api0.debug("record purge local") + await api0.debug("record purge remote") # read dht records on server 0 print(f'reading {COUNT} records') n = 0 for desc0 in records: + await rc0.close_dht_record(desc0.key) desc1 = await rc0.open_dht_record(desc0.key) vd0 = await rc0.get_dht_value(desc1.key, ValueSubkey(0), force_refresh=True) diff --git a/veilid-python/veilid/json_api.py b/veilid-python/veilid/json_api.py index fb123b37..50ab0328 100644 --- a/veilid-python/veilid/json_api.py +++ b/veilid-python/veilid/json_api.py @@ -2,6 +2,8 @@ import asyncio import importlib.resources as importlib_resources import json import os +import traceback + from typing import Awaitable, Callable, Optional, Self from jsonschema import exceptions, validators @@ -49,6 +51,8 @@ from .types import ( urlsafe_b64decode_no_pad, ) +_STREAM_LIMIT = (65536 * 4) + ############################################################## @@ -139,17 +143,15 @@ class _JsonVeilidAPI(VeilidAPI): self.lock.release() # Cancel it handle_recv_messages_task.cancel() - try: - await handle_recv_messages_task - except asyncio.CancelledError: - pass + await handle_recv_messages_task + self.done = True @classmethod async def connect( cls, host: str, port: int, update_callback: Callable[[VeilidUpdate], Awaitable] ) -> Self: - reader, writer = await 
asyncio.open_connection(host, port) + reader, writer = await asyncio.open_connection(host, port, limit=_STREAM_LIMIT) veilid_api = cls(reader, writer, update_callback) veilid_api.handle_recv_messages_task = asyncio.create_task( veilid_api.handle_recv_messages(), name="JsonVeilidAPI.handle_recv_messages" @@ -173,9 +175,9 @@ class _JsonVeilidAPI(VeilidAPI): lambda: protocol, path, **kwds) writer = asyncio.StreamWriter(transport, protocol, reader, loop) return reader, writer - reader, writer = await open_windows_pipe(ipc_path) + reader, writer = await open_windows_pipe(ipc_path, limit=_STREAM_LIMIT) else: - reader, writer = await asyncio.open_unix_connection(ipc_path) + reader, writer = await asyncio.open_unix_connection(ipc_path, limit=_STREAM_LIMIT) veilid_api = cls(reader, writer, update_callback) veilid_api.handle_recv_messages_task = asyncio.create_task( @@ -211,12 +213,15 @@ class _JsonVeilidAPI(VeilidAPI): if self.validate_schema: _schema_validate(_VALIDATOR_RECV_MESSAGE, j) - # Process the message if j["type"] == "Response": await self.handle_recv_message_response(j) elif j["type"] == "Update": await self.update_callback(VeilidUpdate.from_json(j)) + except ValueError: + pass + except asyncio.CancelledError: + pass finally: await self._cleanup_close() @@ -236,6 +241,7 @@ class _JsonVeilidAPI(VeilidAPI): try: reqfuture = self.in_flight_requests.pop(id, None) if reqfuture is not None: reqfuture.cancel() finally: self.lock.release() @@ -267,6 +273,9 @@ class _JsonVeilidAPI(VeilidAPI): id = self.next_id self.next_id += 1 writer = self.writer + + if self.writer is None: + return finally: self.lock.release() diff --git a/veilid-python/veilid/schema/RecvMessage.json b/veilid-python/veilid/schema/RecvMessage.json index ba59e0e7..45d6d3ce 100644 --- a/veilid-python/veilid/schema/RecvMessage.json +++ b/veilid-python/veilid/schema/RecvMessage.json @@ -2797,6 +2797,82 @@ } ], "definitions": { + "AnswerStats": { + "description": "Measurement of round-trip RPC question/answer performance", + "type": "object", + "required": [ + "answers", + "consecutive_answers_average", + "consecutive_answers_maximum", + "consecutive_answers_minimum", + "consecutive_lost_answers_average", + "consecutive_lost_answers_maximum", + "consecutive_lost_answers_minimum", + "lost_answers", + "questions", + "span" + ], + "properties": { + "answers": { + "description": "number of answers received in this span", + "type": "integer", + "format": "uint32", + "minimum": 0.0 + }, + "consecutive_answers_average": { + "description": "average number of received answers before a lost answer in this span", + "type": "integer", + "format": "uint32", + "minimum": 0.0 + }, + "consecutive_answers_maximum": { + "description": "maximum number of received answers before a lost answer in this span", + "type": "integer", + "format": "uint32", + "minimum": 0.0 + }, + "consecutive_answers_minimum": { + "description": "minimum number of received answers before a lost answer in this span", + "type": "integer", + "format": "uint32", + "minimum": 0.0 + }, + "consecutive_lost_answers_average": { + "description": "average number of timeouts before a received answer in this span", + "type": "integer", + "format": "uint32", + "minimum": 0.0 + }, + "consecutive_lost_answers_maximum": { + "description": "maximum number of timeouts before a received answer in this span", + "type": "integer", + "format": "uint32", + "minimum": 0.0 + }, + "consecutive_lost_answers_minimum": { + "description": "minimum number of timeouts before a received answer
in this span", + "type": "integer", + "format": "uint32", + "minimum": 0.0 + }, + "lost_answers": { + "description": "number of lost answers in this span", + "type": "integer", + "format": "uint32", + "minimum": 0.0 + }, + "questions": { + "description": "number of questions sent in this span", + "type": "integer", + "format": "uint32", + "minimum": 0.0 + }, + "span": { + "description": "total amount of time measured", + "type": "string" + } + } + }, "AttachmentState": { "description": "Attachment abstraction for network 'signal strength'.", "type": "string", @@ -3006,6 +3082,7 @@ "minItems": 4 }, "LatencyStats": { + "description": "Measurement of communications latency to this node over all RPC questions", "type": "object", "required": [ "average", @@ -3014,12 +3091,15 @@ ], "properties": { "average": { + "description": "average latency over the ROLLING_LATENCIES_SIZE last latencies", "type": "string" }, "fastest": { + "description": "fastest latency in the ROLLING_LATENCIES_SIZE last latencies", "type": "string" }, "slowest": { + "description": "slowest latency in the ROLLING_LATENCIES_SIZE last latencies", "type": "string" } } @@ -3040,14 +3120,15 @@ } }, "PeerStats": { + "description": "Statistics for a peer in the routing table", "type": "object", "required": [ - "rpc_stats", - "time_added", - "transfer" + "time_added" ], "properties": { "latency": { + "description": "latency stats for this peer", + "default": null, "anyOf": [ { "$ref": "#/definitions/LatencyStats" @@ -3058,13 +3139,84 @@ ] }, "rpc_stats": { - "$ref": "#/definitions/RPCStats" + "description": "information about RPCs", + "default": { + "answer": { + "answers": 0, + "consecutive_answers_average": 0, + "consecutive_answers_maximum": 0, + "consecutive_answers_minimum": 0, + "consecutive_lost_answers_average": 0, + "consecutive_lost_answers_maximum": 0, + "consecutive_lost_answers_minimum": 0, + "lost_answers": 0, + "questions": 0, + "span": "0" + }, + "failed_to_send": 0, + "first_consecutive_seen_ts": null, + "last_question_ts": null, + "last_seen_ts": null, + "messages_rcvd": 0, + "messages_sent": 0, + "questions_in_flight": 0, + "recent_lost_answers": 0 + }, + "allOf": [ + { + "$ref": "#/definitions/RPCStats" + } + ] + }, + "state": { + "description": "state stats for this peer", + "default": { + "dead": "0", + "punished": "0", + "reason": { + "can_not_send": "0", + "failed_to_send": "0", + "in_unreliable_ping_span": "0", + "lost_answers": "0", + "no_ping_response": "0", + "not_seen_consecutively": "0", + "too_many_lost_answers": "0" + }, + "reliable": "0", + "span": "0", + "unreliable": "0" + }, + "allOf": [ + { + "$ref": "#/definitions/StateStats" + } + ] }, "time_added": { + "description": "when the peer was added to the routing table", "type": "string" }, "transfer": { - "$ref": "#/definitions/TransferStatsDownUp" + "description": "transfer stats for this peer", + "default": { + "down": { + "average": "0", + "maximum": "0", + "minimum": "0", + "total": "0" + }, + "up": { + "average": "0", + "maximum": "0", + "minimum": "0", + "total": "0" + } + }, + "allOf": [ + { + "$ref": "#/definitions/TransferStatsDownUp" + } + ] } } }, @@ -3099,6 +3251,7 @@ } }, "RPCStats": { + "description": "Statistics for RPC operations performed on a node", "type": "object", "required": [ "failed_to_send", @@ -3108,45 +3261,73 @@ "recent_lost_answers" ], "properties": { + "answer": { + "description": "rpc answer stats for this peer", + "default": { + "answers": 0, + "consecutive_answers_average": 0, + "consecutive_answers_maximum": 0, + 
"consecutive_answers_minimum": 0, + "consecutive_lost_answers_average": 0, + "consecutive_lost_answers_maximum": 0, + "consecutive_lost_answers_minimum": 0, + "lost_answers": 0, + "questions": 0, + "span": "0" + }, + "allOf": [ + { + "$ref": "#/definitions/AnswerStats" + } + ] + }, "failed_to_send": { + "description": "number of messages that have failed to send or connections dropped since we last successfully sent one", "type": "integer", "format": "uint32", "minimum": 0.0 }, "first_consecutive_seen_ts": { + "description": "the timestamp of the first consecutive proof-of-life for this node (an answer or received question)", "type": [ "string", "null" ] }, "last_question_ts": { + "description": "when the peer was last questioned (either successfully or not) and we wanted an answer", "type": [ "string", "null" ] }, "last_seen_ts": { + "description": "when the peer was last seen for any reason, including when we first attempted to reach out to it", "type": [ "string", "null" ] }, "messages_rcvd": { + "description": "number of rpcs that have been received in the total entry time range", "type": "integer", "format": "uint32", "minimum": 0.0 }, "messages_sent": { + "description": "number of rpcs that have been sent in the total entry time range", "type": "integer", "format": "uint32", "minimum": 0.0 }, "questions_in_flight": { + "description": "number of questions issued that have yet to be answered", "type": "integer", "format": "uint32", "minimum": 0.0 }, "recent_lost_answers": { + "description": "number of answers that have been lost consecutively", "type": "integer", "format": "uint32", "minimum": 0.0 @@ -3239,7 +3420,101 @@ "Reliable" ] }, + "StateReasonStats": { + "description": "Measurement of what state reasons the node has been in over a time span", + "type": "object", + "required": [ + "can_not_send", + "failed_to_send", + "in_unreliable_ping_span", + "lost_answers", + "no_ping_response", + "not_seen_consecutively", + "too_many_lost_answers" + ], + "properties": { + "can_not_send": { + "description": "time spent dead due to being unable to send", + "type": "string" + }, + "failed_to_send": { + "description": "time spent unreliable because of failures to send", + "type": "string" + }, + "in_unreliable_ping_span": { + "description": "time spent unreliable because we are in the unreliable ping span", + "type": "string" + }, + "lost_answers": { + "description": "time spent unreliable because of lost answers", + "type": "string" + }, + "no_ping_response": { + "description": "time spent dead because of no ping response", + "type": "string" + }, + "not_seen_consecutively": { + "description": "time spent unreliable because of not being seen consecutively", + "type": "string" + }, + "too_many_lost_answers": { + "description": "time spent dead because of too many lost answers", + "type": "string" + } + } + }, + "StateStats": { + "description": "Measurement of what states the node has been in over a time span", + "type": "object", + "required": [ + "dead", + "punished", + "reliable", + "span", + "unreliable" + ], + "properties": { + "dead": { + "description": "amount of time spent in a dead state", + "type": "string" + }, + "punished": { + "description": "amount of time spent in a punished state", + "type": "string" + }, + "reason": { + "description": "state reason stats for this peer", + "default": { + "can_not_send": "0", + "failed_to_send": "0", + "in_unreliable_ping_span": "0", + "lost_answers": "0", + "no_ping_response": "0", + "not_seen_consecutively": "0", + "too_many_lost_answers": "0" 
+ }, + "allOf": [ + { + "$ref": "#/definitions/StateReasonStats" + } + ] + }, + "reliable": { + "description": "amount of time spent in a reliable state", + "type": "string" + }, + "span": { + "description": "total amount of time measured", + "type": "string" + }, + "unreliable": { + "description": "amount of time spent in an unreliable state", + "type": "string" + } + } + }, "TransferStats": { + "description": "Measurement of how much data has transferred to or from this node over a time span", "type": "object", "required": [ "average", @@ -3249,20 +3524,25 @@ ], "properties": { "average": { + "description": "average rate over the ROLLING_TRANSFERS_SIZE last amounts", "type": "string" }, "maximum": { + "description": "maximum rate over the ROLLING_TRANSFERS_SIZE last amounts", "type": "string" }, "minimum": { + "description": "minimum rate over the ROLLING_TRANSFERS_SIZE last amounts", "type": "string" }, "total": { + "description": "total amount transferred ever", "type": "string" } } }, "TransferStatsDownUp": { + "description": "Transfer statistics from a node to our own (down) and", "type": "object", "required": [ "down", @@ -3603,7 +3883,7 @@ } }, "VeilidConfigDHT": { - "description": "Configure the Distributed Hash Table (DHT).", + "description": "Configure the Distributed Hash Table (DHT). Defaults should be used here unless you are absolutely sure you know what you're doing. If you change the count/fanout/timeout parameters, you may render your node inoperable for correct DHT operations.", "type": "object", "required": [ "get_value_count", @@ -3795,6 +4075,7 @@ } }, "VeilidConfigInner": { + "description": "Top level of the Veilid configuration tree", "type": "object", "required": [ "block_store", @@ -3807,25 +4088,52 @@ ], "properties": { "block_store": { - "$ref": "#/definitions/VeilidConfigBlockStore" + "description": "Configuring the block store (storage of large content-addressable content)", + "allOf": [ + { + "$ref": "#/definitions/VeilidConfigBlockStore" + } + ] }, "capabilities": { - "$ref": "#/definitions/VeilidConfigCapabilities" + "description": "Capabilities to enable for your application/node", + "allOf": [ + { + "$ref": "#/definitions/VeilidConfigCapabilities" + } + ] }, "namespace": { + "description": "To run multiple Veilid nodes within the same application, either through a single process running api_startup/api_startup_json multiple times, or your application running mulitple times side-by-side there needs to be a key used to partition the application's storage (in the TableStore, ProtectedStore, etc). An empty value here is the default, but if you run multiple veilid nodes concurrently, you should set this to a string that uniquely identifies this -instance- within the same 'program_name'. Must be a valid filename for all Veilid-capable systems, which means no backslashes or forward slashes in the name. Stick to a-z,0-9,_ and space and you should be fine.", "type": "string" }, "network": { - "$ref": "#/definitions/VeilidConfigNetwork" + "description": "Configuring how Veilid interacts with the low level network", + "allOf": [ + { + "$ref": "#/definitions/VeilidConfigNetwork" + } + ] }, "program_name": { + "description": "An identifier used to describe the program using veilid-core. Used to partition storage locations in places like the ProtectedStore. Must be non-empty and a valid filename for all Veilid-capable systems, which means no backslashes or forward slashes in the name. 
Stick to a-z,0-9,_ and space and you should be fine.\n\nCaution: If you change this string, there is no migration support. Your app's protected store and table store will very likely experience data loss. Pick a program name and stick with it. This is not a 'visible' identifier and it should uniquely identify your application.", "type": "string" }, "protected_store": { - "$ref": "#/definitions/VeilidConfigProtectedStore" + "description": "Configuring the protected store (keychain/keyring/etc)", + "allOf": [ + { + "$ref": "#/definitions/VeilidConfigProtectedStore" + } + ] }, "table_store": { - "$ref": "#/definitions/VeilidConfigTableStore" + "description": "Configuring the table store (persistent encrypted database)", + "allOf": [ + { + "$ref": "#/definitions/VeilidConfigTableStore" + } + ] } } }, diff --git a/veilid-python/veilid/state.py b/veilid-python/veilid/state.py index 1a86e1be..d65c708b 100644 --- a/veilid-python/veilid/state.py +++ b/veilid-python/veilid/state.py @@ -52,6 +52,60 @@ class VeilidStateAttachment: ) + +class AnswerStats: + span: TimestampDuration + questions: int + answers: int + lost_answers: int + consecutive_answers_maximum: int + consecutive_answers_average: int + consecutive_answers_minimum: int + consecutive_lost_answers_maximum: int + consecutive_lost_answers_average: int + consecutive_lost_answers_minimum: int + + def __init__( + self, + span: TimestampDuration, + questions: int, + answers: int, + lost_answers: int, + consecutive_answers_maximum: int, + consecutive_answers_average: int, + consecutive_answers_minimum: int, + consecutive_lost_answers_maximum: int, + consecutive_lost_answers_average: int, + consecutive_lost_answers_minimum: int, + ): + self.span = span + self.questions = questions + self.answers = answers + self.lost_answers = lost_answers + self.consecutive_answers_maximum = consecutive_answers_maximum + self.consecutive_answers_average = consecutive_answers_average + self.consecutive_answers_minimum = consecutive_answers_minimum + self.consecutive_lost_answers_maximum = consecutive_lost_answers_maximum + self.consecutive_lost_answers_average = consecutive_lost_answers_average + self.consecutive_lost_answers_minimum = consecutive_lost_answers_minimum + + + @classmethod + def from_json(cls, j: dict) -> Self: + """JSON object hook""" + return cls( + j["span"], + j["questions"], + j["answers"], + j["lost_answers"], + j["consecutive_answers_maximum"], + j["consecutive_answers_average"], + j["consecutive_answers_minimum"], + j["consecutive_lost_answers_maximum"], + j["consecutive_lost_answers_average"], + j["consecutive_lost_answers_minimum"], + ) + class RPCStats: messages_sent: int messages_rcvd: int @@ -61,6 +115,7 @@ class RPCStats: first_consecutive_seen_ts: Optional[Timestamp] recent_lost_answers: int failed_to_send: int + answer: AnswerStats def __init__( self, @@ -72,6 +127,7 @@ class RPCStats: first_consecutive_seen_ts: Optional[Timestamp], recent_lost_answers: int, failed_to_send: int, + answer: AnswerStats, ): self.messages_sent = messages_sent self.messages_rcvd = messages_rcvd @@ -81,6 +137,7 @@ class RPCStats: self.first_consecutive_seen_ts = first_consecutive_seen_ts self.recent_lost_answers = recent_lost_answers self.failed_to_send = failed_to_send + self.answer = answer @classmethod def from_json(cls, j: dict) -> Self: @@ -96,6 +153,7 @@ class RPCStats: else Timestamp(j["first_consecutive_seen_ts"]), j["recent_lost_answers"], j["failed_to_send"], + AnswerStats.from_json(j["answer"]), ) @@ -166,12 +224,89 @@ class TransferStatsDownUp: 
"""JSON object hook""" return cls(TransferStats.from_json(j["down"]), TransferStats.from_json(j["up"])) +class StateReasonStats: + can_not_send: TimestampDuration + too_many_lost_answers: TimestampDuration + no_ping_response: TimestampDuration + failed_to_send: TimestampDuration + lost_answers: TimestampDuration + not_seen_consecutively: TimestampDuration + in_unreliable_ping_span: TimestampDuration + + def __init__( + self, + can_not_send: TimestampDuration, + too_many_lost_answers: TimestampDuration, + no_ping_response: TimestampDuration, + failed_to_send: TimestampDuration, + lost_answers: TimestampDuration, + not_seen_consecutively: TimestampDuration, + in_unreliable_ping_span: TimestampDuration, + ): + self.can_not_send = can_not_send + self.too_many_lost_answers = too_many_lost_answers + self.no_ping_response = no_ping_response + self.failed_to_send = failed_to_send + self.lost_answers = lost_answers + self.not_seen_consecutively = not_seen_consecutively + self.in_unreliable_ping_span = in_unreliable_ping_span + + @classmethod + def from_json(cls, j: dict) -> Self: + """JSON object hook""" + return cls( + j["can_not_send"], + j["too_many_lost_answers"], + j["no_ping_response"], + j["failed_to_send"], + j["lost_answers"], + j["not_seen_consecutively"], + j["in_unreliable_ping_span"], + ) + +class StateStats: + span: TimestampDuration + reliable: TimestampDuration + unreliable: TimestampDuration + dead: TimestampDuration + punished: TimestampDuration + reason: StateReasonStats + + def __init__( + self, + span: TimestampDuration, + reliable: TimestampDuration, + unreliable: TimestampDuration, + dead: TimestampDuration, + punished: TimestampDuration, + reason: StateReasonStats, + ): + self.span = span + self.reliable = reliable + self.unreliable = unreliable + self.dead = dead + self.punished = punished + self.reason = reason + + @classmethod + def from_json(cls, j: dict) -> Self: + """JSON object hook""" + return cls( + j["span"], + j["reliable"], + j["unreliable"], + j["dead"], + j["punished"], + StateReasonStats.from_json(j["reason"]), + ) + class PeerStats: time_added: Timestamp rpc_stats: RPCStats latency: Optional[LatencyStats] transfer: TransferStatsDownUp + state: StateStats def __init__( self, @@ -179,11 +314,13 @@ class PeerStats: rpc_stats: RPCStats, latency: Optional[LatencyStats], transfer: TransferStatsDownUp, + state: StateStats, ): self.time_added = time_added self.rpc_stats = rpc_stats self.latency = latency self.transfer = transfer + self.state = state @classmethod def from_json(cls, j: dict) -> Self: @@ -193,6 +330,7 @@ class PeerStats: RPCStats.from_json(j["rpc_stats"]), None if j["latency"] is None else LatencyStats.from_json(j["latency"]), TransferStatsDownUp.from_json(j["transfer"]), + StateStats.from_json(j["state"]), ) diff --git a/veilid-server/src/client_api.rs b/veilid-server/src/client_api.rs index cef2d5bc..47dc0886 100644 --- a/veilid-server/src/client_api.rs +++ b/veilid-server/src/client_api.rs @@ -133,7 +133,13 @@ impl ClientApi { // Make wait group for all incoming connections let awg = AsyncWaitGroup::new(); - let stop_token = self.inner.lock().stop.as_ref().unwrap().token(); + let stop_token = match self.inner.lock().stop.as_ref() { + Some(stop) => stop.token(), + None => { + debug!(target: "client_api", "Already stopped"); + return Ok(()); + } + }; while let Ok(Some(stream_result)) = incoming_stream.next().timeout_at(stop_token.clone()).await { @@ -174,7 +180,13 @@ impl ClientApi { // Make wait group for all incoming connections let awg = 
AsyncWaitGroup::new(); - let stop_token = self.inner.lock().stop.as_ref().unwrap().token(); + let stop_token = match self.inner.lock().stop.as_ref() { + Some(stop) => stop.token(), + None => { + debug!(target: "client_api", "Already stopped"); + return Ok(()); + } + }; while let Ok(Some(stream_result)) = incoming_stream.next().timeout_at(stop_token.clone()).await { diff --git a/veilid-server/src/main.rs b/veilid-server/src/main.rs index e8ef7970..be71a51b 100644 --- a/veilid-server/src/main.rs +++ b/veilid-server/src/main.rs @@ -53,7 +53,7 @@ pub struct CmdlineArgs { foreground: bool, /// Specify a configuration file to use - #[arg(short, long, value_name = "FILE", default_value = OsString::from(Settings::get_default_config_path()))] + #[arg(short, long, value_name = "FILE", default_value = OsString::from(Settings::get_default_veilid_server_conf_path()))] config_file: Option, /// Specify configuration value to set (key in dot format, value in json format), eg: logging.api.enabled=true diff --git a/veilid-server/src/settings.rs b/veilid-server/src/settings.rs index 5c4cce3e..e9678e25 100644 --- a/veilid-server/src/settings.rs +++ b/veilid-server/src/settings.rs @@ -196,23 +196,23 @@ core: ) .replace( "%TABLE_STORE_DIRECTORY%", - &VeilidConfigTableStore::default().directory, + &Settings::get_default_table_store_directory().to_string_lossy(), ) .replace( "%BLOCK_STORE_DIRECTORY%", - &VeilidConfigBlockStore::default().directory, + &Settings::get_default_block_store_directory().to_string_lossy(), ) .replace( "%DIRECTORY%", - &VeilidConfigProtectedStore::default().directory, + &Settings::get_default_protected_store_directory().to_string_lossy(), ) .replace( "%CERTIFICATE_PATH%", - &VeilidConfigTLS::default().certificate_path, + &Settings::get_default_tls_certificate_path().to_string_lossy(), ) .replace( "%PRIVATE_KEY_PATH%", - &VeilidConfigTLS::default().private_key_path, + &Settings::get_default_tls_private_key_path().to_string_lossy(), ) .replace( "%REMOTE_MAX_SUBKEY_CACHE_MEMORY_MB%", @@ -860,18 +860,23 @@ impl Settings { /// `C:\Users\\AppData\Roaming\Veilid\Veilid`, and for macOS, at /// `/Users//Library/Application Support/org.Veilid.Veilid` /// - pub fn get_default_config_path() -> PathBuf { + pub fn get_default_config_path(subpath: &str) -> PathBuf { #[cfg(unix)] { - let default_path = PathBuf::from("/etc/veilid-server/veilid-server.conf"); - if default_path.exists() { - return default_path; + let globalpath = PathBuf::from("/var/db/veilid-server"); + + if globalpath.exists() { + return globalpath.join(subpath); } } - ProjectDirs::from("org", "Veilid", "Veilid") - .map(|dirs| dirs.config_dir().join("veilid-server.conf")) - .unwrap_or_else(|| PathBuf::from("./veilid-server.conf")) + let mut ts_path = if let Some(my_proj_dirs) = ProjectDirs::from("org", "Veilid", "Veilid") { + PathBuf::from(my_proj_dirs.config_dir()) + } else { + PathBuf::from("./") + }; + ts_path.push(subpath); + ts_path } /// Determine default flamegraph output path @@ -935,6 +940,25 @@ impl Settings { } } + pub fn get_default_veilid_server_conf_path() -> PathBuf { + Settings::get_default_config_path("veilid-server.conf") + } + pub fn get_default_table_store_directory() -> PathBuf { + Settings::get_default_directory("table_store") + } + pub fn get_default_block_store_directory() -> PathBuf { + Settings::get_default_directory("block_store") + } + pub fn get_default_protected_store_directory() -> PathBuf { + Settings::get_default_directory("protected_store") + } + pub fn get_default_tls_certificate_path() -> PathBuf { + 
Settings::get_default_config_path("ssl/certs/server.crt") + } + pub fn get_default_tls_private_key_path() -> PathBuf { + Settings::get_default_config_path("ssl/keys/server.key") + } + pub fn get_default_remote_max_subkey_cache_memory_mb() -> u32 { if sysinfo::IS_SUPPORTED_SYSTEM { ((SYSTEM.free_memory() / (1024u64 * 1024u64)) / 16) as u32 @@ -1594,13 +1618,17 @@ mod tests { assert_eq!( s.core.table_store.directory, - VeilidConfigTableStore::default().directory, + Settings::get_default_table_store_directory() + .to_string_lossy() + .to_string() ); assert!(!s.core.table_store.delete); assert_eq!( s.core.block_store.directory, - VeilidConfigBlockStore::default().directory, + Settings::get_default_block_store_directory() + .to_string_lossy() + .to_string() ); assert!(!s.core.block_store.delete); @@ -1608,7 +1636,9 @@ mod tests { assert!(s.core.protected_store.always_use_insecure_storage); assert_eq!( s.core.protected_store.directory, - VeilidConfigProtectedStore::default().directory + Settings::get_default_protected_store_directory() + .to_string_lossy() + .to_string() ); assert!(!s.core.protected_store.delete); assert_eq!(s.core.protected_store.device_encryption_key_password, ""); @@ -1669,11 +1699,15 @@ mod tests { // assert_eq!( s.core.network.tls.certificate_path, - VeilidConfigTLS::default().certificate_path + Settings::get_default_tls_certificate_path() + .to_string_lossy() + .to_string() ); assert_eq!( s.core.network.tls.private_key_path, - VeilidConfigTLS::default().private_key_path + Settings::get_default_tls_private_key_path() + .to_string_lossy() + .to_string() ); assert_eq!(s.core.network.tls.connection_initial_timeout_ms, 2_000u32); // diff --git a/veilid-tools/src/deferred_stream_processor.rs b/veilid-tools/src/deferred_stream_processor.rs index c3745a84..5afdb252 100644 --- a/veilid-tools/src/deferred_stream_processor.rs +++ b/veilid-tools/src/deferred_stream_processor.rs @@ -9,6 +9,7 @@ use super::*; /// Background processor for streams /// Handles streams to completion, passing each item from the stream to a callback +#[derive(Debug)] pub struct DeferredStreamProcessor { pub opt_deferred_stream_channel: Option>>, pub opt_stopper: Option, @@ -98,9 +99,9 @@ impl DeferredStreamProcessor { /// * 'handler' is the callback to handle each item from the stream /// /// Returns 'true' if the stream was added for processing, and 'false' if the stream could not be added, possibly due to not being initialized. 
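+ /// + /// Usage sketch (illustrative names; assumes flume's `async` feature for `into_stream()`): + /// `processor.add(receiver.into_stream(), |item| Box::pin(async move { /* handle item; return true to keep processing */ true }));`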
- pub fn add( + pub fn add + Unpin + Send + 'static>( &mut self, - receiver: flume::Receiver, + mut receiver: S, mut handler: impl FnMut(T) -> SendPinBoxFuture + Send + 'static, ) -> bool { let Some(st) = self.opt_stopper.as_ref().map(|s| s.token()) else { @@ -110,7 +111,7 @@ impl DeferredStreamProcessor { return false; }; let drp = Box::pin(async move { - while let Ok(Ok(res)) = receiver.recv_async().timeout_at(st.clone()).await { + while let Ok(Some(res)) = receiver.next().timeout_at(st.clone()).await { if !handler(res).await { break; } diff --git a/veilid-tools/src/future_queue.rs b/veilid-tools/src/future_queue.rs new file mode 100644 index 00000000..1a3a828b --- /dev/null +++ b/veilid-tools/src/future_queue.rs @@ -0,0 +1,21 @@ +use super::*; +use futures_util::StreamExt as _; +use stop_token::future::FutureExt as _; + +pub async fn process_batched_future_queue( + future_queue: I, + batch_size: usize, + stop_token: StopToken, + result_callback: C, +) where + I: IntoIterator, + C: Fn(R) -> F, + F: Future, + ::Item: core::future::Future, +{ + let mut buffered_futures = + futures_util::stream::iter(future_queue).buffer_unordered(batch_size); + while let Ok(Some(res)) = buffered_futures.next().timeout_at(stop_token.clone()).await { + result_callback(res).await; + } +} diff --git a/veilid-tools/src/lib.rs b/veilid-tools/src/lib.rs index f6d1f911..615dc52d 100644 --- a/veilid-tools/src/lib.rs +++ b/veilid-tools/src/lib.rs @@ -34,6 +34,7 @@ pub mod eventual; pub mod eventual_base; pub mod eventual_value; pub mod eventual_value_clone; +pub mod future_queue; pub mod interval; pub mod ip_addr_port; pub mod ip_extra; @@ -201,6 +202,8 @@ pub use eventual_value::*; #[doc(inline)] pub use eventual_value_clone::*; #[doc(inline)] +pub use future_queue::*; +#[doc(inline)] pub use interval::*; #[doc(inline)] pub use ip_addr_port::*; diff --git a/veilid-tools/src/network_result.rs b/veilid-tools/src/network_result.rs index e65f8bf9..ee7cd1b7 100644 --- a/veilid-tools/src/network_result.rs +++ b/veilid-tools/src/network_result.rs @@ -24,6 +24,29 @@ pub trait IoNetworkResultExt { fn into_network_result(self) -> io::Result>; } +fn io_error_kind_from_error(e: io::Error) -> io::Result> { + #[cfg(not(target_arch = "wasm32"))] + if let Some(os_err) = e.raw_os_error() { + if os_err == libc::EHOSTUNREACH || os_err == libc::ENETUNREACH { + return Ok(NetworkResult::NoConnection(e)); + } + } + match e.kind() { + io::ErrorKind::TimedOut => Ok(NetworkResult::Timeout), + io::ErrorKind::UnexpectedEof + | io::ErrorKind::NotConnected + | io::ErrorKind::BrokenPipe + | io::ErrorKind::ConnectionAborted + | io::ErrorKind::ConnectionRefused + | io::ErrorKind::ConnectionReset => Ok(NetworkResult::NoConnection(e)), + io::ErrorKind::InvalidInput | io::ErrorKind::InvalidData => { + Ok(NetworkResult::InvalidMessage(e.to_string())) + } + io::ErrorKind::AddrNotAvailable => Ok(NetworkResult::AlreadyExists(e)), + _ => Err(e), + } +} + impl IoNetworkResultExt for io::Result { fn into_network_result(self) -> io::Result> { match self { @@ -43,28 +66,7 @@ impl IoNetworkResultExt for io::Result { // _ => Err(e), // }, // #[cfg(not(feature = "io_error_more"))] - Err(e) => { - #[cfg(not(target_arch = "wasm32"))] - if let Some(os_err) = e.raw_os_error() { - if os_err == libc::EHOSTUNREACH || os_err == libc::ENETUNREACH { - return Ok(NetworkResult::NoConnection(e)); - } - } - match e.kind() { - io::ErrorKind::TimedOut => Ok(NetworkResult::Timeout), - io::ErrorKind::UnexpectedEof - | io::ErrorKind::NotConnected - | io::ErrorKind::BrokenPipe - | 
io::ErrorKind::ConnectionAborted - | io::ErrorKind::ConnectionRefused - | io::ErrorKind::ConnectionReset => Ok(NetworkResult::NoConnection(e)), - io::ErrorKind::InvalidInput | io::ErrorKind::InvalidData => { - Ok(NetworkResult::InvalidMessage(e.to_string())) - } - io::ErrorKind::AddrNotAvailable => Ok(NetworkResult::AlreadyExists(e)), - _ => Err(e), - } - } + Err(e) => io_error_kind_from_error(e), } } } @@ -108,22 +110,7 @@ impl FoldedNetworkResultExt for io::Result> { // _ => Err(e), // }, // #[cfg(not(feature = "io_error_more"))] - Err(e) => { - #[cfg(not(target_arch = "wasm32"))] - if let Some(os_err) = e.raw_os_error() { - if os_err == libc::EHOSTUNREACH || os_err == libc::ENETUNREACH { - return Ok(NetworkResult::NoConnection(e)); - } - } - match e.kind() { - io::ErrorKind::TimedOut => Ok(NetworkResult::Timeout), - io::ErrorKind::ConnectionAborted - | io::ErrorKind::ConnectionRefused - | io::ErrorKind::ConnectionReset => Ok(NetworkResult::NoConnection(e)), - io::ErrorKind::AddrNotAvailable => Ok(NetworkResult::AlreadyExists(e)), - _ => Err(e), - } - } + Err(e) => io_error_kind_from_error(e), } } } @@ -144,22 +131,7 @@ impl FoldedNetworkResultExt for io::Result> { // _ => Err(e), // }, // #[cfg(not(feature = "io_error_more"))] - Err(e) => { - #[cfg(not(target_arch = "wasm32"))] - if let Some(os_err) = e.raw_os_error() { - if os_err == libc::EHOSTUNREACH || os_err == libc::ENETUNREACH { - return Ok(NetworkResult::NoConnection(e)); - } - } - match e.kind() { - io::ErrorKind::TimedOut => Ok(NetworkResult::Timeout), - io::ErrorKind::ConnectionAborted - | io::ErrorKind::ConnectionRefused - | io::ErrorKind::ConnectionReset => Ok(NetworkResult::NoConnection(e)), - io::ErrorKind::AddrNotAvailable => Ok(NetworkResult::AlreadyExists(e)), - _ => Err(e), - } - } + Err(e) => io_error_kind_from_error(e), } } } diff --git a/veilid-tools/src/timestamp.rs b/veilid-tools/src/timestamp.rs index ada1a925..61d978d5 100644 --- a/veilid-tools/src/timestamp.rs +++ b/veilid-tools/src/timestamp.rs @@ -12,7 +12,7 @@ cfg_if! { } } - pub fn debug_ts(ts: u64) -> String { + pub fn display_ts(ts: u64) -> String { if is_browser() { let now = Date::new_0(); now.set_time(Date::now()); @@ -66,7 +66,7 @@ cfg_if! 
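
// A short sketch of why the shared io_error_kind_from_error helper above checks
// raw_os_error() before matching on ErrorKind: host/network-unreachable errors
// only map to the HostUnreachable/NetworkUnreachable kinds behind the unstable
// io_error_more feature (see the commented-out cfg paths in this file), so on
// stable Rust they are recognized through the libc error codes instead.
// `is_unreachable` is an illustrative helper, not part of the patch.
use std::io;

fn is_unreachable(e: &io::Error) -> bool {
    matches!(
        e.raw_os_error(),
        Some(code) if code == libc::EHOSTUNREACH || code == libc::ENETUNREACH
    )
}
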
{ } } - pub fn debug_ts(ts: u64) -> String { + pub fn display_ts(ts: u64) -> String { let now = chrono::DateTime::::from(SystemTime::now()); let date = chrono::DateTime::::from(UNIX_EPOCH + Duration::from_micros(ts)); @@ -110,7 +110,7 @@ const MIN: u64 = 1_000_000u64 * 60; const SEC: u64 = 1_000_000u64; const MSEC: u64 = 1_000u64; -pub fn debug_duration(dur: u64) -> String { +pub fn display_duration(dur: u64) -> String { let days = dur / DAY; let dur = dur % DAY; let hours = dur / HOUR; diff --git a/veilid-wasm/src/lib.rs b/veilid-wasm/src/lib.rs index 17736bd4..85d2ffb9 100644 --- a/veilid-wasm/src/lib.rs +++ b/veilid-wasm/src/lib.rs @@ -25,8 +25,8 @@ use tracing_subscriber::prelude::*; use tracing_subscriber::*; use tracing_wasm::{WASMLayerConfigBuilder, *}; use tsify::*; -use veilid_core::tools::*; use veilid_core::*; +use veilid_core::{tools::*, VeilidAPIError}; use wasm_bindgen::prelude::*; use wasm_bindgen_futures::*; @@ -249,7 +249,9 @@ pub fn initialize_veilid_core(platform_config: String) { #[wasm_bindgen()] pub fn change_log_level(layer: String, log_level: String) { let layer = if layer == "all" { "".to_owned() } else { layer }; - let log_level: veilid_core::VeilidConfigLogLevel = deserialize_json(&log_level).unwrap(); + let Ok(log_level) = deserialize_json::(&log_level) else { + return; + }; let filters = (*FILTERS).borrow(); if layer.is_empty() { // Change all layers @@ -258,8 +260,9 @@ pub fn change_log_level(layer: String, log_level: String) { } } else { // Change a specific layer - let f = filters.get(layer.as_str()).unwrap(); - f.set_max_level(log_level); + if let Some(f) = filters.get(layer.as_str()) { + f.set_max_level(log_level); + } } } @@ -278,11 +281,12 @@ pub fn change_log_ignore(layer: String, log_ignore: String) { } } else { // Change a specific layer - let f = filters.get(layer.as_str()).unwrap(); - f.set_ignore_list(Some(VeilidLayerFilter::apply_ignore_change( - &f.ignore_list(), - log_ignore.clone(), - ))); + if let Some(f) = filters.get(layer.as_str()) { + f.set_ignore_list(Some(VeilidLayerFilter::apply_ignore_change( + &f.ignore_list(), + log_ignore.clone(), + ))); + } } } @@ -446,10 +450,11 @@ pub fn routing_context_safety(id: u32) -> Promise { #[wasm_bindgen()] pub fn routing_context_app_call(id: u32, target_string: String, request: String) -> Promise { - let request: Vec = data_encoding::BASE64URL_NOPAD - .decode(request.as_bytes()) - .unwrap(); wrap_api_future_plain(async move { + let request: Vec = data_encoding::BASE64URL_NOPAD + .decode(request.as_bytes()) + .map_err(VeilidAPIError::generic)?; + let routing_context = get_routing_context(id, "routing_context_app_call")?; let veilid_api = get_veilid_api()?; @@ -462,10 +467,10 @@ pub fn routing_context_app_call(id: u32, target_string: String, request: String) #[wasm_bindgen()] pub fn routing_context_app_message(id: u32, target_string: String, message: String) -> Promise { - let message: Vec = data_encoding::BASE64URL_NOPAD - .decode(message.as_bytes()) - .unwrap(); wrap_api_future_void(async move { + let message: Vec = data_encoding::BASE64URL_NOPAD + .decode(message.as_bytes()) + .map_err(VeilidAPIError::generic)?; let routing_context = get_routing_context(id, "routing_context_app_message")?; let veilid_api = get_veilid_api()?; @@ -477,14 +482,15 @@ pub fn routing_context_app_message(id: u32, target_string: String, message: Stri #[wasm_bindgen()] pub fn routing_context_create_dht_record(id: u32, schema: String, kind: u32) -> Promise { - let crypto_kind = if kind == 0 { - None - } else { - 
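
// A minimal sketch of the unit breakdown performed by display_duration (renamed
// from debug_duration) on a microsecond count, using the DAY/HOUR/MIN/SEC
// constants from timestamp.rs. The DAY and HOUR values and the exact output
// format are assumptions; only MIN, SEC, and MSEC are visible in this hunk.
const DAY: u64 = 1_000_000 * 60 * 60 * 24;
const HOUR: u64 = 1_000_000 * 60 * 60;
const MIN: u64 = 1_000_000 * 60;
const SEC: u64 = 1_000_000;

fn display_duration_sketch(dur: u64) -> String {
    let days = dur / DAY;
    let dur = dur % DAY;
    let hours = dur / HOUR;
    let dur = dur % HOUR;
    let mins = dur / MIN;
    let dur = dur % MIN;
    let secs = dur as f64 / SEC as f64;
    format!("{days}d {hours}h {mins}m {secs:.3}s")
}
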
Some(veilid_core::FourCC::from(kind)) - }; - let schema: veilid_core::DHTSchema = veilid_core::deserialize_json(&schema).unwrap(); - wrap_api_future_json(async move { + let crypto_kind = if kind == 0 { + None + } else { + Some(veilid_core::FourCC::from(kind)) + }; + let schema: veilid_core::DHTSchema = + veilid_core::deserialize_json(&schema).map_err(VeilidAPIError::generic)?; + let routing_context = get_routing_context(id, "routing_context_create_dht_record")?; let dht_record_descriptor = routing_context @@ -496,10 +502,14 @@ pub fn routing_context_create_dht_record(id: u32, schema: String, kind: u32) -> #[wasm_bindgen()] pub fn routing_context_open_dht_record(id: u32, key: String, writer: Option) -> Promise { - let key: veilid_core::TypedKey = veilid_core::deserialize_json(&key).unwrap(); - let writer: Option = - writer.map(|s| veilid_core::deserialize_json(&s).unwrap()); wrap_api_future_json(async move { + let key: veilid_core::TypedKey = + veilid_core::deserialize_json(&key).map_err(VeilidAPIError::generic)?; + let writer: Option = match writer { + Some(s) => Some(veilid_core::deserialize_json(&s).map_err(VeilidAPIError::generic)?), + None => None, + }; + let routing_context = get_routing_context(id, "routing_context_open_dht_record")?; let dht_record_descriptor = routing_context.open_dht_record(key, writer).await?; @@ -509,8 +519,10 @@ pub fn routing_context_open_dht_record(id: u32, key: String, writer: Option Promise { - let key: veilid_core::TypedKey = veilid_core::deserialize_json(&key).unwrap(); wrap_api_future_void(async move { + let key: veilid_core::TypedKey = + veilid_core::deserialize_json(&key).map_err(VeilidAPIError::generic)?; + let routing_context = get_routing_context(id, "routing_context_close_dht_record")?; routing_context.close_dht_record(key).await?; @@ -520,8 +532,10 @@ pub fn routing_context_close_dht_record(id: u32, key: String) -> Promise { #[wasm_bindgen()] pub fn routing_context_delete_dht_record(id: u32, key: String) -> Promise { - let key: veilid_core::TypedKey = veilid_core::deserialize_json(&key).unwrap(); wrap_api_future_void(async move { + let key: veilid_core::TypedKey = + veilid_core::deserialize_json(&key).map_err(VeilidAPIError::generic)?; + let routing_context = get_routing_context(id, "routing_context_delete_dht_record")?; routing_context.delete_dht_record(key).await?; @@ -536,8 +550,10 @@ pub fn routing_context_get_dht_value( subkey: u32, force_refresh: bool, ) -> Promise { - let key: veilid_core::TypedKey = veilid_core::deserialize_json(&key).unwrap(); wrap_api_future_json(async move { + let key: veilid_core::TypedKey = + veilid_core::deserialize_json(&key).map_err(VeilidAPIError::generic)?; + let routing_context = get_routing_context(id, "routing_context_get_dht_value")?; let res = routing_context @@ -555,14 +571,17 @@ pub fn routing_context_set_dht_value( data: String, writer: Option, ) -> Promise { - let key: veilid_core::TypedKey = veilid_core::deserialize_json(&key).unwrap(); - let data: Vec = data_encoding::BASE64URL_NOPAD - .decode(data.as_bytes()) - .unwrap(); - let writer: Option = - writer.map(|s| veilid_core::deserialize_json(&s).unwrap()); - wrap_api_future_json(async move { + let key: veilid_core::TypedKey = + veilid_core::deserialize_json(&key).map_err(VeilidAPIError::generic)?; + let data: Vec = data_encoding::BASE64URL_NOPAD + .decode(data.as_bytes()) + .map_err(VeilidAPIError::generic)?; + let writer: Option = match writer { + Some(s) => veilid_core::deserialize_json(&s).map_err(VeilidAPIError::generic)?, + None => None, + }; + 
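
// A minimal sketch of the error-handling pattern applied to every wasm binding
// in this file: base64/JSON decoding of the JS-supplied strings now happens
// *inside* the wrapped future, so a malformed argument becomes a rejected
// Promise (via the `?` operator) instead of an unwrap() panic in the wasm
// module. `ApiError`, its `generic` constructor, and `app_message_sketch` are
// stand-ins for VeilidAPIError::generic and the wrap_api_future_* helpers,
// not the real API.
use data_encoding::BASE64URL_NOPAD;

#[derive(Debug)]
struct ApiError(String);

impl ApiError {
    fn generic<E: std::fmt::Display>(e: E) -> Self {
        ApiError(e.to_string())
    }
}

async fn app_message_sketch(message: String) -> Result<(), ApiError> {
    // Previously: .unwrap() outside the future -> panic on bad input.
    // Now: .map_err(..)? inside the future -> the error is returned to the caller.
    let message: Vec<u8> = BASE64URL_NOPAD
        .decode(message.as_bytes())
        .map_err(ApiError::generic)?;
    let _ = message; // ...hand the decoded bytes to the routing context here...
    Ok(())
}
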
let routing_context = get_routing_context(id, "routing_context_set_dht_value")?; let res = routing_context @@ -580,12 +599,15 @@ pub fn routing_context_watch_dht_values( expiration: String, count: u32, ) -> Promise { - let key: veilid_core::TypedKey = veilid_core::deserialize_json(&key).unwrap(); - let subkeys: veilid_core::ValueSubkeyRangeSet = - veilid_core::deserialize_json(&subkeys).unwrap(); - let expiration = veilid_core::Timestamp::from_str(&expiration).unwrap(); - wrap_api_future_plain(async move { + let key: veilid_core::TypedKey = + veilid_core::deserialize_json(&key).map_err(VeilidAPIError::generic)?; + let subkeys: veilid_core::ValueSubkeyRangeSet = + veilid_core::deserialize_json(&subkeys).map_err(VeilidAPIError::generic)?; + let expiration = veilid_core::Timestamp::new( + u64::from_str(&expiration).map_err(VeilidAPIError::generic)?, + ); + let routing_context = get_routing_context(id, "routing_context_watch_dht_values")?; let res = routing_context @@ -597,11 +619,12 @@ pub fn routing_context_watch_dht_values( #[wasm_bindgen()] pub fn routing_context_cancel_dht_watch(id: u32, key: String, subkeys: String) -> Promise { - let key: veilid_core::TypedKey = veilid_core::deserialize_json(&key).unwrap(); - let subkeys: veilid_core::ValueSubkeyRangeSet = - veilid_core::deserialize_json(&subkeys).unwrap(); - wrap_api_future_plain(async move { + let key: veilid_core::TypedKey = + veilid_core::deserialize_json(&key).map_err(VeilidAPIError::generic)?; + let subkeys: veilid_core::ValueSubkeyRangeSet = + veilid_core::deserialize_json(&subkeys).map_err(VeilidAPIError::generic)?; + let routing_context = get_routing_context(id, "routing_context_cancel_dht_watch")?; let res = routing_context.cancel_dht_watch(key, subkeys).await?; @@ -616,12 +639,14 @@ pub fn routing_context_inspect_dht_record( subkeys: String, scope: String, ) -> Promise { - let key: veilid_core::TypedKey = veilid_core::deserialize_json(&key).unwrap(); - let subkeys: veilid_core::ValueSubkeyRangeSet = - veilid_core::deserialize_json(&subkeys).unwrap(); - let scope: veilid_core::DHTReportScope = veilid_core::deserialize_json(&scope).unwrap(); - wrap_api_future_json(async move { + let key: veilid_core::TypedKey = + veilid_core::deserialize_json(&key).map_err(VeilidAPIError::generic)?; + let subkeys: veilid_core::ValueSubkeyRangeSet = + veilid_core::deserialize_json(&subkeys).map_err(VeilidAPIError::generic)?; + let scope: veilid_core::DHTReportScope = + veilid_core::deserialize_json(&scope).map_err(VeilidAPIError::generic)?; + let routing_context = get_routing_context(id, "routing_context_inspect_dht_record")?; let res = routing_context @@ -647,10 +672,12 @@ pub fn new_private_route() -> Promise { #[wasm_bindgen()] pub fn new_custom_private_route(stability: String, sequencing: String) -> Promise { - let stability: veilid_core::Stability = veilid_core::deserialize_json(&stability).unwrap(); - let sequencing: veilid_core::Sequencing = veilid_core::deserialize_json(&sequencing).unwrap(); - wrap_api_future_json(async move { + let stability: veilid_core::Stability = + veilid_core::deserialize_json(&stability).map_err(VeilidAPIError::generic)?; + let sequencing: veilid_core::Sequencing = + veilid_core::deserialize_json(&sequencing).map_err(VeilidAPIError::generic)?; + let veilid_api = get_veilid_api()?; let (route_id, blob) = veilid_api @@ -665,10 +692,10 @@ pub fn new_custom_private_route(stability: String, sequencing: String) -> Promis #[wasm_bindgen()] pub fn import_remote_private_route(blob: String) -> Promise { - let blob: Vec = 
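
// A minimal sketch of why the Option-typed arguments above are unpacked with a
// match rather than Option::map: the `?` operator cannot be used inside the
// closure passed to map(), so the fallible conversion is written out and its
// error propagates from the enclosing future. An equivalent one-liner would be
// raw.map(|s| s.parse()).transpose()?; the match form is what the patch uses.
// `parse_opt` and the u64 target type are illustrative, not the real bindings.
fn parse_opt(raw: Option<String>) -> Result<Option<u64>, std::num::ParseIntError> {
    Ok(match raw {
        Some(s) => Some(s.parse::<u64>()?),
        None => None,
    })
}
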
data_encoding::BASE64URL_NOPAD - .decode(blob.as_bytes()) - .unwrap(); wrap_api_future_plain(async move { + let blob: Vec = data_encoding::BASE64URL_NOPAD + .decode(blob.as_bytes()) + .map_err(VeilidAPIError::generic)?; let veilid_api = get_veilid_api()?; let key = veilid_api.import_remote_private_route(blob)?; @@ -679,8 +706,9 @@ pub fn import_remote_private_route(blob: String) -> Promise { #[wasm_bindgen()] pub fn release_private_route(route_id: String) -> Promise { - let route_id: veilid_core::RouteId = veilid_core::RouteId::try_decode(&route_id).unwrap(); wrap_api_future_void(async move { + let route_id: veilid_core::RouteId = + veilid_core::RouteId::try_decode(&route_id).map_err(VeilidAPIError::generic)?; let veilid_api = get_veilid_api()?; veilid_api.release_private_route(route_id)?; APIRESULT_UNDEFINED @@ -689,10 +717,10 @@ pub fn release_private_route(route_id: String) -> Promise { #[wasm_bindgen()] pub fn app_call_reply(call_id: String, message: String) -> Promise { - let message: Vec = data_encoding::BASE64URL_NOPAD - .decode(message.as_bytes()) - .unwrap(); wrap_api_future_void(async move { + let message: Vec = data_encoding::BASE64URL_NOPAD + .decode(message.as_bytes()) + .map_err(VeilidAPIError::generic)?; let call_id = match call_id.parse() { Ok(v) => v, Err(e) => { @@ -853,13 +881,14 @@ pub fn table_db_transaction_rollback(id: u32) -> Promise { #[wasm_bindgen()] pub fn table_db_transaction_store(id: u32, col: u32, key: String, value: String) -> Promise { - let key: Vec = data_encoding::BASE64URL_NOPAD - .decode(key.as_bytes()) - .unwrap(); - let value: Vec = data_encoding::BASE64URL_NOPAD - .decode(value.as_bytes()) - .unwrap(); wrap_api_future_void(async move { + let key: Vec = data_encoding::BASE64URL_NOPAD + .decode(key.as_bytes()) + .map_err(VeilidAPIError::generic)?; + let value: Vec = data_encoding::BASE64URL_NOPAD + .decode(value.as_bytes()) + .map_err(VeilidAPIError::generic)?; + let tdbt = get_table_db_transaction(id, "table_db_transaction_store")?; tdbt.store(col, &key, &value)?; @@ -869,10 +898,11 @@ pub fn table_db_transaction_store(id: u32, col: u32, key: String, value: String) #[wasm_bindgen()] pub fn table_db_transaction_delete(id: u32, col: u32, key: String) -> Promise { - let key: Vec = data_encoding::BASE64URL_NOPAD - .decode(key.as_bytes()) - .unwrap(); wrap_api_future_void(async move { + let key: Vec = data_encoding::BASE64URL_NOPAD + .decode(key.as_bytes()) + .map_err(VeilidAPIError::generic)?; + let tdbt = get_table_db_transaction(id, "table_db_transaction_delete")?; tdbt.delete(col, &key)?; @@ -882,13 +912,13 @@ pub fn table_db_transaction_delete(id: u32, col: u32, key: String) -> Promise { #[wasm_bindgen()] pub fn table_db_store(id: u32, col: u32, key: String, value: String) -> Promise { - let key: Vec = data_encoding::BASE64URL_NOPAD - .decode(key.as_bytes()) - .unwrap(); - let value: Vec = data_encoding::BASE64URL_NOPAD - .decode(value.as_bytes()) - .unwrap(); wrap_api_future_void(async move { + let key: Vec = data_encoding::BASE64URL_NOPAD + .decode(key.as_bytes()) + .map_err(VeilidAPIError::generic)?; + let value: Vec = data_encoding::BASE64URL_NOPAD + .decode(value.as_bytes()) + .map_err(VeilidAPIError::generic)?; let table_db = get_table_db(id, "table_db_store")?; table_db.store(col, &key, &value).await?; @@ -898,10 +928,10 @@ pub fn table_db_store(id: u32, col: u32, key: String, value: String) -> Promise #[wasm_bindgen()] pub fn table_db_load(id: u32, col: u32, key: String) -> Promise { - let key: Vec = data_encoding::BASE64URL_NOPAD - 
.decode(key.as_bytes()) - .unwrap(); wrap_api_future_plain(async move { + let key: Vec = data_encoding::BASE64URL_NOPAD + .decode(key.as_bytes()) + .map_err(VeilidAPIError::generic)?; let table_db = get_table_db(id, "table_db_load")?; let out = table_db.load(col, &key).await?; @@ -912,10 +942,10 @@ pub fn table_db_load(id: u32, col: u32, key: String) -> Promise { #[wasm_bindgen()] pub fn table_db_delete(id: u32, col: u32, key: String) -> Promise { - let key: Vec = data_encoding::BASE64URL_NOPAD - .decode(key.as_bytes()) - .unwrap(); wrap_api_future_plain(async move { + let key: Vec = data_encoding::BASE64URL_NOPAD + .decode(key.as_bytes()) + .map_err(VeilidAPIError::generic)?; let table_db = get_table_db(id, "table_db_delete")?; let out = table_db.delete(col, &key).await?; @@ -941,16 +971,16 @@ pub fn best_crypto_kind() -> u32 { #[wasm_bindgen()] pub fn verify_signatures(node_ids: String, data: String, signatures: String) -> Promise { - let node_ids: Vec = veilid_core::deserialize_json(&node_ids).unwrap(); - - let data: Vec = data_encoding::BASE64URL_NOPAD - .decode(data.as_bytes()) - .unwrap(); - - let typed_signatures: Vec = - veilid_core::deserialize_json(&signatures).unwrap(); - wrap_api_future_json(async move { + let node_ids: Vec = + veilid_core::deserialize_json(&node_ids).map_err(VeilidAPIError::generic)?; + + let data: Vec = data_encoding::BASE64URL_NOPAD + .decode(data.as_bytes()) + .map_err(VeilidAPIError::generic)?; + + let typed_signatures: Vec = + veilid_core::deserialize_json(&signatures).map_err(VeilidAPIError::generic)?; let veilid_api = get_veilid_api()?; let crypto = veilid_api.crypto()?; let out = crypto.verify_signatures(&node_ids, &data, &typed_signatures)?; @@ -960,14 +990,13 @@ pub fn verify_signatures(node_ids: String, data: String, signatures: String) -> #[wasm_bindgen()] pub fn generate_signatures(data: String, key_pairs: String) -> Promise { - let data: Vec = data_encoding::BASE64URL_NOPAD - .decode(data.as_bytes()) - .unwrap(); - - let key_pairs: Vec = - veilid_core::deserialize_json(&key_pairs).unwrap(); - wrap_api_future_json(async move { + let data: Vec = data_encoding::BASE64URL_NOPAD + .decode(data.as_bytes()) + .map_err(VeilidAPIError::generic)?; + + let key_pairs: Vec = + veilid_core::deserialize_json(&key_pairs).map_err(VeilidAPIError::generic)?; let veilid_api = get_veilid_api()?; let crypto = veilid_api.crypto()?; let out = crypto.generate_signatures(&data, &key_pairs, |k, s| { @@ -989,12 +1018,14 @@ pub fn generate_key_pair(kind: u32) -> Promise { #[wasm_bindgen()] pub fn crypto_cached_dh(kind: u32, key: String, secret: String) -> Promise { - let kind: veilid_core::CryptoKind = veilid_core::FourCC::from(kind); - - let key: veilid_core::PublicKey = veilid_core::deserialize_json(&key).unwrap(); - let secret: veilid_core::SecretKey = veilid_core::deserialize_json(&secret).unwrap(); - wrap_api_future_json(async move { + let kind: veilid_core::CryptoKind = veilid_core::FourCC::from(kind); + + let key: veilid_core::PublicKey = + veilid_core::deserialize_json(&key).map_err(VeilidAPIError::generic)?; + let secret: veilid_core::SecretKey = + veilid_core::deserialize_json(&secret).map_err(VeilidAPIError::generic)?; + let veilid_api = get_veilid_api()?; let crypto = veilid_api.crypto()?; let csv = crypto.get(kind).ok_or_else(|| { @@ -1011,12 +1042,14 @@ pub fn crypto_cached_dh(kind: u32, key: String, secret: String) -> Promise { #[wasm_bindgen()] pub fn crypto_compute_dh(kind: u32, key: String, secret: String) -> Promise { - let kind: veilid_core::CryptoKind = 
veilid_core::FourCC::from(kind); - - let key: veilid_core::PublicKey = veilid_core::deserialize_json(&key).unwrap(); - let secret: veilid_core::SecretKey = veilid_core::deserialize_json(&secret).unwrap(); - wrap_api_future_json(async move { + let kind: veilid_core::CryptoKind = veilid_core::FourCC::from(kind); + + let key: veilid_core::PublicKey = + veilid_core::deserialize_json(&key).map_err(VeilidAPIError::generic)?; + let secret: veilid_core::SecretKey = + veilid_core::deserialize_json(&secret).map_err(VeilidAPIError::generic)?; + let veilid_api = get_veilid_api()?; let crypto = veilid_api.crypto()?; let csv = crypto.get(kind).ok_or_else(|| { @@ -1038,15 +1071,17 @@ pub fn crypto_generate_shared_secret( secret: String, domain: String, ) -> Promise { - let kind: veilid_core::CryptoKind = veilid_core::FourCC::from(kind); - - let key: veilid_core::PublicKey = veilid_core::deserialize_json(&key).unwrap(); - let secret: veilid_core::SecretKey = veilid_core::deserialize_json(&secret).unwrap(); - let domain: Vec = data_encoding::BASE64URL_NOPAD - .decode(domain.as_bytes()) - .unwrap(); - wrap_api_future_json(async move { + let kind: veilid_core::CryptoKind = veilid_core::FourCC::from(kind); + + let key: veilid_core::PublicKey = + veilid_core::deserialize_json(&key).map_err(VeilidAPIError::generic)?; + let secret: veilid_core::SecretKey = + veilid_core::deserialize_json(&secret).map_err(VeilidAPIError::generic)?; + let domain: Vec = data_encoding::BASE64URL_NOPAD + .decode(domain.as_bytes()) + .map_err(VeilidAPIError::generic)?; + let veilid_api = get_veilid_api()?; let crypto = veilid_api.crypto()?; let csv = crypto.get(kind).ok_or_else(|| { @@ -1102,15 +1137,15 @@ pub fn crypto_default_salt_length(kind: u32) -> Promise { #[wasm_bindgen()] pub fn crypto_hash_password(kind: u32, password: String, salt: String) -> Promise { - let kind: veilid_core::CryptoKind = veilid_core::FourCC::from(kind); - let password: Vec = data_encoding::BASE64URL_NOPAD - .decode(password.as_bytes()) - .unwrap(); - let salt: Vec = data_encoding::BASE64URL_NOPAD - .decode(salt.as_bytes()) - .unwrap(); - wrap_api_future_plain(async move { + let kind: veilid_core::CryptoKind = veilid_core::FourCC::from(kind); + let password: Vec = data_encoding::BASE64URL_NOPAD + .decode(password.as_bytes()) + .map_err(VeilidAPIError::generic)?; + let salt: Vec = data_encoding::BASE64URL_NOPAD + .decode(salt.as_bytes()) + .map_err(VeilidAPIError::generic)?; + let veilid_api = get_veilid_api()?; let crypto = veilid_api.crypto()?; let csv = crypto.get(kind).ok_or_else(|| { @@ -1127,12 +1162,11 @@ pub fn crypto_hash_password(kind: u32, password: String, salt: String) -> Promis #[wasm_bindgen()] pub fn crypto_verify_password(kind: u32, password: String, password_hash: String) -> Promise { - let kind: veilid_core::CryptoKind = veilid_core::FourCC::from(kind); - let password: Vec = data_encoding::BASE64URL_NOPAD - .decode(password.as_bytes()) - .unwrap(); - wrap_api_future_plain(async move { + let kind: veilid_core::CryptoKind = veilid_core::FourCC::from(kind); + let password: Vec = data_encoding::BASE64URL_NOPAD + .decode(password.as_bytes()) + .map_err(VeilidAPIError::generic)?; let veilid_api = get_veilid_api()?; let crypto = veilid_api.crypto()?; let csv = crypto.get(kind).ok_or_else(|| { @@ -1149,15 +1183,15 @@ pub fn crypto_verify_password(kind: u32, password: String, password_hash: String #[wasm_bindgen()] pub fn crypto_derive_shared_secret(kind: u32, password: String, salt: String) -> Promise { - let kind: veilid_core::CryptoKind = 
veilid_core::FourCC::from(kind); - let password: Vec = data_encoding::BASE64URL_NOPAD - .decode(password.as_bytes()) - .unwrap(); - let salt: Vec = data_encoding::BASE64URL_NOPAD - .decode(salt.as_bytes()) - .unwrap(); - wrap_api_future_json(async move { + let kind: veilid_core::CryptoKind = veilid_core::FourCC::from(kind); + let password: Vec = data_encoding::BASE64URL_NOPAD + .decode(password.as_bytes()) + .map_err(VeilidAPIError::generic)?; + let salt: Vec = data_encoding::BASE64URL_NOPAD + .decode(salt.as_bytes()) + .map_err(VeilidAPIError::generic)?; + let veilid_api = get_veilid_api()?; let crypto = veilid_api.crypto()?; let csv = crypto.get(kind).ok_or_else(|| { @@ -1174,9 +1208,9 @@ pub fn crypto_derive_shared_secret(kind: u32, password: String, salt: String) -> #[wasm_bindgen()] pub fn crypto_random_nonce(kind: u32) -> Promise { - let kind: veilid_core::CryptoKind = veilid_core::FourCC::from(kind); - wrap_api_future_json(async move { + let kind: veilid_core::CryptoKind = veilid_core::FourCC::from(kind); + let veilid_api = get_veilid_api()?; let crypto = veilid_api.crypto()?; let csv = crypto.get(kind).ok_or_else(|| { @@ -1231,13 +1265,13 @@ pub fn crypto_generate_key_pair(kind: u32) -> Promise { #[wasm_bindgen()] pub fn crypto_generate_hash(kind: u32, data: String) -> Promise { - let kind: veilid_core::CryptoKind = veilid_core::FourCC::from(kind); - - let data: Vec = data_encoding::BASE64URL_NOPAD - .decode(data.as_bytes()) - .unwrap(); - wrap_api_future_json(async move { + let kind: veilid_core::CryptoKind = veilid_core::FourCC::from(kind); + + let data: Vec = data_encoding::BASE64URL_NOPAD + .decode(data.as_bytes()) + .map_err(VeilidAPIError::generic)?; + let veilid_api = get_veilid_api()?; let crypto = veilid_api.crypto()?; let csv = crypto.get(kind).ok_or_else(|| { @@ -1254,12 +1288,14 @@ pub fn crypto_generate_hash(kind: u32, data: String) -> Promise { #[wasm_bindgen()] pub fn crypto_validate_key_pair(kind: u32, key: String, secret: String) -> Promise { - let kind: veilid_core::CryptoKind = veilid_core::FourCC::from(kind); - - let key: veilid_core::PublicKey = veilid_core::deserialize_json(&key).unwrap(); - let secret: veilid_core::SecretKey = veilid_core::deserialize_json(&secret).unwrap(); - wrap_api_future_plain(async move { + let kind: veilid_core::CryptoKind = veilid_core::FourCC::from(kind); + + let key: veilid_core::PublicKey = + veilid_core::deserialize_json(&key).map_err(VeilidAPIError::generic)?; + let secret: veilid_core::SecretKey = + veilid_core::deserialize_json(&secret).map_err(VeilidAPIError::generic)?; + let veilid_api = get_veilid_api()?; let crypto = veilid_api.crypto()?; let csv = crypto.get(kind).ok_or_else(|| { @@ -1276,15 +1312,16 @@ pub fn crypto_validate_key_pair(kind: u32, key: String, secret: String) -> Promi #[wasm_bindgen()] pub fn crypto_validate_hash(kind: u32, data: String, hash: String) -> Promise { - let kind: veilid_core::CryptoKind = veilid_core::FourCC::from(kind); - - let data: Vec = data_encoding::BASE64URL_NOPAD - .decode(data.as_bytes()) - .unwrap(); - - let hash: veilid_core::HashDigest = veilid_core::deserialize_json(&hash).unwrap(); - wrap_api_future_plain(async move { + let kind: veilid_core::CryptoKind = veilid_core::FourCC::from(kind); + + let data: Vec = data_encoding::BASE64URL_NOPAD + .decode(data.as_bytes()) + .map_err(VeilidAPIError::generic)?; + + let hash: veilid_core::HashDigest = + veilid_core::deserialize_json(&hash).map_err(VeilidAPIError::generic)?; + let veilid_api = get_veilid_api()?; let crypto = 
veilid_api.crypto()?; let csv = crypto.get(kind).ok_or_else(|| { @@ -1301,12 +1338,14 @@ pub fn crypto_validate_hash(kind: u32, data: String, hash: String) -> Promise { #[wasm_bindgen()] pub fn crypto_distance(kind: u32, key1: String, key2: String) -> Promise { - let kind: veilid_core::CryptoKind = veilid_core::FourCC::from(kind); - - let key1: veilid_core::CryptoKey = veilid_core::deserialize_json(&key1).unwrap(); - let key2: veilid_core::CryptoKey = veilid_core::deserialize_json(&key2).unwrap(); - wrap_api_future_json(async move { + let kind: veilid_core::CryptoKind = veilid_core::FourCC::from(kind); + + let key1: veilid_core::CryptoKey = + veilid_core::deserialize_json(&key1).map_err(VeilidAPIError::generic)?; + let key2: veilid_core::CryptoKey = + veilid_core::deserialize_json(&key2).map_err(VeilidAPIError::generic)?; + let veilid_api = get_veilid_api()?; let crypto = veilid_api.crypto()?; let csv = crypto.get(kind).ok_or_else(|| { @@ -1323,16 +1362,18 @@ pub fn crypto_distance(kind: u32, key1: String, key2: String) -> Promise { #[wasm_bindgen()] pub fn crypto_sign(kind: u32, key: String, secret: String, data: String) -> Promise { - let kind: veilid_core::CryptoKind = veilid_core::FourCC::from(kind); - - let key: veilid_core::CryptoKey = veilid_core::deserialize_json(&key).unwrap(); - let secret: veilid_core::CryptoKey = veilid_core::deserialize_json(&secret).unwrap(); - - let data: Vec = data_encoding::BASE64URL_NOPAD - .decode(data.as_bytes()) - .unwrap(); - wrap_api_future_json(async move { + let kind: veilid_core::CryptoKind = veilid_core::FourCC::from(kind); + + let key: veilid_core::CryptoKey = + veilid_core::deserialize_json(&key).map_err(VeilidAPIError::generic)?; + let secret: veilid_core::CryptoKey = + veilid_core::deserialize_json(&secret).map_err(VeilidAPIError::generic)?; + + let data: Vec = data_encoding::BASE64URL_NOPAD + .decode(data.as_bytes()) + .map_err(VeilidAPIError::generic)?; + let veilid_api = get_veilid_api()?; let crypto = veilid_api.crypto()?; let csv = crypto.get(kind).ok_or_else(|| { @@ -1345,15 +1386,17 @@ pub fn crypto_sign(kind: u32, key: String, secret: String, data: String) -> Prom #[wasm_bindgen()] pub fn crypto_verify(kind: u32, key: String, data: String, signature: String) -> Promise { - let kind: veilid_core::CryptoKind = veilid_core::FourCC::from(kind); - - let key: veilid_core::CryptoKey = veilid_core::deserialize_json(&key).unwrap(); - let data: Vec = data_encoding::BASE64URL_NOPAD - .decode(data.as_bytes()) - .unwrap(); - let signature: veilid_core::Signature = veilid_core::deserialize_json(&signature).unwrap(); - wrap_api_future_plain(async move { + let kind: veilid_core::CryptoKind = veilid_core::FourCC::from(kind); + + let key: veilid_core::CryptoKey = + veilid_core::deserialize_json(&key).map_err(VeilidAPIError::generic)?; + let data: Vec = data_encoding::BASE64URL_NOPAD + .decode(data.as_bytes()) + .map_err(VeilidAPIError::generic)?; + let signature: veilid_core::Signature = + veilid_core::deserialize_json(&signature).map_err(VeilidAPIError::generic)?; + let veilid_api = get_veilid_api()?; let crypto = veilid_api.crypto()?; let csv = crypto.get(kind).ok_or_else(|| { @@ -1366,9 +1409,9 @@ pub fn crypto_verify(kind: u32, key: String, data: String, signature: String) -> #[wasm_bindgen()] pub fn crypto_aead_overhead(kind: u32) -> Promise { - let kind: veilid_core::CryptoKind = veilid_core::FourCC::from(kind); - wrap_api_future_plain(async move { + let kind: veilid_core::CryptoKind = veilid_core::FourCC::from(kind); + let veilid_api = 
get_veilid_api()?; let crypto = veilid_api.crypto()?; let csv = crypto.get(kind).ok_or_else(|| { @@ -1391,24 +1434,28 @@ pub fn crypto_decrypt_aead( shared_secret: String, associated_data: Option, ) -> Promise { - let kind: veilid_core::CryptoKind = veilid_core::FourCC::from(kind); - - let body: Vec = data_encoding::BASE64URL_NOPAD - .decode(body.as_bytes()) - .unwrap(); - - let nonce: veilid_core::Nonce = veilid_core::deserialize_json(&nonce).unwrap(); - - let shared_secret: veilid_core::SharedSecret = - veilid_core::deserialize_json(&shared_secret).unwrap(); - - let associated_data: Option> = associated_data.map(|ad| { - data_encoding::BASE64URL_NOPAD - .decode(ad.as_bytes()) - .unwrap() - }); - wrap_api_future_plain(async move { + let kind: veilid_core::CryptoKind = veilid_core::FourCC::from(kind); + + let body: Vec = data_encoding::BASE64URL_NOPAD + .decode(body.as_bytes()) + .map_err(VeilidAPIError::generic)?; + + let nonce: veilid_core::Nonce = + veilid_core::deserialize_json(&nonce).map_err(VeilidAPIError::generic)?; + + let shared_secret: veilid_core::SharedSecret = + veilid_core::deserialize_json(&shared_secret).map_err(VeilidAPIError::generic)?; + + let associated_data: Option> = match associated_data { + Some(ad) => Some( + data_encoding::BASE64URL_NOPAD + .decode(ad.as_bytes()) + .map_err(VeilidAPIError::generic)?, + ), + None => None, + }; + let veilid_api = get_veilid_api()?; let crypto = veilid_api.crypto()?; let csv = crypto.get(kind).ok_or_else(|| { @@ -1440,24 +1487,28 @@ pub fn crypto_encrypt_aead( shared_secret: String, associated_data: Option, ) -> Promise { - let kind: veilid_core::CryptoKind = veilid_core::FourCC::from(kind); - - let body: Vec = data_encoding::BASE64URL_NOPAD - .decode(body.as_bytes()) - .unwrap(); - - let nonce: veilid_core::Nonce = veilid_core::deserialize_json(&nonce).unwrap(); - - let shared_secret: veilid_core::SharedSecret = - veilid_core::deserialize_json(&shared_secret).unwrap(); - - let associated_data: Option> = associated_data.map(|ad| { - data_encoding::BASE64URL_NOPAD - .decode(ad.as_bytes()) - .unwrap() - }); - wrap_api_future_plain(async move { + let kind: veilid_core::CryptoKind = veilid_core::FourCC::from(kind); + + let body: Vec = data_encoding::BASE64URL_NOPAD + .decode(body.as_bytes()) + .map_err(VeilidAPIError::generic)?; + + let nonce: veilid_core::Nonce = + veilid_core::deserialize_json(&nonce).map_err(VeilidAPIError::generic)?; + + let shared_secret: veilid_core::SharedSecret = + veilid_core::deserialize_json(&shared_secret).map_err(VeilidAPIError::generic)?; + + let associated_data: Option> = match associated_data { + Some(ad) => Some( + data_encoding::BASE64URL_NOPAD + .decode(ad.as_bytes()) + .map_err(VeilidAPIError::generic)?, + ), + None => None, + }; + let veilid_api = get_veilid_api()?; let crypto = veilid_api.crypto()?; let csv = crypto.get(kind).ok_or_else(|| { @@ -1488,18 +1539,19 @@ pub fn crypto_crypt_no_auth( nonce: String, shared_secret: String, ) -> Promise { - let kind: veilid_core::CryptoKind = veilid_core::FourCC::from(kind); - - let mut body: Vec = data_encoding::BASE64URL_NOPAD - .decode(body.as_bytes()) - .unwrap(); - - let nonce: veilid_core::Nonce = veilid_core::deserialize_json(&nonce).unwrap(); - - let shared_secret: veilid_core::SharedSecret = - veilid_core::deserialize_json(&shared_secret).unwrap(); - wrap_api_future_plain(async move { + let kind: veilid_core::CryptoKind = veilid_core::FourCC::from(kind); + + let mut body: Vec = data_encoding::BASE64URL_NOPAD + .decode(body.as_bytes()) + 
.map_err(VeilidAPIError::generic)?; + + let nonce: veilid_core::Nonce = + veilid_core::deserialize_json(&nonce).map_err(VeilidAPIError::generic)?; + + let shared_secret: veilid_core::SharedSecret = + veilid_core::deserialize_json(&shared_secret).map_err(VeilidAPIError::generic)?; + let veilid_api = get_veilid_api()?; let crypto = veilid_api.crypto()?; let csv = crypto.get(kind).ok_or_else(|| { diff --git a/veilid-wasm/src/veilid_client_js.rs b/veilid-wasm/src/veilid_client_js.rs index 17c609d3..0c9fa2f3 100644 --- a/veilid-wasm/src/veilid_client_js.rs +++ b/veilid-wasm/src/veilid_client_js.rs @@ -124,8 +124,9 @@ impl VeilidClient { } } else { // Change a specific layer - let f = filters.get(layer.as_str()).unwrap(); - f.set_max_level(log_level); + if let Some(f) = filters.get(layer.as_str()) { + f.set_max_level(log_level); + } } } @@ -142,10 +143,11 @@ impl VeilidClient { } } else { // Change a specific layer - let f = filters.get(layer.as_str()).unwrap(); - let mut ignore_list = f.ignore_list(); - VeilidLayerFilter::apply_ignore_change_list(&mut ignore_list, &changes); - f.set_ignore_list(Some(ignore_list)); + if let Some(f) = filters.get(layer.as_str()) { + let mut ignore_list = f.ignore_list(); + VeilidLayerFilter::apply_ignore_change_list(&mut ignore_list, &changes); + f.set_ignore_list(Some(ignore_list)); + } } } /// Shut down Veilid and terminate the API. diff --git a/veilid-wasm/src/veilid_routing_context_js.rs b/veilid-wasm/src/veilid_routing_context_js.rs index 0834ac4a..8cbfa5f4 100644 --- a/veilid-wasm/src/veilid_routing_context_js.rs +++ b/veilid-wasm/src/veilid_routing_context_js.rs @@ -331,7 +331,9 @@ impl VeilidRoutingContext { let key = TypedKey::from_str(&key)?; let subkeys = subkeys.unwrap_or_default(); let expiration = if let Some(expiration) = expiration { - veilid_core::Timestamp::from_str(&expiration).map_err(VeilidAPIError::generic)? + veilid_core::Timestamp::new( + u64::from_str(&expiration).map_err(VeilidAPIError::generic)?, + ) } else { veilid_core::Timestamp::default() };
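
// A minimal sketch of the watchDhtValues expiration change at the end of this
// diff: the JS-supplied expiration string is now parsed as a bare u64
// microsecond count and wrapped with Timestamp::new, rather than handed to
// Timestamp::from_str. `Micros` below is a stand-in newtype for
// veilid_core::Timestamp, used only to keep the sketch self-contained.
use std::str::FromStr;

struct Micros(u64);

fn parse_expiration(raw: &str) -> Result<Micros, std::num::ParseIntError> {
    // e.g. parse_expiration("1700000000000000") -> Ok(Micros(1_700_000_000_000_000))
    Ok(Micros(u64::from_str(raw)?))
}
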