From 5ce238d4fd2d9e1587271978154118a7e30eebbb Mon Sep 17 00:00:00 2001 From: Christien Rioux Date: Sat, 1 Mar 2025 16:26:06 -0500 Subject: [PATCH 01/17] [ci skip] fix TTL race condition in hole punch. check relays with both unordered and ordered protocols in ping validator. fix 'make_not_dead' to only run when nodes are actualy dead --- .../src/network_manager/native/network_udp.rs | 3 +- .../network_manager/native/protocol/udp.rs | 111 ++++++++++++----- veilid-core/src/routing_table/bucket_entry.rs | 15 ++- .../src/routing_table/tasks/ping_validator.rs | 113 +++++++++++------- veilid-core/src/rpc_processor/destination.rs | 2 +- veilid-core/src/rpc_processor/mod.rs | 6 +- 6 files changed, 166 insertions(+), 84 deletions(-) diff --git a/veilid-core/src/network_manager/native/network_udp.rs b/veilid-core/src/network_manager/native/network_udp.rs index a3d7267f..2e370ed9 100644 --- a/veilid-core/src/network_manager/native/network_udp.rs +++ b/veilid-core/src/network_manager/native/network_udp.rs @@ -118,7 +118,8 @@ impl Network { let socket_arc = Arc::new(udp_socket); // Create protocol handler - let protocol_handler = RawUdpProtocolHandler::new(self.registry(), socket_arc); + let protocol_handler = + RawUdpProtocolHandler::new(self.registry(), socket_arc, addr.is_ipv6()); // Record protocol handler let mut inner = self.inner.lock(); diff --git a/veilid-core/src/network_manager/native/protocol/udp.rs b/veilid-core/src/network_manager/native/protocol/udp.rs index 81cee3f5..d152c1a9 100644 --- a/veilid-core/src/network_manager/native/protocol/udp.rs +++ b/veilid-core/src/network_manager/native/protocol/udp.rs @@ -7,16 +7,30 @@ pub struct RawUdpProtocolHandler { registry: VeilidComponentRegistry, socket: Arc, assembly_buffer: AssemblyBuffer, + is_ipv6: bool, + default_ttl: u32, + current_ttl: Arc>, } impl_veilid_component_registry_accessor!(RawUdpProtocolHandler); impl RawUdpProtocolHandler { - pub fn new(registry: VeilidComponentRegistry, socket: Arc) -> Self { + pub fn new(registry: VeilidComponentRegistry, socket: Arc, is_ipv6: bool) -> Self { + // Get original TTL + let default_ttl = if is_ipv6 { + socket2_operation(socket.as_ref(), |s| s.unicast_hops_v6()) + .expect("getting IPV6_UNICAST_HOPS should not fail") + } else { + socket2_operation(socket.as_ref(), |s| s.ttl()).expect("getting IP_TTL should not fail") + }; + Self { registry, socket, assembly_buffer: AssemblyBuffer::new(), + is_ipv6, + default_ttl, + current_ttl: Arc::new(AsyncMutex::new(default_ttl)), } } @@ -104,24 +118,35 @@ impl RawUdpProtocolHandler { return Ok(NetworkResult::no_connection_other("punished")); } - // Fragment and send - let sender = |framed_chunk: Vec, remote_addr: SocketAddr| async move { - let len = network_result_try!(self - .socket - .send_to(&framed_chunk, remote_addr) - .await - .into_network_result()?); - if len != framed_chunk.len() { - bail_io_error_other!("UDP partial send") + // Ensure the TTL for sent packets is the default, + // then fragment and send the packets + { + let current_ttl = self.current_ttl.lock().await; + if *current_ttl != self.default_ttl { + veilid_log!(self error "Incorrect TTL on sent UDP packet ({} != {}): len={}, remote_addr={:?}", *current_ttl, self.default_ttl, data.len(), remote_addr); } - Ok(NetworkResult::value(())) - }; - network_result_try!( - self.assembly_buffer - .split_message(data, remote_addr, sender) - .await? 
- ); + // Fragment and send + let sender = |framed_chunk: Vec, remote_addr: SocketAddr| async move { + let len = network_result_try!(self + .socket + .send_to(&framed_chunk, remote_addr) + .await + .into_network_result()?); + if len != framed_chunk.len() { + bail_io_error_other!("UDP partial send") + } + + veilid_log!(self trace "udp::send_message:chunk(len={}) {:?}", len, remote_addr); + Ok(NetworkResult::value(())) + }; + + network_result_try!( + self.assembly_buffer + .split_message(data, remote_addr, sender) + .await? + ); + } // Return a flow for the sent message let peer_addr = PeerAddress::new( @@ -157,22 +182,44 @@ impl RawUdpProtocolHandler { return Ok(NetworkResult::no_connection_other("punished")); } - // Get synchronous socket - let res = socket2_operation(self.socket.as_ref(), |s| { - // Get original TTL - let original_ttl = s.ttl()?; + // Ensure the TTL for sent packets is the default, + // then fragment and send the packets + let res = { + let mut current_ttl = self.current_ttl.lock().await; + if *current_ttl != self.default_ttl { + veilid_log!(self error "Incorrect TTL before sending holepunch UDP packet ({} != {}): remote_addr={:?}", *current_ttl, self.default_ttl, remote_addr); + } - // Set TTL - s.set_ttl(ttl)?; + // Get synchronous socket + socket2_operation(self.socket.as_ref(), |s| { + // Set TTL + let ttl_res = if self.is_ipv6 { + s.set_unicast_hops_v6(ttl) + } else { + s.set_ttl(ttl) + }; + ttl_res.inspect_err(|e| { + veilid_log!(self error "Failed to set TTL on holepunch UDP socket: {} remote_addr={:?}", e, remote_addr); + })?; + *current_ttl = ttl; - // Send zero length packet - let res = s.send_to(&[], &remote_addr.into()); + // Send zero length packet + let res = s.send_to(&[], &remote_addr.into()); - // Restore TTL immediately - s.set_ttl(original_ttl)?; + // Restore TTL immediately + let ttl_res = if self.is_ipv6 { + s.set_unicast_hops_v6(self.default_ttl) + } else { + s.set_ttl(self.default_ttl) + }; + ttl_res.inspect_err(|e| { + veilid_log!(self error "Failed to reset TTL on holepunch UDP socket: {} remote_addr={:?}", e, remote_addr); + })?; + *current_ttl = self.default_ttl; - res - }); + res + }) + }; // Check for errors let len = network_result_try!(res.into_network_result()?); @@ -208,6 +255,10 @@ impl RawUdpProtocolHandler { let local_socket_addr = compatible_unspecified_socket_addr(socket_addr); let socket = bind_async_udp_socket(local_socket_addr)? 
.ok_or(io::Error::from(io::ErrorKind::AddrInUse))?; - Ok(RawUdpProtocolHandler::new(registry, Arc::new(socket))) + Ok(RawUdpProtocolHandler::new( + registry, + Arc::new(socket), + local_socket_addr.is_ipv6(), + )) } } diff --git a/veilid-core/src/routing_table/bucket_entry.rs b/veilid-core/src/routing_table/bucket_entry.rs index 7f4bb9b4..c0f81088 100644 --- a/veilid-core/src/routing_table/bucket_entry.rs +++ b/veilid-core/src/routing_table/bucket_entry.rs @@ -23,7 +23,7 @@ const UNRELIABLE_PING_SPAN_SECS: u32 = 60; const UNRELIABLE_PING_INTERVAL_SECS: u32 = 5; /// - Number of consecutive lost answers on an unordered protocol we will /// tolerate before we call something unreliable -const UNRELIABLE_LOST_ANSWERS_UNORDERED: u32 = 1; +const UNRELIABLE_LOST_ANSWERS_UNORDERED: u32 = 2; /// - Number of consecutive lost answers on an ordered protocol we will /// tolerate before we call something unreliable const UNRELIABLE_LOST_ANSWERS_ORDERED: u32 = 0; @@ -1068,11 +1068,14 @@ impl BucketEntryInner { } pub(super) fn make_not_dead(&mut self, cur_ts: Timestamp) { - self.peer_stats.rpc_stats.last_seen_ts = None; - self.peer_stats.rpc_stats.failed_to_send = 0; - self.peer_stats.rpc_stats.recent_lost_answers_unordered = 0; - self.peer_stats.rpc_stats.recent_lost_answers_ordered = 0; - assert!(self.check_dead(cur_ts).is_none()); + if self.check_dead(cur_ts).is_some() { + self.peer_stats.rpc_stats.last_seen_ts = None; + self.peer_stats.rpc_stats.first_consecutive_seen_ts = None; + self.peer_stats.rpc_stats.failed_to_send = 0; + self.peer_stats.rpc_stats.recent_lost_answers_unordered = 0; + self.peer_stats.rpc_stats.recent_lost_answers_ordered = 0; + assert!(self.check_dead(cur_ts).is_none()); + } } pub(super) fn _state_debug_info(&self, cur_ts: Timestamp) -> String { diff --git a/veilid-core/src/routing_table/tasks/ping_validator.rs b/veilid-core/src/routing_table/tasks/ping_validator.rs index d71be651..6824beff 100644 --- a/veilid-core/src/routing_table/tasks/ping_validator.rs +++ b/veilid-core/src/routing_table/tasks/ping_validator.rs @@ -95,39 +95,17 @@ impl RoutingTable { //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// - // Ping the relay to keep it alive, over every protocol it is relaying for us - #[instrument(level = "trace", skip(self, futurequeue), err)] - async fn relay_keepalive_public_internet( + // Get protocol-specific noderefs for a relay to determine its liveness + // Relays get pinged over more protocols than non-relay nodes because we need to ensure + // that they can reliably forward packets with 'all' sequencing, not just over 'any' sequencing + fn get_relay_specific_noderefs( &self, - cur_ts: Timestamp, - futurequeue: &mut VecDeque, - ) -> EyreResult<()> { - // Get the PublicInternet relay if we are using one - let Some(relay_nr) = self.relay_node(RoutingDomain::PublicInternet) else { - return Ok(()); - }; - + relay_nr: FilteredNodeRef, + routing_domain: RoutingDomain, + ) -> Vec { // Get our publicinternet dial info - let dids = self.all_filtered_dial_info_details( - RoutingDomain::PublicInternet.into(), - &DialInfoFilter::all(), - ); - - let opt_relay_keepalive_ts = self.relay_node_last_keepalive(RoutingDomain::PublicInternet); - let relay_needs_keepalive = opt_relay_keepalive_ts - .map(|kts| { - cur_ts.saturating_sub(kts).as_u64() - >= (RELAY_KEEPALIVE_PING_INTERVAL_SECS as u64 * 1_000_000u64) - }) - .unwrap_or(true); - - if !relay_needs_keepalive { - return Ok(()); - } - // Say we're doing this keepalive 
now - self.inner - .write() - .set_relay_node_last_keepalive(RoutingDomain::PublicInternet, cur_ts); + let dids = + self.all_filtered_dial_info_details(routing_domain.into(), &DialInfoFilter::all()); // We need to keep-alive at one connection per ordering for relays // but also one per NAT mapping that we need to keep open for our inbound dial info @@ -180,6 +158,41 @@ impl RoutingTable { relay_noderefs.push(relay_nr); } + relay_noderefs + } + + // Ping the relay to keep it alive, over every protocol it is relaying for us + #[instrument(level = "trace", skip(self, futurequeue), err)] + async fn relay_keepalive_public_internet( + &self, + cur_ts: Timestamp, + futurequeue: &mut VecDeque, + ) -> EyreResult<()> { + // Get the PublicInternet relay if we are using one + let Some(relay_nr) = self.relay_node(RoutingDomain::PublicInternet) else { + return Ok(()); + }; + + let opt_relay_keepalive_ts = self.relay_node_last_keepalive(RoutingDomain::PublicInternet); + let relay_needs_keepalive = opt_relay_keepalive_ts + .map(|kts| { + cur_ts.saturating_sub(kts).as_u64() + >= (RELAY_KEEPALIVE_PING_INTERVAL_SECS as u64 * 1_000_000u64) + }) + .unwrap_or(true); + + if !relay_needs_keepalive { + return Ok(()); + } + // Say we're doing this keepalive now + self.inner + .write() + .set_relay_node_last_keepalive(RoutingDomain::PublicInternet, cur_ts); + + // Get the sequencing-specific relay noderefs for this relay + let relay_noderefs = + self.get_relay_specific_noderefs(relay_nr, RoutingDomain::PublicInternet); + for relay_nr_filtered in relay_noderefs { futurequeue.push_back( async move { @@ -249,24 +262,36 @@ impl RoutingTable { futurequeue: &mut VecDeque, ) -> EyreResult<()> { // Get all nodes needing pings in the PublicInternet routing domain + let relay_node_filter = self.make_public_internet_relay_node_filter(); let node_refs = self.get_nodes_needing_ping(RoutingDomain::PublicInternet, cur_ts); // Just do a single ping with the best protocol for all the other nodes to check for liveness for nr in node_refs { - let nr = nr.sequencing_clone(Sequencing::PreferOrdered); + // If the node is relay-capable, we should ping it over ALL sequencing types + // instead of just a simple liveness check on ANY best contact method - futurequeue.push_back( - async move { - #[cfg(feature = "verbose-tracing")] - veilid_log!(nr debug "--> PublicInternet Validator ping to {:?}", nr); - let rpc_processor = nr.rpc_processor(); - let _ = rpc_processor - .rpc_call_status(Destination::direct(nr)) - .await?; - Ok(()) - } - .boxed(), - ); + let all_noderefs = if nr.operate(|_rti, e| !relay_node_filter(e)) { + // If this is a relay capable node, get all the sequencing specific noderefs + self.get_relay_specific_noderefs(nr, RoutingDomain::PublicInternet) + } else { + // If a non-relay node, ping with the normal ping type + vec![nr.sequencing_clone(Sequencing::PreferOrdered)] + }; + + for nr in all_noderefs { + futurequeue.push_back( + async move { + #[cfg(feature = "verbose-tracing")] + veilid_log!(nr debug "--> PublicInternet Validator ping to {:?}", nr); + let rpc_processor = nr.rpc_processor(); + let _ = rpc_processor + .rpc_call_status(Destination::direct(nr)) + .await?; + Ok(()) + } + .boxed(), + ); + } } Ok(()) diff --git a/veilid-core/src/rpc_processor/destination.rs b/veilid-core/src/rpc_processor/destination.rs index ef063993..6c75db82 100644 --- a/veilid-core/src/rpc_processor/destination.rs +++ b/veilid-core/src/rpc_processor/destination.rs @@ -206,7 +206,7 @@ impl Destination { } if opt_routing_domain.is_none() { // In 
the case of an unexpected relay, log it and don't pass any sender peer info into an unexpected relay - veilid_log!(node warn "No routing domain for relay: relay={}, node={}", relay, node); + veilid_log!(node debug "Unexpected relay: relay={}, node={}", relay, node); }; ( diff --git a/veilid-core/src/rpc_processor/mod.rs b/veilid-core/src/rpc_processor/mod.rs index 49260ae3..b463c752 100644 --- a/veilid-core/src/rpc_processor/mod.rs +++ b/veilid-core/src/rpc_processor/mod.rs @@ -817,8 +817,10 @@ impl RPCProcessor { return SenderPeerInfo::default(); }; let Some(routing_domain) = opt_routing_domain else { - // No routing domain for target, no node info - // Only a stale connection or no connection exists + // No routing domain for target, no node info is safe to send here + // Only a stale connection or no connection exists, or an unexpected + // relay was used, possibly due to the destination switching relays + // in a race condition with our send return SenderPeerInfo::default(); }; From 792e05a1878417493fbb017f82e508a0e3f8c286 Mon Sep 17 00:00:00 2001 From: Christien Rioux Date: Sat, 1 Mar 2025 17:59:09 -0500 Subject: [PATCH 02/17] [ci skip] pick random relay from percentile rather than always the fastest --- veilid-core/src/routing_table/mod.rs | 16 ++++++- .../routing_table/routing_table_inner/mod.rs | 48 +++++++++++++++++++ .../routing_table/tasks/relay_management.rs | 7 ++- 3 files changed, 69 insertions(+), 2 deletions(-) diff --git a/veilid-core/src/routing_table/mod.rs b/veilid-core/src/routing_table/mod.rs index d70bb20b..be252fe0 100644 --- a/veilid-core/src/routing_table/mod.rs +++ b/veilid-core/src/routing_table/mod.rs @@ -43,7 +43,9 @@ pub const RELAY_MANAGEMENT_INTERVAL_SECS: u32 = 1; /// How frequently we optimize relays pub const RELAY_OPTIMIZATION_INTERVAL_SECS: u32 = 10; /// What percentile to keep our relays optimized to -pub const RELAY_OPTIMIZATION_PERCENTILE: f32 = 75.0; +pub const RELAY_OPTIMIZATION_PERCENTILE: f32 = 66.0; +/// What percentile to choose our relays from (must be greater than RELAY_OPTIMIZATION_PERCENTILE) +pub const RELAY_SELECTION_PERCENTILE: f32 = 85.0; /// How frequently we tick the private route management routine pub const PRIVATE_ROUTE_MANAGEMENT_INTERVAL_SECS: u32 = 1; @@ -1146,6 +1148,18 @@ impl RoutingTable { inner.find_fastest_node(cur_ts, filter, metric) } + #[instrument(level = "trace", skip(self, filter, metric), ret)] + pub fn find_random_fast_node( + &self, + cur_ts: Timestamp, + filter: impl Fn(&BucketEntryInner) -> bool, + percentile: f32, + metric: impl Fn(&LatencyStats) -> TimestampDuration, + ) -> Option { + let inner = self.inner.read(); + inner.find_random_fast_node(cur_ts, filter, percentile, metric) + } + #[instrument(level = "trace", skip(self, filter, metric), ret)] pub fn get_node_speed_percentile( &self, diff --git a/veilid-core/src/routing_table/routing_table_inner/mod.rs b/veilid-core/src/routing_table/routing_table_inner/mod.rs index e4a314f8..58bf5f40 100644 --- a/veilid-core/src/routing_table/routing_table_inner/mod.rs +++ b/veilid-core/src/routing_table/routing_table_inner/mod.rs @@ -1445,6 +1445,54 @@ impl RoutingTableInner { fastest_node.map(|e| NodeRef::new(self.registry(), e)) } + #[instrument(level = "trace", skip(self, filter, metric), ret)] + pub fn find_random_fast_node( + &self, + cur_ts: Timestamp, + filter: impl Fn(&BucketEntryInner) -> bool, + percentile: f32, + metric: impl Fn(&LatencyStats) -> TimestampDuration, + ) -> Option { + // Go through all entries and find all entries that matches filter function 
+ let mut all_filtered_nodes: Vec> = Vec::new(); + + // Iterate all known nodes for candidates + self.with_entries(cur_ts, BucketEntryState::Unreliable, |rti, entry| { + let entry2 = entry.clone(); + entry.with(rti, |_rti, e| { + // Filter this node + if filter(e) { + all_filtered_nodes.push(entry2); + } + }); + // Don't end early, iterate through all entries + Option::<()>::None + }); + + // Sort by fastest tm90 reliable + all_filtered_nodes.sort_by(|a, b| { + a.with(self, |rti, ea| { + b.with(rti, |_rti, eb| { + BucketEntryInner::cmp_fastest_reliable(cur_ts, ea, eb, &metric) + }) + }) + }); + + if all_filtered_nodes.is_empty() { + return None; + } + + let max_index = + (((all_filtered_nodes.len() - 1) as f32) * (100.0 - percentile) / 100.0) as u32; + let chosen_index = (get_random_u32() % (max_index + 1)) as usize; + + // Return the chosen node node + Some(NodeRef::new( + self.registry(), + all_filtered_nodes[chosen_index].clone(), + )) + } + #[instrument(level = "trace", skip(self, filter, metric), ret)] pub fn get_node_relative_performance( &self, diff --git a/veilid-core/src/routing_table/tasks/relay_management.rs b/veilid-core/src/routing_table/tasks/relay_management.rs index b3d6c61d..cb8a0d87 100644 --- a/veilid-core/src/routing_table/tasks/relay_management.rs +++ b/veilid-core/src/routing_table/tasks/relay_management.rs @@ -202,7 +202,12 @@ impl RoutingTable { } if !got_outbound_relay { // Find a node in our routing table that is an acceptable inbound relay - if let Some(nr) = self.find_fastest_node(cur_ts, &relay_node_filter, |ls| ls.tm90) { + if let Some(nr) = self.find_random_fast_node( + cur_ts, + &relay_node_filter, + RELAY_SELECTION_PERCENTILE, + |ls| ls.tm90, + ) { veilid_log!(self debug "Inbound relay node selected: {}", nr); editor.set_relay_node(Some(nr)); } From c3fddba239111e9a19cc2c95968d9c252381d59a Mon Sep 17 00:00:00 2001 From: Christien Rioux Date: Sat, 1 Mar 2025 19:26:51 -0500 Subject: [PATCH 03/17] [ci skip] measure relay time --- veilid-core/src/network_manager/mod.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/veilid-core/src/network_manager/mod.rs b/veilid-core/src/network_manager/mod.rs index 1eb2beda..968e3e42 100644 --- a/veilid-core/src/network_manager/mod.rs +++ b/veilid-core/src/network_manager/mod.rs @@ -1101,9 +1101,11 @@ impl NetworkManager { // Relay the packet to the desired destination veilid_log!(self trace "relaying {} bytes to {}", data.len(), relay_nr); + let cur_ts = Timestamp::now(); if let Err(e) = pin_future!(self.send_data(relay_nr, data.to_vec())).await { veilid_log!(self debug "failed to relay envelope: {}" ,e); } + veilid_log!(self debug target:"network_result", "relay time: {}", Timestamp::now() - cur_ts); } // Inform caller that we dealt with the envelope, but did not process it locally return Ok(false); From b7b9342b1323abf998babe74dfb1d63615194342 Mon Sep 17 00:00:00 2001 From: Christien Rioux Date: Sun, 2 Mar 2025 11:38:23 -0500 Subject: [PATCH 04/17] [ci skip] add relay workers, organize rpc workers --- veilid-core/Cargo.toml | 2 +- veilid-core/src/lib.rs | 2 + veilid-core/src/network_manager/debug.rs | 37 +++ veilid-core/src/network_manager/mod.rs | 63 ++++- .../src/network_manager/relay_worker.rs | 120 +++++++++ veilid-core/src/network_manager/stats.rs | 45 ++-- .../src/routing_table/stats_accounting.rs | 153 ----------- veilid-core/src/rpc_processor/debug.rs | 18 ++ veilid-core/src/rpc_processor/mod.rs | 227 ++-------------- veilid-core/src/rpc_processor/rpc_worker.rs | 247 ++++++++++++++++++ 
veilid-core/src/stats_accounting.rs | 153 +++++++++++ veilid-core/src/veilid_api/debug.rs | 9 +- veilid-flutter/rust/Cargo.toml | 2 +- veilid-tools/src/timestamp.rs | 46 ++-- 14 files changed, 721 insertions(+), 403 deletions(-) create mode 100644 veilid-core/src/network_manager/debug.rs create mode 100644 veilid-core/src/network_manager/relay_worker.rs create mode 100644 veilid-core/src/rpc_processor/debug.rs create mode 100644 veilid-core/src/rpc_processor/rpc_worker.rs create mode 100644 veilid-core/src/stats_accounting.rs diff --git a/veilid-core/Cargo.toml b/veilid-core/Cargo.toml index d8b72852..a4125dcf 100644 --- a/veilid-core/Cargo.toml +++ b/veilid-core/Cargo.toml @@ -83,7 +83,7 @@ directories = "5.0.1" # Logging tracing = { version = "0.1.40", features = ["log", "attributes"] } -tracing-subscriber = "0.3.18" +tracing-subscriber = "0.3.19" tracing-error = "0.2.0" eyre = "0.6.12" thiserror = "1.0.63" diff --git a/veilid-core/src/lib.rs b/veilid-core/src/lib.rs index 503264ee..04e74320 100644 --- a/veilid-core/src/lib.rs +++ b/veilid-core/src/lib.rs @@ -50,6 +50,7 @@ mod logging; mod network_manager; mod routing_table; mod rpc_processor; +mod stats_accounting; mod storage_manager; mod table_store; mod veilid_api; @@ -64,6 +65,7 @@ pub use self::logging::{ DEFAULT_LOG_FACILITIES_ENABLED_LIST, DEFAULT_LOG_FACILITIES_IGNORE_LIST, DURATION_LOG_FACILITIES, FLAME_LOG_FACILITIES_IGNORE_LIST, VEILID_LOG_KEY_FIELD, }; +pub(crate) use self::stats_accounting::*; pub use self::veilid_api::*; pub use self::veilid_config::*; pub use veilid_tools as tools; diff --git a/veilid-core/src/network_manager/debug.rs b/veilid-core/src/network_manager/debug.rs new file mode 100644 index 00000000..c06a92be --- /dev/null +++ b/veilid-core/src/network_manager/debug.rs @@ -0,0 +1,37 @@ +use super::*; + +impl NetworkManager { + pub fn debug_info_nodeinfo(&self) -> String { + let mut out = String::new(); + let inner = self.inner.lock(); + out += &format!( + "Relay Worker Deque Latency:\n{}", + indent_all_string(&inner.stats.relay_worker_dequeue_latency) + ); + out += "\n"; + out += &format!( + "Relay Worker Process Latency:\n{}", + indent_all_string(&inner.stats.relay_worker_process_latency) + ); + out + } + + pub fn debug(&self) -> String { + let stats = self.get_stats(); + + let mut out = String::new(); + out += "Network Manager\n"; + out += "---------------\n"; + let mut out = format!( + "Transfer stats:\n{}\n", + indent_all_string(&stats.self_stats.transfer_stats) + ); + out += &self.debug_info_nodeinfo(); + + out += "Node Contact Method Cache\n"; + out += "-------------------------\n"; + out += &self.inner.lock().node_contact_method_cache.debug(); + + out + } +} diff --git a/veilid-core/src/network_manager/mod.rs b/veilid-core/src/network_manager/mod.rs index 968e3e42..c9464395 100644 --- a/veilid-core/src/network_manager/mod.rs +++ b/veilid-core/src/network_manager/mod.rs @@ -10,10 +10,12 @@ mod address_filter; mod connection_handle; mod connection_manager; mod connection_table; +mod debug; mod direct_boot; mod network_connection; mod node_contact_method_cache; mod receipt_manager; +mod relay_worker; mod send_data; mod stats; mod tasks; @@ -26,9 +28,10 @@ pub mod tests; pub use connection_manager::*; pub use network_connection::*; -pub(crate) use node_contact_method_cache::*; pub use receipt_manager::*; pub use stats::*; + +pub(crate) use node_contact_method_cache::*; pub(crate) use types::*; //////////////////////////////////////////////////////////////////////////////////////// @@ -42,6 +45,7 @@ use 
hashlink::LruCache; use native::*; #[cfg(not(all(target_arch = "wasm32", target_os = "unknown")))] pub use native::{MAX_CAPABILITIES, PUBLIC_INTERNET_CAPABILITIES}; +use relay_worker::*; use routing_table::*; use rpc_processor::*; #[cfg(all(target_arch = "wasm32", target_os = "unknown"))] @@ -60,6 +64,7 @@ pub const IPADDR_MAX_INACTIVE_DURATION_US: TimestampDuration = pub const ADDRESS_FILTER_TASK_INTERVAL_SECS: u32 = 60; pub const BOOT_MAGIC: &[u8; 4] = b"BOOT"; pub const HOLE_PUNCH_DELAY_MS: u32 = 100; +pub const RELAY_WORKERS_PER_CORE: u32 = 16; // Things we get when we start up and go away when we shut down // Routing table is not in here because we want it to survive a network shutdown/startup restart @@ -171,7 +176,6 @@ impl Default for NetworkManagerStartupContext { Self::new() } } - // The mutable state of the network manager #[derive(Debug)] struct NetworkManagerInner { @@ -181,6 +185,11 @@ struct NetworkManagerInner { address_check: Option, peer_info_change_subscription: Option, socket_address_change_subscription: Option, + + // Relay workers + relay_stop_source: Option, + relay_send_channel: Option>, + relay_worker_join_handles: Vec>, } pub(crate) struct NetworkManager { @@ -202,6 +211,10 @@ pub(crate) struct NetworkManager { // Startup context startup_context: NetworkManagerStartupContext, + + // Relay workers config + concurrency: u32, + queue_size: u32, } impl_veilid_component!(NetworkManager); @@ -214,6 +227,8 @@ impl fmt::Debug for NetworkManager { .field("address_filter", &self.address_filter) .field("network_key", &self.network_key) .field("startup_context", &self.startup_context) + .field("concurrency", &self.concurrency) + .field("queue_size", &self.queue_size) .finish() } } @@ -227,6 +242,10 @@ impl NetworkManager { address_check: None, peer_info_change_subscription: None, socket_address_change_subscription: None, + // + relay_send_channel: None, + relay_stop_source: None, + relay_worker_join_handles: Vec::new(), } } @@ -264,6 +283,26 @@ impl NetworkManager { network_key }; + // make local copy of node id for easy access + let (concurrency, queue_size) = { + let config = registry.config(); + let c = config.get(); + + // set up channel + let mut concurrency = c.network.rpc.concurrency; + let queue_size = c.network.rpc.queue_size; + if concurrency == 0 { + concurrency = get_concurrency(); + if concurrency == 0 { + concurrency = 1; + } + + // Default relay concurrency is the number of CPUs * 16 relay workers per core + concurrency *= RELAY_WORKERS_PER_CORE; + } + (concurrency, queue_size) + }; + let inner = Self::new_inner(); let address_filter = AddressFilter::new(registry.clone()); @@ -282,6 +321,8 @@ impl NetworkManager { ), network_key, startup_context, + concurrency, + queue_size, }; this.setup_tasks(); @@ -360,7 +401,8 @@ impl NetworkManager { receipt_manager: receipt_manager.clone(), }); - let address_check = AddressCheck::new(net.clone()); + // Startup relay workers + self.startup_relay_workers()?; // Register event handlers let peer_info_change_subscription = @@ -371,6 +413,7 @@ impl NetworkManager { { let mut inner = self.inner.lock(); + let address_check = AddressCheck::new(net.clone()); inner.address_check = Some(address_check); inner.peer_info_change_subscription = Some(peer_info_change_subscription); inner.socket_address_change_subscription = Some(socket_address_change_subscription); @@ -426,6 +469,9 @@ impl NetworkManager { inner.address_check = None; } + // Shutdown relay workers + self.shutdown_relay_workers().await; + // Shutdown network components if 
they started up veilid_log!(self debug "shutting down network components"); @@ -1099,13 +1145,12 @@ impl NetworkManager { relay_nr.set_sequencing(Sequencing::EnsureOrdered); }; - // Relay the packet to the desired destination - veilid_log!(self trace "relaying {} bytes to {}", data.len(), relay_nr); - let cur_ts = Timestamp::now(); - if let Err(e) = pin_future!(self.send_data(relay_nr, data.to_vec())).await { - veilid_log!(self debug "failed to relay envelope: {}" ,e); + // Pass relay to RPC system + if let Err(e) = self.enqueue_relay(relay_nr, data.to_vec()) { + // Couldn't enqueue, but not the sender's fault + veilid_log!(self debug "failed to enqueue relay: {}", e); + return Ok(false); } - veilid_log!(self debug target:"network_result", "relay time: {}", Timestamp::now() - cur_ts); } // Inform caller that we dealt with the envelope, but did not process it locally return Ok(false); diff --git a/veilid-core/src/network_manager/relay_worker.rs b/veilid-core/src/network_manager/relay_worker.rs new file mode 100644 index 00000000..61dc7951 --- /dev/null +++ b/veilid-core/src/network_manager/relay_worker.rs @@ -0,0 +1,120 @@ +use futures_util::StreamExt as _; +use stop_token::future::FutureExt as _; + +use super::*; + +#[derive(Debug)] +pub(super) enum RelayWorkerRequestKind { + Relay { + relay_nr: FilteredNodeRef, + data: Vec, + }, +} + +#[derive(Debug)] +pub(super) struct RelayWorkerRequest { + enqueued_ts: Timestamp, + span: Span, + kind: RelayWorkerRequestKind, +} + +impl NetworkManager { + pub(super) fn startup_relay_workers(&self) -> EyreResult<()> { + let mut inner = self.inner.lock(); + + // Relay workers + let channel = flume::bounded(self.queue_size as usize); + inner.relay_send_channel = Some(channel.0.clone()); + inner.relay_stop_source = Some(StopSource::new()); + + // spin up N workers + veilid_log!(self debug "Starting {} relay workers", self.concurrency); + for task_n in 0..self.concurrency { + let registry = self.registry(); + let receiver = channel.1.clone(); + let stop_token = inner.relay_stop_source.as_ref().unwrap().token(); + let jh = spawn(&format!("relay worker {}", task_n), async move { + let this = registry.network_manager(); + Box::pin(this.relay_worker(stop_token, receiver)).await + }); + inner.relay_worker_join_handles.push(jh); + } + Ok(()) + } + + pub(super) async fn shutdown_relay_workers(&self) { + // Stop the relay workers + let mut unord = FuturesUnordered::new(); + { + let mut inner = self.inner.lock(); + // take the join handles out + for h in inner.relay_worker_join_handles.drain(..) 
{ + unord.push(h); + } + // drop the stop + drop(inner.relay_stop_source.take()); + } + veilid_log!(self debug "Stopping {} relay workers", unord.len()); + + // Wait for them to complete + while unord.next().await.is_some() {} + } + + pub(super) async fn relay_worker( + &self, + stop_token: StopToken, + receiver: flume::Receiver, + ) { + while let Ok(Ok(request)) = receiver.recv_async().timeout_at(stop_token.clone()).await { + let relay_request_span = tracing::trace_span!("relay request"); + relay_request_span.follows_from(request.span); + + // Measure dequeue time + let dequeue_ts = Timestamp::now(); + let dequeue_latency = dequeue_ts.saturating_sub(request.enqueued_ts); + + // Process request kind + match request.kind { + RelayWorkerRequestKind::Relay { relay_nr, data } => { + // Relay the packet to the desired destination + veilid_log!(self trace "relaying {} bytes to {}", data.len(), relay_nr); + if let Err(e) = pin_future!(self.send_data(relay_nr, data.to_vec())).await { + veilid_log!(self debug "failed to relay envelope: {}" ,e); + } + } + } + + // Measure process time + let process_ts = Timestamp::now(); + let process_latency = process_ts.saturating_sub(dequeue_ts); + + // Accounting + self.stats_relay_processed(dequeue_latency, process_latency) + } + } + + #[instrument(level = "trace", target = "rpc", skip_all)] + pub(super) fn enqueue_relay(&self, relay_nr: FilteredNodeRef, data: Vec) -> EyreResult<()> { + let _guard = self + .startup_context + .startup_lock + .enter() + .wrap_err("not started up")?; + + let send_channel = { + let inner = self.inner.lock(); + let Some(send_channel) = inner.relay_send_channel.as_ref().cloned() else { + bail!("send channel is closed"); + }; + send_channel + }; + send_channel + .try_send(RelayWorkerRequest { + enqueued_ts: Timestamp::now(), + span: Span::current(), + kind: RelayWorkerRequestKind::Relay { relay_nr, data }, + }) + .map_err(|e| eyre!("failed to enqueue relay: {}", e))?; + Ok(()) + } +} diff --git a/veilid-core/src/network_manager/stats.rs b/veilid-core/src/network_manager/stats.rs index 0e897968..4cdca5ad 100644 --- a/veilid-core/src/network_manager/stats.rs +++ b/veilid-core/src/network_manager/stats.rs @@ -22,6 +22,10 @@ impl Default for PerAddressStatsKey { pub struct NetworkManagerStats { pub self_stats: PerAddressStats, pub per_address_stats: LruCache, + pub relay_worker_dequeue_latency: LatencyStats, + pub relay_worker_process_latency: LatencyStats, + pub relay_worker_dequeue_latency_accounting: LatencyStatsAccounting, + pub relay_worker_process_latency_accounting: LatencyStatsAccounting, } impl Default for NetworkManagerStats { @@ -29,6 +33,10 @@ impl Default for NetworkManagerStats { Self { self_stats: PerAddressStats::default(), per_address_stats: LruCache::new(IPADDR_TABLE_SIZE), + relay_worker_dequeue_latency: LatencyStats::default(), + relay_worker_process_latency: LatencyStats::default(), + relay_worker_dequeue_latency_accounting: LatencyStatsAccounting::new(), + relay_worker_process_latency_accounting: LatencyStatsAccounting::new(), } } } @@ -36,7 +44,7 @@ impl Default for NetworkManagerStats { impl NetworkManager { // Callbacks from low level network for statistics gathering pub fn stats_packet_sent(&self, addr: IpAddr, bytes: ByteCount) { - let inner = &mut *self.inner.lock(); + let mut inner = self.inner.lock(); inner .stats .self_stats @@ -53,7 +61,7 @@ impl NetworkManager { } pub fn stats_packet_rcvd(&self, addr: IpAddr, bytes: ByteCount) { - let inner = &mut *self.inner.lock(); + let mut inner = self.inner.lock(); 
inner .stats .self_stats @@ -69,28 +77,27 @@ impl NetworkManager { .add_down(bytes); } + pub fn stats_relay_processed( + &self, + dequeue_latency: TimestampDuration, + process_latency: TimestampDuration, + ) { + let mut inner = self.inner.lock(); + inner.stats.relay_worker_dequeue_latency = inner + .stats + .relay_worker_dequeue_latency_accounting + .record_latency(dequeue_latency); + inner.stats.relay_worker_process_latency = inner + .stats + .relay_worker_process_latency_accounting + .record_latency(process_latency); + } + pub fn get_stats(&self) -> NetworkManagerStats { let inner = self.inner.lock(); inner.stats.clone() } - pub fn debug(&self) -> String { - let stats = self.get_stats(); - - let mut out = String::new(); - out += "Network Manager\n"; - out += "---------------\n"; - let mut out = format!( - "Transfer stats:\n{}\n", - indent_all_string(&stats.self_stats.transfer_stats) - ); - out += "Node Contact Method Cache\n"; - out += "-------------------------\n"; - out += &self.inner.lock().node_contact_method_cache.debug(); - - out - } - pub fn get_veilid_state(&self) -> Box { if !self.network_is_started() { return Box::new(VeilidStateNetwork { diff --git a/veilid-core/src/routing_table/stats_accounting.rs b/veilid-core/src/routing_table/stats_accounting.rs index a10b6a61..9a8552da 100644 --- a/veilid-core/src/routing_table/stats_accounting.rs +++ b/veilid-core/src/routing_table/stats_accounting.rs @@ -1,15 +1,5 @@ use super::*; -// Latency entry is per round-trip packet (ping or data) -// - Size is number of entries -const ROLLING_LATENCIES_SIZE: usize = 50; - -// Transfers entries are in bytes total for the interval -// - Size is number of entries -// - Interval is number of seconds in each entry -const ROLLING_TRANSFERS_SIZE: usize = 10; -pub const ROLLING_TRANSFERS_INTERVAL_SECS: u32 = 1; - // State entry is per state reason change // - Size is number of entries const ROLLING_STATE_REASON_SPAN_SIZE: usize = 32; @@ -20,149 +10,6 @@ pub const UPDATE_STATE_STATS_INTERVAL_SECS: u32 = 1; // - Interval is number of seconds in each entry const ROLLING_ANSWERS_SIZE: usize = 10; pub const ROLLING_ANSWER_INTERVAL_SECS: u32 = 60; - -#[derive(Debug, Clone, Copy, PartialEq, Eq, Default)] -pub struct TransferCount { - down: ByteCount, - up: ByteCount, -} - -#[derive(Debug, Clone, Default)] -pub struct TransferStatsAccounting { - rolling_transfers: VecDeque, - current_transfer: TransferCount, -} - -impl TransferStatsAccounting { - pub fn new() -> Self { - Self { - rolling_transfers: VecDeque::new(), - current_transfer: TransferCount::default(), - } - } - - pub fn add_down(&mut self, bytes: ByteCount) { - self.current_transfer.down += bytes; - } - - pub fn add_up(&mut self, bytes: ByteCount) { - self.current_transfer.up += bytes; - } - - pub fn roll_transfers( - &mut self, - last_ts: Timestamp, - cur_ts: Timestamp, - transfer_stats: &mut TransferStatsDownUp, - ) { - let dur_ms = cur_ts.saturating_sub(last_ts) / 1000u64; - while self.rolling_transfers.len() >= ROLLING_TRANSFERS_SIZE { - self.rolling_transfers.pop_front(); - } - self.rolling_transfers.push_back(self.current_transfer); - - transfer_stats.down.total += self.current_transfer.down; - transfer_stats.up.total += self.current_transfer.up; - - self.current_transfer = TransferCount::default(); - - transfer_stats.down.maximum = 0.into(); - transfer_stats.up.maximum = 0.into(); - transfer_stats.down.minimum = u64::MAX.into(); - transfer_stats.up.minimum = u64::MAX.into(); - transfer_stats.down.average = 0.into(); - transfer_stats.up.average 
= 0.into(); - for xfer in &self.rolling_transfers { - let bpsd = xfer.down * 1000u64 / dur_ms; - let bpsu = xfer.up * 1000u64 / dur_ms; - transfer_stats.down.maximum.max_assign(bpsd); - transfer_stats.up.maximum.max_assign(bpsu); - transfer_stats.down.minimum.min_assign(bpsd); - transfer_stats.up.minimum.min_assign(bpsu); - transfer_stats.down.average += bpsd; - transfer_stats.up.average += bpsu; - } - let len = self.rolling_transfers.len() as u64; - if len > 0 { - transfer_stats.down.average /= len; - transfer_stats.up.average /= len; - } - } -} - -#[derive(Debug, Clone, Default)] -pub struct LatencyStatsAccounting { - rolling_latencies: VecDeque, -} - -impl LatencyStatsAccounting { - pub fn new() -> Self { - Self { - rolling_latencies: VecDeque::new(), - } - } - - fn get_tm_n(sorted_latencies: &[TimestampDuration], n: usize) -> Option { - let tmcount = sorted_latencies.len() * n / 100; - if tmcount == 0 { - None - } else { - let mut tm = TimestampDuration::new(0); - for l in &sorted_latencies[..tmcount] { - tm += *l; - } - tm /= tmcount as u64; - Some(tm) - } - } - - fn get_p_n(sorted_latencies: &[TimestampDuration], n: usize) -> TimestampDuration { - let pindex = (sorted_latencies.len() * n / 100).saturating_sub(1); - sorted_latencies[pindex] - } - - pub fn record_latency(&mut self, latency: TimestampDuration) -> LatencyStats { - while self.rolling_latencies.len() >= ROLLING_LATENCIES_SIZE { - self.rolling_latencies.pop_front(); - } - self.rolling_latencies.push_back(latency); - - // Calculate latency stats - - let mut fastest = TimestampDuration::new(u64::MAX); - let mut slowest = TimestampDuration::new(0u64); - let mut average = TimestampDuration::new(0u64); - - for rl in &self.rolling_latencies { - fastest.min_assign(*rl); - slowest.max_assign(*rl); - average += *rl; - } - let len = self.rolling_latencies.len() as u64; - if len > 0 { - average /= len; - } - - let mut sorted_latencies: Vec<_> = self.rolling_latencies.iter().copied().collect(); - sorted_latencies.sort(); - - let tm90 = Self::get_tm_n(&sorted_latencies, 90).unwrap_or(average); - let tm75 = Self::get_tm_n(&sorted_latencies, 75).unwrap_or(average); - let p90 = Self::get_p_n(&sorted_latencies, 90); - let p75 = Self::get_p_n(&sorted_latencies, 75); - - LatencyStats { - fastest, - average, - slowest, - tm90, - tm75, - p90, - p75, - } - } -} - #[derive(Debug, Clone, Copy, PartialEq, Eq)] pub struct StateReasonSpan { state_reason: BucketEntryStateReason, diff --git a/veilid-core/src/rpc_processor/debug.rs b/veilid-core/src/rpc_processor/debug.rs new file mode 100644 index 00000000..1e19735f --- /dev/null +++ b/veilid-core/src/rpc_processor/debug.rs @@ -0,0 +1,18 @@ +use super::*; + +impl RPCProcessor { + pub fn debug_info_nodeinfo(&self) -> String { + let mut out = String::new(); + let inner = self.inner.lock(); + out += &format!( + "RPC Worker Dequeue Latency:\n{}", + indent_all_string(&inner.rpc_worker_dequeue_latency) + ); + out += "\n"; + out += &format!( + "RPC Worker Process Latency:\n{}", + indent_all_string(&inner.rpc_worker_process_latency) + ); + out + } +} diff --git a/veilid-core/src/rpc_processor/mod.rs b/veilid-core/src/rpc_processor/mod.rs index b463c752..b623f5ba 100644 --- a/veilid-core/src/rpc_processor/mod.rs +++ b/veilid-core/src/rpc_processor/mod.rs @@ -2,6 +2,7 @@ use super::*; mod answer; mod coders; +mod debug; mod destination; mod error; mod fanout; @@ -22,6 +23,7 @@ mod rpc_status; mod rpc_validate_dial_info; mod rpc_value_changed; mod rpc_watch_value; +mod rpc_worker; mod sender_info; mod 
sender_peer_info; @@ -48,7 +50,7 @@ pub(crate) use error::*; pub(crate) use fanout::*; pub(crate) use sender_info::*; -use futures_util::StreamExt; +use futures_util::StreamExt as _; use stop_token::future::FutureExt as _; use coders::*; @@ -56,6 +58,7 @@ use message::*; use message_header::*; use operation_waiter::*; use rendered_operation::*; +use rpc_worker::*; use sender_peer_info::*; use crypto::*; @@ -67,6 +70,10 @@ impl_veilid_log_facility!("rpc"); ///////////////////////////////////////////////////////////////////// +const RPC_WORKERS_PER_CORE: u32 = 16; + +///////////////////////////////////////////////////////////////////// + #[derive(Debug)] #[must_use] struct WaitableReplyContext { @@ -122,9 +129,13 @@ impl Default for RPCProcessorStartupContext { #[derive(Debug)] #[must_use] struct RPCProcessorInner { - send_channel: Option>, - stop_source: Option, - worker_join_handles: Vec>, + rpc_send_channel: Option>, + rpc_stop_source: Option, + rpc_worker_join_handles: Vec>, + rpc_worker_dequeue_latency: LatencyStats, + rpc_worker_process_latency: LatencyStats, + rpc_worker_dequeue_latency_accounting: LatencyStatsAccounting, + rpc_worker_process_latency_accounting: LatencyStatsAccounting, } #[derive(Debug)] @@ -146,9 +157,13 @@ impl_veilid_component!(RPCProcessor); impl RPCProcessor { fn new_inner() -> RPCProcessorInner { RPCProcessorInner { - send_channel: None, - stop_source: None, - worker_join_handles: Vec::new(), + rpc_send_channel: None, + rpc_stop_source: None, + rpc_worker_join_handles: Vec::new(), + rpc_worker_dequeue_latency: LatencyStats::default(), + rpc_worker_process_latency: LatencyStats::default(), + rpc_worker_dequeue_latency_accounting: LatencyStatsAccounting::new(), + rpc_worker_process_latency_accounting: LatencyStatsAccounting::new(), } } @@ -173,7 +188,7 @@ impl RPCProcessor { } // Default RPC concurrency is the number of CPUs * 16 rpc workers per core, as a single worker takes about 1% CPU when relaying and 16% is reasonable for baseline plus relay - concurrency *= 16; + concurrency *= RPC_WORKERS_PER_CORE; } (concurrency, queue_size, max_route_hop_count, timeout_us) }; @@ -227,22 +242,12 @@ impl RPCProcessor { let mut inner = self.inner.lock(); let channel = flume::bounded(self.queue_size as usize); - inner.send_channel = Some(channel.0.clone()); - inner.stop_source = Some(StopSource::new()); - - // spin up N workers - veilid_log!(self trace "Spinning up {} RPC workers", self.concurrency); - for task_n in 0..self.concurrency { - let registry = self.registry(); - let receiver = channel.1.clone(); - let stop_token = inner.stop_source.as_ref().unwrap().token(); - let jh = spawn(&format!("rpc worker {}", task_n), async move { - let this = registry.rpc_processor(); - Box::pin(this.rpc_worker(stop_token, receiver)).await - }); - inner.worker_join_handles.push(jh); - } + inner.rpc_send_channel = Some(channel.0.clone()); + inner.rpc_stop_source = Some(StopSource::new()); } + + self.startup_rpc_workers()?; + guard.success(); veilid_log!(self debug "finished rpc processor startup"); @@ -260,21 +265,7 @@ impl RPCProcessor { .await .expect("should be started up"); - // Stop the rpc workers - let mut unord = FuturesUnordered::new(); - { - let mut inner = self.inner.lock(); - // take the join handles out - for h in inner.worker_join_handles.drain(..) 
{ - unord.push(h); - } - // drop the stop - drop(inner.stop_source.take()); - } - veilid_log!(self debug "stopping {} rpc worker tasks", unord.len()); - - // Wait for them to complete - while unord.next().await.is_some() {} + self.shutdown_rpc_workers().await; veilid_log!(self debug "resetting rpc processor state"); @@ -1620,164 +1611,4 @@ impl RPCProcessor { } } } - - async fn rpc_worker( - &self, - stop_token: StopToken, - receiver: flume::Receiver<(Span, MessageEncoded)>, - ) { - while let Ok(Ok((prev_span, msg))) = - receiver.recv_async().timeout_at(stop_token.clone()).await - { - let rpc_message_span = tracing::trace_span!("rpc message"); - rpc_message_span.follows_from(prev_span); - - network_result_value_or_log!(self match self - .process_rpc_message(msg).instrument(rpc_message_span) - .await - { - Err(e) => { - veilid_log!(self error "couldn't process rpc message: {}", e); - continue; - } - - Ok(v) => { - v - } - } => [ format!(": msg.header={:?}", msg.header) ] {}); - } - } - - #[instrument(level = "trace", target = "rpc", skip_all)] - pub fn enqueue_direct_message( - &self, - envelope: Envelope, - sender_noderef: FilteredNodeRef, - flow: Flow, - routing_domain: RoutingDomain, - body: Vec, - ) -> EyreResult<()> { - let _guard = self - .startup_context - .startup_lock - .enter() - .wrap_err("not started up")?; - - if sender_noderef.routing_domain_set() != routing_domain { - bail!("routing domain should match peer noderef filter"); - } - - let header = MessageHeader { - detail: RPCMessageHeaderDetail::Direct(RPCMessageHeaderDetailDirect { - envelope, - sender_noderef, - flow, - routing_domain, - }), - timestamp: Timestamp::now(), - body_len: ByteCount::new(body.len() as u64), - }; - - let msg = MessageEncoded { - header, - data: MessageData { contents: body }, - }; - - let send_channel = { - let inner = self.inner.lock(); - let Some(send_channel) = inner.send_channel.as_ref().cloned() else { - bail!("send channel is closed"); - }; - send_channel - }; - send_channel - .try_send((Span::current(), msg)) - .map_err(|e| eyre!("failed to enqueue direct RPC message: {}", e))?; - Ok(()) - } - - #[instrument(level = "trace", target = "rpc", skip_all)] - fn enqueue_safety_routed_message( - &self, - direct: RPCMessageHeaderDetailDirect, - remote_safety_route: PublicKey, - sequencing: Sequencing, - body: Vec, - ) -> EyreResult<()> { - let _guard = self - .startup_context - .startup_lock - .enter() - .wrap_err("not started up")?; - - let header = MessageHeader { - detail: RPCMessageHeaderDetail::SafetyRouted(RPCMessageHeaderDetailSafetyRouted { - direct, - remote_safety_route, - sequencing, - }), - timestamp: Timestamp::now(), - body_len: (body.len() as u64).into(), - }; - - let msg = MessageEncoded { - header, - data: MessageData { contents: body }, - }; - let send_channel = { - let inner = self.inner.lock(); - let Some(send_channel) = inner.send_channel.as_ref().cloned() else { - bail!("send channel is closed"); - }; - send_channel - }; - send_channel - .try_send((Span::current(), msg)) - .map_err(|e| eyre!("failed to enqueue safety routed RPC message: {}", e))?; - Ok(()) - } - - #[instrument(level = "trace", target = "rpc", skip_all)] - fn enqueue_private_routed_message( - &self, - direct: RPCMessageHeaderDetailDirect, - remote_safety_route: PublicKey, - private_route: PublicKey, - safety_spec: SafetySpec, - body: Vec, - ) -> EyreResult<()> { - let _guard = self - .startup_context - .startup_lock - .enter() - .wrap_err("not started up")?; - - let header = MessageHeader { - detail: 
RPCMessageHeaderDetail::PrivateRouted(RPCMessageHeaderDetailPrivateRouted { - direct, - remote_safety_route, - private_route, - safety_spec, - }), - timestamp: Timestamp::now(), - body_len: (body.len() as u64).into(), - }; - - let msg = MessageEncoded { - header, - data: MessageData { contents: body }, - }; - - let send_channel = { - let inner = self.inner.lock(); - let Some(send_channel) = inner.send_channel.as_ref().cloned() else { - bail!("send channel is closed"); - }; - send_channel - }; - send_channel - .try_send((Span::current(), msg)) - .map_err(|e| eyre!("failed to enqueue private routed RPC message: {}", e))?; - Ok(()) - } } diff --git a/veilid-core/src/rpc_processor/rpc_worker.rs b/veilid-core/src/rpc_processor/rpc_worker.rs new file mode 100644 index 00000000..8141f965 --- /dev/null +++ b/veilid-core/src/rpc_processor/rpc_worker.rs @@ -0,0 +1,247 @@ +use futures_util::StreamExt as _; +use stop_token::future::FutureExt as _; + +use super::*; + +#[derive(Debug)] +pub(super) enum RPCWorkerRequestKind { + Message { message_encoded: MessageEncoded }, +} + +#[derive(Debug)] +pub(super) struct RPCWorkerRequest { + enqueued_ts: Timestamp, + span: Span, + kind: RPCWorkerRequestKind, +} + +impl RPCProcessor { + pub(super) fn startup_rpc_workers(&self) -> EyreResult<()> { + let mut inner = self.inner.lock(); + + // Relay workers + let channel = flume::bounded(self.queue_size as usize); + inner.rpc_send_channel = Some(channel.0.clone()); + inner.rpc_stop_source = Some(StopSource::new()); + + // spin up N workers + veilid_log!(self debug "Starting {} RPC workers", self.concurrency); + for task_n in 0..self.concurrency { + let registry = self.registry(); + let receiver = channel.1.clone(); + let stop_token = inner.rpc_stop_source.as_ref().unwrap().token(); + let jh = spawn(&format!("relay worker {}", task_n), async move { + let this = registry.rpc_processor(); + Box::pin(this.rpc_worker(stop_token, receiver)).await + }); + inner.rpc_worker_join_handles.push(jh); + } + Ok(()) + } + + pub(super) async fn shutdown_rpc_workers(&self) { + // Stop the rpc workers + let mut unord = FuturesUnordered::new(); + { + let mut inner = self.inner.lock(); + // take the join handles out + for h in inner.rpc_worker_join_handles.drain(..) 
{ + unord.push(h); + } + // drop the stop + drop(inner.rpc_stop_source.take()); + } + veilid_log!(self debug "Stopping {} RPC workers", unord.len()); + + // Wait for them to complete + while unord.next().await.is_some() {} + } + + async fn rpc_worker(&self, stop_token: StopToken, receiver: flume::Receiver) { + while let Ok(Ok(request)) = receiver.recv_async().timeout_at(stop_token.clone()).await { + let rpc_request_span = tracing::trace_span!("rpc request"); + rpc_request_span.follows_from(request.span); + + // Measure dequeue time + let dequeue_ts = Timestamp::now(); + let dequeue_latency = dequeue_ts.saturating_sub(request.enqueued_ts); + + // Process request kind + match request.kind { + // Process RPC Message + RPCWorkerRequestKind::Message { message_encoded } => { + network_result_value_or_log!(self match self + .process_rpc_message(message_encoded).instrument(rpc_request_span) + .await + { + Err(e) => { + veilid_log!(self error "couldn't process rpc message: {}", e); + continue; + } + Ok(v) => { + v + } + } => [ format!(": msg.header={:?}", message_encoded.header) ] {}); + } + } + + // Measure process time + let process_ts = Timestamp::now(); + let process_latency = process_ts.saturating_sub(dequeue_ts); + + // Accounting + let mut inner = self.inner.lock(); + inner.rpc_worker_dequeue_latency = inner + .rpc_worker_dequeue_latency_accounting + .record_latency(dequeue_latency); + inner.rpc_worker_process_latency = inner + .rpc_worker_process_latency_accounting + .record_latency(process_latency); + } + } + + #[instrument(level = "trace", target = "rpc", skip_all)] + pub fn enqueue_direct_message( + &self, + envelope: Envelope, + sender_noderef: FilteredNodeRef, + flow: Flow, + routing_domain: RoutingDomain, + body: Vec, + ) -> EyreResult<()> { + let _guard = self + .startup_context + .startup_lock + .enter() + .wrap_err("not started up")?; + + if sender_noderef.routing_domain_set() != routing_domain { + bail!("routing domain should match peer noderef filter"); + } + + let header = MessageHeader { + detail: RPCMessageHeaderDetail::Direct(RPCMessageHeaderDetailDirect { + envelope, + sender_noderef, + flow, + routing_domain, + }), + timestamp: Timestamp::now(), + body_len: ByteCount::new(body.len() as u64), + }; + + let message_encoded = MessageEncoded { + header, + data: MessageData { contents: body }, + }; + + let send_channel = { + let inner = self.inner.lock(); + let Some(send_channel) = inner.rpc_send_channel.as_ref().cloned() else { + bail!("send channel is closed"); + }; + send_channel + }; + send_channel + .try_send(RPCWorkerRequest { + enqueued_ts: Timestamp::now(), + span: Span::current(), + kind: RPCWorkerRequestKind::Message { message_encoded }, + }) + .map_err(|e| eyre!("failed to enqueue direct RPC message: {}", e))?; + Ok(()) + } + + #[instrument(level = "trace", target = "rpc", skip_all)] + pub(super) fn enqueue_safety_routed_message( + &self, + direct: RPCMessageHeaderDetailDirect, + remote_safety_route: PublicKey, + sequencing: Sequencing, + body: Vec, + ) -> EyreResult<()> { + let _guard = self + .startup_context + .startup_lock + .enter() + .wrap_err("not started up")?; + + let header = MessageHeader { + detail: RPCMessageHeaderDetail::SafetyRouted(RPCMessageHeaderDetailSafetyRouted { + direct, + remote_safety_route, + sequencing, + }), + timestamp: Timestamp::now(), + body_len: (body.len() as u64).into(), + }; + + let message_encoded = MessageEncoded { + header, + data: MessageData { contents: body }, + }; + let send_channel = { + let inner = self.inner.lock(); + let 
Some(send_channel) = inner.rpc_send_channel.as_ref().cloned() else { + bail!("send channel is closed"); + }; + send_channel + }; + send_channel + .try_send(RPCWorkerRequest { + enqueued_ts: Timestamp::now(), + span: Span::current(), + kind: RPCWorkerRequestKind::Message { message_encoded }, + }) + .map_err(|e| eyre!("failed to enqueue safety routed RPC message: {}", e))?; + Ok(()) + } + + #[instrument(level = "trace", target = "rpc", skip_all)] + pub(super) fn enqueue_private_routed_message( + &self, + direct: RPCMessageHeaderDetailDirect, + remote_safety_route: PublicKey, + private_route: PublicKey, + safety_spec: SafetySpec, + body: Vec, + ) -> EyreResult<()> { + let _guard = self + .startup_context + .startup_lock + .enter() + .wrap_err("not started up")?; + + let header = MessageHeader { + detail: RPCMessageHeaderDetail::PrivateRouted(RPCMessageHeaderDetailPrivateRouted { + direct, + remote_safety_route, + private_route, + safety_spec, + }), + timestamp: Timestamp::now(), + body_len: (body.len() as u64).into(), + }; + + let message_encoded = MessageEncoded { + header, + data: MessageData { contents: body }, + }; + + let send_channel = { + let inner = self.inner.lock(); + let Some(send_channel) = inner.rpc_send_channel.as_ref().cloned() else { + bail!("send channel is closed"); + }; + send_channel + }; + send_channel + .try_send(RPCWorkerRequest { + enqueued_ts: Timestamp::now(), + span: Span::current(), + kind: RPCWorkerRequestKind::Message { message_encoded }, + }) + .map_err(|e| eyre!("failed to enqueue private routed RPC message: {}", e))?; + Ok(()) + } +} diff --git a/veilid-core/src/stats_accounting.rs b/veilid-core/src/stats_accounting.rs new file mode 100644 index 00000000..acbc0d20 --- /dev/null +++ b/veilid-core/src/stats_accounting.rs @@ -0,0 +1,153 @@ +use super::*; + +// Latency entry is per round-trip packet (ping or data) +// - Size is number of entries +const ROLLING_LATENCIES_SIZE: usize = 50; + +// Transfers entries are in bytes total for the interval +// - Size is number of entries +// - Interval is number of seconds in each entry +const ROLLING_TRANSFERS_SIZE: usize = 10; +pub const ROLLING_TRANSFERS_INTERVAL_SECS: u32 = 1; + +#[derive(Debug, Clone, Copy, PartialEq, Eq, Default)] +pub struct TransferCount { + down: ByteCount, + up: ByteCount, +} + +#[derive(Debug, Clone, Default)] +pub struct TransferStatsAccounting { + rolling_transfers: VecDeque, + current_transfer: TransferCount, +} + +impl TransferStatsAccounting { + pub fn new() -> Self { + Self { + rolling_transfers: VecDeque::new(), + current_transfer: TransferCount::default(), + } + } + + pub fn add_down(&mut self, bytes: ByteCount) { + self.current_transfer.down += bytes; + } + + pub fn add_up(&mut self, bytes: ByteCount) { + self.current_transfer.up += bytes; + } + + pub fn roll_transfers( + &mut self, + last_ts: Timestamp, + cur_ts: Timestamp, + transfer_stats: &mut TransferStatsDownUp, + ) { + let dur_ms = cur_ts.saturating_sub(last_ts) / 1000u64; + while self.rolling_transfers.len() >= ROLLING_TRANSFERS_SIZE { + self.rolling_transfers.pop_front(); + } + self.rolling_transfers.push_back(self.current_transfer); + + transfer_stats.down.total += self.current_transfer.down; + transfer_stats.up.total += self.current_transfer.up; + + self.current_transfer = TransferCount::default(); + + transfer_stats.down.maximum = 0.into(); + transfer_stats.up.maximum = 0.into(); + transfer_stats.down.minimum = u64::MAX.into(); + transfer_stats.up.minimum = u64::MAX.into(); + transfer_stats.down.average = 0.into(); + 
transfer_stats.up.average = 0.into(); + for xfer in &self.rolling_transfers { + let bpsd = xfer.down * 1000u64 / dur_ms; + let bpsu = xfer.up * 1000u64 / dur_ms; + transfer_stats.down.maximum.max_assign(bpsd); + transfer_stats.up.maximum.max_assign(bpsu); + transfer_stats.down.minimum.min_assign(bpsd); + transfer_stats.up.minimum.min_assign(bpsu); + transfer_stats.down.average += bpsd; + transfer_stats.up.average += bpsu; + } + let len = self.rolling_transfers.len() as u64; + if len > 0 { + transfer_stats.down.average /= len; + transfer_stats.up.average /= len; + } + } +} + +#[derive(Debug, Clone, Default)] +pub struct LatencyStatsAccounting { + rolling_latencies: VecDeque, +} + +impl LatencyStatsAccounting { + pub fn new() -> Self { + Self { + rolling_latencies: VecDeque::new(), + } + } + + fn get_tm_n(sorted_latencies: &[TimestampDuration], n: usize) -> Option { + let tmcount = sorted_latencies.len() * n / 100; + if tmcount == 0 { + None + } else { + let mut tm = TimestampDuration::new(0); + for l in &sorted_latencies[..tmcount] { + tm += *l; + } + tm /= tmcount as u64; + Some(tm) + } + } + + fn get_p_n(sorted_latencies: &[TimestampDuration], n: usize) -> TimestampDuration { + let pindex = (sorted_latencies.len() * n / 100).saturating_sub(1); + sorted_latencies[pindex] + } + + pub fn record_latency(&mut self, latency: TimestampDuration) -> LatencyStats { + while self.rolling_latencies.len() >= ROLLING_LATENCIES_SIZE { + self.rolling_latencies.pop_front(); + } + self.rolling_latencies.push_back(latency); + + // Calculate latency stats + + let mut fastest = TimestampDuration::new(u64::MAX); + let mut slowest = TimestampDuration::new(0u64); + let mut average = TimestampDuration::new(0u64); + + for rl in &self.rolling_latencies { + fastest.min_assign(*rl); + slowest.max_assign(*rl); + average += *rl; + } + let len = self.rolling_latencies.len() as u64; + if len > 0 { + average /= len; + } + + let mut sorted_latencies: Vec<_> = self.rolling_latencies.iter().copied().collect(); + sorted_latencies.sort(); + + let tm90 = Self::get_tm_n(&sorted_latencies, 90).unwrap_or(average); + let tm75 = Self::get_tm_n(&sorted_latencies, 75).unwrap_or(average); + let p90 = Self::get_p_n(&sorted_latencies, 90); + let p75 = Self::get_p_n(&sorted_latencies, 75); + + LatencyStats { + fastest, + average, + slowest, + tm90, + tm75, + p90, + p75, + } + } +} diff --git a/veilid-core/src/veilid_api/debug.rs b/veilid-core/src/veilid_api/debug.rs index ab7a4da0..34f58d55 100644 --- a/veilid-core/src/veilid_api/debug.rs +++ b/veilid-core/src/veilid_api/debug.rs @@ -761,7 +761,9 @@ impl VeilidAPI { async fn debug_nodeinfo(&self, _args: String) -> VeilidAPIResult { // Dump routing table entry let registry = self.core_context()?.registry(); - let nodeinfo = registry.routing_table().debug_info_nodeinfo(); + let nodeinfo_rtab = registry.routing_table().debug_info_nodeinfo(); + let nodeinfo_net = registry.network_manager().debug_info_nodeinfo(); + let nodeinfo_rpc = registry.rpc_processor().debug_info_nodeinfo(); // Dump core state let state = self.get_state().await?; @@ -790,7 +792,10 @@ impl VeilidAPI { "Connection manager unavailable when detached".to_owned() }; - Ok(format!("{}\n{}\n{}\n", nodeinfo, peertable, connman)) + Ok(format!( + "{}\n{}\n{}\n{}\n{}\n", + nodeinfo_rtab, nodeinfo_net, nodeinfo_rpc, peertable, connman + )) } fn debug_nodeid(&self, _args: String) -> VeilidAPIResult { diff --git a/veilid-flutter/rust/Cargo.toml b/veilid-flutter/rust/Cargo.toml index b98347de..ec297ff8 100644 --- 
a/veilid-flutter/rust/Cargo.toml +++ b/veilid-flutter/rust/Cargo.toml @@ -36,7 +36,7 @@ debug-load = ["dep:ctor", "dep:libc-print", "dep:android_log-sys", "dep:oslog"] [dependencies] veilid-core = { path = "../../veilid-core", default-features = false } tracing = { version = "0.1.40", features = ["log", "attributes"] } -tracing-subscriber = "0.3.18" +tracing-subscriber = "0.3.19" parking_lot = "0.12.3" backtrace = "0.3.71" serde_json = "1.0.120" diff --git a/veilid-tools/src/timestamp.rs b/veilid-tools/src/timestamp.rs index 81b00b20..7fc8ca08 100644 --- a/veilid-tools/src/timestamp.rs +++ b/veilid-tools/src/timestamp.rs @@ -125,27 +125,33 @@ pub fn display_duration(dur: u64) -> String { let secs = dur / SEC; let dur = dur % SEC; let msecs = dur / MSEC; + let dur = dur % MSEC; - format!( - "{}{}{}{}.{:03}s", - if days != 0 { - format!("{}d", days) - } else { - "".to_owned() - }, - if hours != 0 { - format!("{}h", hours) - } else { - "".to_owned() - }, - if mins != 0 { - format!("{}m", mins) - } else { - "".to_owned() - }, - secs, - msecs - ) + // microseconds format + if days == 0 && hours == 0 && mins == 0 && secs == 0 { + format!("{}.{:03}ms", msecs, dur) + } else { + format!( + "{}{}{}{}.{:03}s", + if days != 0 { + format!("{}d", days) + } else { + "".to_owned() + }, + if hours != 0 { + format!("{}h", hours) + } else { + "".to_owned() + }, + if mins != 0 { + format!("{}m", mins) + } else { + "".to_owned() + }, + secs, + msecs + ) + } } #[must_use] From 60f5e792c706fe59e1417fc9313c897f3c3a578e Mon Sep 17 00:00:00 2001 From: Christien Rioux Date: Sun, 2 Mar 2025 12:09:20 -0500 Subject: [PATCH 05/17] [ci skip] typo --- veilid-core/src/network_manager/debug.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/veilid-core/src/network_manager/debug.rs b/veilid-core/src/network_manager/debug.rs index c06a92be..2d4e10ba 100644 --- a/veilid-core/src/network_manager/debug.rs +++ b/veilid-core/src/network_manager/debug.rs @@ -5,7 +5,7 @@ impl NetworkManager { let mut out = String::new(); let inner = self.inner.lock(); out += &format!( - "Relay Worker Deque Latency:\n{}", + "Relay Worker Dequeue Latency:\n{}", indent_all_string(&inner.stats.relay_worker_dequeue_latency) ); out += "\n"; From 9a3cab071afbfee8ad33ba6711b8f03a1a9e3bdb Mon Sep 17 00:00:00 2001 From: Christien Rioux Date: Mon, 3 Mar 2025 16:06:56 -0500 Subject: [PATCH 06/17] [ci skip] debugging --- .../src/network_manager/connection_manager.rs | 18 +++- veilid-core/src/network_manager/native/mod.rs | 31 +++++-- .../src/network_manager/native/protocol/ws.rs | 50 ++++------- veilid-core/src/network_manager/send_data.rs | 88 +++++++++++++++++-- .../src/network_manager/wasm/protocol/ws.rs | 10 ++- veilid-tools/Cargo.toml | 1 + veilid-tools/src/tools.rs | 26 +++++- 7 files changed, 165 insertions(+), 59 deletions(-) diff --git a/veilid-core/src/network_manager/connection_manager.rs b/veilid-core/src/network_manager/connection_manager.rs index 71eef5ab..304eaaf8 100644 --- a/veilid-core/src/network_manager/connection_manager.rs +++ b/veilid-core/src/network_manager/connection_manager.rs @@ -8,7 +8,7 @@ impl_veilid_log_facility!("net"); const PROTECTED_CONNECTION_DROP_SPAN: TimestampDuration = TimestampDuration::new_secs(10); const PROTECTED_CONNECTION_DROP_COUNT: usize = 3; -const NEW_CONNECTION_RETRY_COUNT: usize = 1; +const NEW_CONNECTION_RETRY_COUNT: usize = 0; const NEW_CONNECTION_RETRY_DELAY_MS: u32 = 500; /////////////////////////////////////////////////////////// @@ -415,7 +415,18 @@ impl ConnectionManager { let 
best_port = preferred_local_address.map(|pla| pla.port()); // Async lock on the remote address for atomicity per remote - let _lock_guard = self.arc.address_lock_table.lock_tag(remote_addr).await; + // Use the initial timeout here as the 'close' timeout as well + let Ok(_lock_guard) = timeout( + self.arc.connection_initial_timeout_ms, + self.arc.address_lock_table.lock_tag(remote_addr), + ) + .await + else { + veilid_log!(self debug "== get_or_create_connection: connection busy, not connecting to dial_info={:?}", dial_info); + return Ok(NetworkResult::no_connection_other( + "connection endpoint busy", + )); + }; veilid_log!(self trace "== get_or_create_connection dial_info={:?}", dial_info); @@ -477,8 +488,9 @@ impl ConnectionManager { veilid_log!(self debug "get_or_create_connection retries left: {}", retry_count); retry_count -= 1; + // XXX: This should not be necessary // Release the preferred local address if things can't connect due to a low-level collision we dont have a record of - preferred_local_address = None; + // preferred_local_address = None; sleep(NEW_CONNECTION_RETRY_DELAY_MS).await; }); diff --git a/veilid-core/src/network_manager/native/mod.rs b/veilid-core/src/network_manager/native/mod.rs index c3de6093..13ed2a9c 100644 --- a/veilid-core/src/network_manager/native/mod.rs +++ b/veilid-core/src/network_manager/native/mod.rs @@ -636,6 +636,10 @@ impl Network { ) -> EyreResult> { let _guard = self.startup_lock.enter()?; + let connection_initial_timeout_us = self + .config() + .with(|c| c.network.connection_initial_timeout_ms as u64 * 1000); + self.record_dial_info_failure( dial_info.clone(), async move { @@ -652,24 +656,33 @@ impl Network { )); } }; - let flow = network_result_try!(ph - .send_message(data, peer_socket_addr) - .await - .wrap_err("failed to send data to dial info")?); + let flow = network_result_try!(debug_duration( + || { ph.send_message(data, peer_socket_addr) }, + Some(connection_initial_timeout_us * 2) + ) + .await + .wrap_err("failed to send data to dial info")?); unique_flow = UniqueFlow { flow, connection_id: None, }; } else { // Handle connection-oriented protocols + let connmgr = self.network_manager().connection_manager(); let conn = network_result_try!( - self.network_manager() - .connection_manager() - .get_or_create_connection(dial_info.clone()) - .await? + debug_duration( + || { connmgr.get_or_create_connection(dial_info.clone()) }, + Some(connection_initial_timeout_us * 2) + ) + .await? 
); - if let ConnectionHandleSendResult::NotSent(_) = conn.send_async(data).await { + if let ConnectionHandleSendResult::NotSent(_) = debug_duration( + || conn.send_async(data), + Some(connection_initial_timeout_us * 2), + ) + .await + { return Ok(NetworkResult::NoConnection(io::Error::new( io::ErrorKind::ConnectionReset, "failed to send", diff --git a/veilid-core/src/network_manager/native/protocol/ws.rs b/veilid-core/src/network_manager/native/protocol/ws.rs index 7253d45a..b3c2c58f 100644 --- a/veilid-core/src/network_manager/native/protocol/ws.rs +++ b/veilid-core/src/network_manager/native/protocol/ws.rs @@ -6,7 +6,7 @@ use async_tungstenite::tungstenite::handshake::server::{ Callback, ErrorResponse, Request, Response, }; use async_tungstenite::tungstenite::http::StatusCode; -use async_tungstenite::tungstenite::protocol::{frame::coding::CloseCode, CloseFrame, Message}; +use async_tungstenite::tungstenite::protocol::Message; use async_tungstenite::tungstenite::Error; use async_tungstenite::{accept_hdr_async, client_async, WebSocketStream}; use futures_util::{AsyncRead, AsyncWrite, SinkExt}; @@ -98,45 +98,27 @@ where #[instrument(level = "trace", target = "protocol", err, skip_all)] pub async fn close(&self) -> io::Result> { + let timeout_ms = self + .registry + .config() + .with(|c| c.network.connection_initial_timeout_ms); + // Make an attempt to close the stream normally let mut stream = self.stream.clone(); - let out = match stream - .send(Message::Close(Some(CloseFrame { - code: CloseCode::Normal, - reason: "".into(), - }))) - .await - { - Ok(v) => NetworkResult::value(v), - Err(e) => err_to_network_result(e), - }; // This close does not do a TCP shutdown so it is safe and will not cause TIME_WAIT - let _ = stream.close().await; - - Ok(out) - - // Drive connection to close - /* - let cur_ts = get_timestamp(); - loop { - match stream.flush().await { - Ok(()) => {} - Err(Error::Io(ioerr)) => { - break Err(ioerr).into_network_result(); - } - Err(Error::ConnectionClosed) => { - break Ok(NetworkResult::value(())); - } - Err(e) => { - break Err(to_io_error_other(e)); - } + match timeout(timeout_ms, stream.close()).await { + Ok(Ok(())) => {} + Ok(Err(e)) => { + return Ok(err_to_network_result(e)); } - if get_timestamp().saturating_sub(cur_ts) >= MAX_CONNECTION_CLOSE_WAIT_US { - return Ok(NetworkResult::Timeout); + Err(_) => { + // Timed out + return Ok(NetworkResult::timeout()); } - } - */ + }; + + Ok(NetworkResult::value(())) } #[instrument(level = "trace", target="protocol", err, skip(self, message), fields(network_result, message.len = message.len()))] diff --git a/veilid-core/src/network_manager/send_data.rs b/veilid-core/src/network_manager/send_data.rs index 51d179b6..b3adddc5 100644 --- a/veilid-core/src/network_manager/send_data.rs +++ b/veilid-core/src/network_manager/send_data.rs @@ -152,7 +152,7 @@ impl NetworkManager { // If a node is unreachable it may still have an existing inbound connection // Try that, but don't cache anything network_result_try!( - pin_future_closure!(self.send_data_ncm_existing(target_node_ref, data)).await? + pin_future_closure!(self.send_data_unreachable(target_node_ref, data)).await? 
) } Some(NodeContactMethod { @@ -239,6 +239,42 @@ impl NetworkManager { })) } + /// Send data to unreachable node + #[instrument(level = "trace", target = "net", skip_all, err)] + async fn send_data_unreachable( + &self, + target_node_ref: FilteredNodeRef, + data: Vec, + ) -> EyreResult> { + // First try to send data to the last connection we've seen this peer on + let Some(flow) = target_node_ref.last_flow() else { + return Ok(NetworkResult::no_connection_other(format!( + "node was unreachable: {}", + target_node_ref + ))); + }; + + let net = self.net(); + let unique_flow = match pin_future!(debug_duration( + || { net.send_data_to_existing_flow(flow, data) }, + Some(1_000_000) + )) + .await? + { + SendDataToExistingFlowResult::Sent(unique_flow) => unique_flow, + SendDataToExistingFlowResult::NotSent(_) => { + return Ok(NetworkResult::no_connection_other( + "failed to send to existing flow", + )); + } + }; + + // Update timestamp for this last connection since we just sent to it + self.set_last_flow(target_node_ref.unfiltered(), flow, Timestamp::now()); + + Ok(NetworkResult::value(unique_flow)) + } + /// Send data using NodeContactMethod::Existing #[instrument(level = "trace", target = "net", skip_all, err)] async fn send_data_ncm_existing( @@ -255,7 +291,12 @@ impl NetworkManager { }; let net = self.net(); - let unique_flow = match pin_future!(net.send_data_to_existing_flow(flow, data)).await? { + let unique_flow = match pin_future!(debug_duration( + || { net.send_data_to_existing_flow(flow, data) }, + Some(1_000_000) + )) + .await? + { SendDataToExistingFlowResult::Sent(unique_flow) => unique_flow, SendDataToExistingFlowResult::NotSent(_) => { return Ok(NetworkResult::no_connection_other( @@ -297,7 +338,12 @@ impl NetworkManager { // First try to send data to the last flow we've seen this peer on let data = if let Some(flow) = seq_target_node_ref.last_flow() { let net = self.net(); - match pin_future!(net.send_data_to_existing_flow(flow, data)).await? { + match pin_future!(debug_duration( + || { net.send_data_to_existing_flow(flow, data) }, + Some(1_000_000) + )) + .await? + { SendDataToExistingFlowResult::Sent(unique_flow) => { // Update timestamp for this last connection since we just sent to it self.set_last_flow(target_node_ref.unfiltered(), flow, Timestamp::now()); @@ -321,9 +367,16 @@ impl NetworkManager { data }; + let connection_initial_timeout_us = self + .config() + .with(|c| c.network.connection_initial_timeout_ms as u64 * 1000); + let unique_flow = network_result_try!( - pin_future!(self.do_reverse_connect(relay_nr.clone(), target_node_ref.clone(), data)) - .await? + pin_future!(debug_duration( + || { self.do_reverse_connect(relay_nr.clone(), target_node_ref.clone(), data) }, + Some(connection_initial_timeout_us * 2) + )) + .await? ); Ok(NetworkResult::value(unique_flow)) } @@ -339,7 +392,12 @@ impl NetworkManager { // First try to send data to the last flow we've seen this peer on let data = if let Some(flow) = target_node_ref.last_flow() { let net = self.net(); - match pin_future!(net.send_data_to_existing_flow(flow, data)).await? { + match pin_future!(debug_duration( + || { net.send_data_to_existing_flow(flow, data) }, + Some(1_000_000) + )) + .await? 
+ { SendDataToExistingFlowResult::Sent(unique_flow) => { // Update timestamp for this last connection since we just sent to it self.set_last_flow(target_node_ref.unfiltered(), flow, Timestamp::now()); @@ -363,9 +421,16 @@ impl NetworkManager { data }; + let hole_punch_receipt_time_us = self + .config() + .with(|c| c.network.hole_punch_receipt_time_ms as u64 * 1000); + let unique_flow = network_result_try!( - pin_future!(self.do_hole_punch(relay_nr.clone(), target_node_ref.clone(), data)) - .await? + pin_future!(debug_duration( + || { self.do_hole_punch(relay_nr.clone(), target_node_ref.clone(), data) }, + Some(hole_punch_receipt_time_us * 2) + )) + .await? ); Ok(NetworkResult::value(unique_flow)) @@ -391,7 +456,12 @@ impl NetworkManager { ); let net = self.net(); - match pin_future!(net.send_data_to_existing_flow(flow, data)).await? { + match pin_future!(debug_duration( + || { net.send_data_to_existing_flow(flow, data) }, + Some(1_000_000) + )) + .await? + { SendDataToExistingFlowResult::Sent(unique_flow) => { // Update timestamp for this last connection since we just sent to it self.set_last_flow(node_ref.unfiltered(), flow, Timestamp::now()); diff --git a/veilid-core/src/network_manager/wasm/protocol/ws.rs b/veilid-core/src/network_manager/wasm/protocol/ws.rs index 1af70872..28d22e2b 100644 --- a/veilid-core/src/network_manager/wasm/protocol/ws.rs +++ b/veilid-core/src/network_manager/wasm/protocol/ws.rs @@ -52,8 +52,16 @@ impl WebsocketNetworkConnection { instrument(level = "trace", err, skip(self)) )] pub async fn close(&self) -> io::Result> { + let timeout_ms = self + .registry + .config() + .with(|c| c.network.connection_initial_timeout_ms); + #[allow(unused_variables)] - let x = self.inner.ws_meta.close().await.map_err(ws_err_to_io_error); + let x = match timeout(timeout_ms, self.inner.ws_meta.close()).await { + Ok(v) => v.map_err(ws_err_to_io_error), + Err(_) => return Ok(NetworkResult::timeout()), + }; #[cfg(feature = "verbose-tracing")] veilid_log!(self debug "close result: {:?}", x); Ok(NetworkResult::value(())) diff --git a/veilid-tools/Cargo.toml b/veilid-tools/Cargo.toml index 22d6283b..1da65745 100644 --- a/veilid-tools/Cargo.toml +++ b/veilid-tools/Cargo.toml @@ -47,6 +47,7 @@ veilid_tools_android_tests = ["dep:paranoid-android"] veilid_tools_ios_tests = ["dep:tracing", "dep:oslog", "dep:tracing-oslog"] tracing = ["dep:tracing", "dep:tracing-subscriber", "tokio/tracing"] debug-locks = [] +debug-duration-timeout = [] virtual-network = [] virtual-network-server = [ diff --git a/veilid-tools/src/tools.rs b/veilid-tools/src/tools.rs index eec24484..05289a3d 100644 --- a/veilid-tools/src/tools.rs +++ b/veilid-tools/src/tools.rs @@ -522,13 +522,33 @@ pub fn is_debug_backtrace_enabled() -> bool { } #[track_caller] -pub fn debug_duration, T: FnOnce() -> F>(f: T) -> impl Future { - let location = std::panic::Location::caller(); +pub fn debug_duration, T: FnOnce() -> F>( + f: T, + opt_timeout_us: Option, +) -> impl Future { + let location = core::panic::Location::caller(); async move { let t1 = get_timestamp(); let out = f().await; let t2 = get_timestamp(); - debug!("duration@{}: {}", location, display_duration(t2 - t1)); + let duration_us = t2 - t1; + if let Some(timeout_us) = opt_timeout_us { + if duration_us > timeout_us { + #[cfg(not(feature = "debug-duration-timeout"))] + debug!( + "Excessive duration: {}\n{:?}", + display_duration(duration_us), + backtrace::Backtrace::new() + ); + #[cfg(feature = "debug-duration-timeout")] + panic!(format!( + "Duration panic timeout exceeded: 
{}", + display_duration(duration_us) + )); + } + } else { + debug!("Duration: {} = {}", location, display_duration(duration_us),); + } out } } From 500547cfa81d33254e0d6b4d3a86164df055b6bc Mon Sep 17 00:00:00 2001 From: Christien Rioux Date: Mon, 3 Mar 2025 17:03:54 -0500 Subject: [PATCH 07/17] [ci skip] debugging --- .../node_ref/filtered_node_ref.rs | 18 ++++++++++++++++++ veilid-core/src/routing_table/node_ref/mod.rs | 18 ++++++++++++++++++ .../routing_table/node_ref/node_ref_lock.rs | 15 +++++++++++++++ .../node_ref/node_ref_lock_mut.rs | 16 ++++++++++++++++ .../src/routing_table/node_ref/traits.rs | 17 +++++++++++++++-- 5 files changed, 82 insertions(+), 2 deletions(-) diff --git a/veilid-core/src/routing_table/node_ref/filtered_node_ref.rs b/veilid-core/src/routing_table/node_ref/filtered_node_ref.rs index e22b039b..16faf44d 100644 --- a/veilid-core/src/routing_table/node_ref/filtered_node_ref.rs +++ b/veilid-core/src/routing_table/node_ref/filtered_node_ref.rs @@ -119,6 +119,24 @@ impl NodeRefOperateTrait for FilteredNodeRef { let inner = &mut *routing_table.inner.write(); self.entry.with_mut(inner, f) } + + fn with_inner(&self, f: F) -> T + where + F: FnOnce(&RoutingTableInner) -> T, + { + let routing_table = self.registry.routing_table(); + let inner = &*routing_table.inner.read(); + f(inner) + } + + fn with_inner_mut(&self, f: F) -> T + where + F: FnOnce(&mut RoutingTableInner) -> T, + { + let routing_table = self.registry.routing_table(); + let inner = &mut *routing_table.inner.write(); + f(inner) + } } impl NodeRefCommonTrait for FilteredNodeRef {} diff --git a/veilid-core/src/routing_table/node_ref/mod.rs b/veilid-core/src/routing_table/node_ref/mod.rs index 4a997892..7a3a41cc 100644 --- a/veilid-core/src/routing_table/node_ref/mod.rs +++ b/veilid-core/src/routing_table/node_ref/mod.rs @@ -139,6 +139,24 @@ impl NodeRefOperateTrait for NodeRef { let inner = &mut *routing_table.inner.write(); self.entry.with_mut(inner, f) } + + fn with_inner(&self, f: F) -> T + where + F: FnOnce(&RoutingTableInner) -> T, + { + let routing_table = self.routing_table(); + let inner = &*routing_table.inner.read(); + f(inner) + } + + fn with_inner_mut(&self, f: F) -> T + where + F: FnOnce(&mut RoutingTableInner) -> T, + { + let routing_table = self.routing_table(); + let inner = &mut *routing_table.inner.write(); + f(inner) + } } impl NodeRefCommonTrait for NodeRef {} diff --git a/veilid-core/src/routing_table/node_ref/node_ref_lock.rs b/veilid-core/src/routing_table/node_ref/node_ref_lock.rs index b8e33f38..d07a8abf 100644 --- a/veilid-core/src/routing_table/node_ref/node_ref_lock.rs +++ b/veilid-core/src/routing_table/node_ref/node_ref_lock.rs @@ -90,6 +90,21 @@ impl<'a, N: NodeRefAccessorsTrait + NodeRefOperateTrait + fmt::Debug + fmt::Disp { panic!("need to locked_mut() for this operation") } + + fn with_inner(&self, f: F) -> T + where + F: FnOnce(&RoutingTableInner) -> T, + { + let inner = &*self.inner.lock(); + f(inner) + } + + fn with_inner_mut(&self, _f: F) -> T + where + F: FnOnce(&mut RoutingTableInner) -> T, + { + panic!("need to locked_mut() for this operation") + } } impl<'a, N: NodeRefAccessorsTrait + NodeRefOperateTrait + fmt::Debug + fmt::Display + Clone> diff --git a/veilid-core/src/routing_table/node_ref/node_ref_lock_mut.rs b/veilid-core/src/routing_table/node_ref/node_ref_lock_mut.rs index c132e85c..8b0acb9b 100644 --- a/veilid-core/src/routing_table/node_ref/node_ref_lock_mut.rs +++ b/veilid-core/src/routing_table/node_ref/node_ref_lock_mut.rs @@ -92,6 +92,22 @@ impl<'a, N: 
NodeRefAccessorsTrait + NodeRefOperateTrait + fmt::Debug + fmt::Disp let inner = &mut *self.inner.lock(); self.nr.entry().with_mut(inner, f) } + + fn with_inner(&self, f: F) -> T + where + F: FnOnce(&RoutingTableInner) -> T, + { + let inner = &*self.inner.lock(); + f(inner) + } + + fn with_inner_mut(&self, f: F) -> T + where + F: FnOnce(&mut RoutingTableInner) -> T, + { + let inner = &mut *self.inner.lock(); + f(inner) + } } impl<'a, N: NodeRefAccessorsTrait + NodeRefOperateTrait + fmt::Debug + fmt::Display + Clone> diff --git a/veilid-core/src/routing_table/node_ref/traits.rs b/veilid-core/src/routing_table/node_ref/traits.rs index 02649e1e..4c499afd 100644 --- a/veilid-core/src/routing_table/node_ref/traits.rs +++ b/veilid-core/src/routing_table/node_ref/traits.rs @@ -20,6 +20,13 @@ pub(crate) trait NodeRefOperateTrait { fn operate_mut(&self, f: F) -> T where F: FnOnce(&mut RoutingTableInner, &mut BucketEntryInner) -> T; + #[expect(dead_code)] + fn with_inner(&self, f: F) -> T + where + F: FnOnce(&RoutingTableInner) -> T; + fn with_inner_mut(&self, f: F) -> T + where + F: FnOnce(&mut RoutingTableInner) -> T; } // Common Operations @@ -115,7 +122,7 @@ pub(crate) trait NodeRefCommonTrait: NodeRefAccessorsTrait + NodeRefOperateTrait // } fn relay(&self, routing_domain: RoutingDomain) -> EyreResult> { - self.operate_mut(|rti, e| { + let Some(rpi) = self.operate(|rti, e| { let Some(sni) = e.signed_node_info(routing_domain) else { return Ok(None); }; @@ -127,8 +134,14 @@ pub(crate) trait NodeRefCommonTrait: NodeRefAccessorsTrait + NodeRefOperateTrait if rti.routing_table().matches_own_node_id(rpi.node_ids()) { bail!("Can't relay though ourselves"); } + Ok(Some(rpi)) + })? + else { + return Ok(None); + }; - // Register relay node and return noderef + // Register relay node and return noderef + self.with_inner_mut(|rti| { let nr = rti.register_node_with_peer_info(rpi, false)?; Ok(Some(nr)) }) From 909fea721afa0bf22addc01756db3f814e3d6007 Mon Sep 17 00:00:00 2001 From: Christien Rioux Date: Mon, 3 Mar 2025 21:02:20 -0500 Subject: [PATCH 08/17] [ci skip] connection table debugging --- .../src/network_manager/connection_manager.rs | 13 ++++----- veilid-core/src/network_manager/native/mod.rs | 27 +++++-------------- .../src/network_manager/network_connection.rs | 13 ++++----- veilid-tools/src/async_tag_lock.rs | 9 ++++--- 4 files changed, 25 insertions(+), 37 deletions(-) diff --git a/veilid-core/src/network_manager/connection_manager.rs b/veilid-core/src/network_manager/connection_manager.rs index 304eaaf8..8cb1bdb7 100644 --- a/veilid-core/src/network_manager/connection_manager.rs +++ b/veilid-core/src/network_manager/connection_manager.rs @@ -415,7 +415,8 @@ impl ConnectionManager { let best_port = preferred_local_address.map(|pla| pla.port()); // Async lock on the remote address for atomicity per remote - // Use the initial timeout here as the 'close' timeout as well + // Use the initial connection timeout here because multiple calls to get_or_create_connection + // can be performed simultaneously and we want to wait for the first one to succeed or not let Ok(_lock_guard) = timeout( self.arc.connection_initial_timeout_ms, self.arc.address_lock_table.lock_tag(remote_addr), @@ -461,6 +462,7 @@ impl ConnectionManager { let network_manager = self.network_manager(); let prot_conn = network_result_try!(loop { + veilid_log!(self debug "get_or_create_connection connect({}) {:?} -> {}", retry_count, preferred_local_address, dial_info); let result_net_res = ProtocolNetworkConnection::connect( 
self.registry(), preferred_local_address, @@ -485,11 +487,10 @@ impl ConnectionManager { } } }; - veilid_log!(self debug "get_or_create_connection retries left: {}", retry_count); retry_count -= 1; - // XXX: This should not be necessary - // Release the preferred local address if things can't connect due to a low-level collision we dont have a record of + // // XXX: This should not be necessary + // // Release the preferred local address if things can't connect due to a low-level collision we dont have a record of // preferred_local_address = None; sleep(NEW_CONNECTION_RETRY_DELAY_MS).await; }); @@ -610,7 +611,7 @@ impl ConnectionManager { // Callback from network connection receive loop when it exits // cleans up the entry in the connection table - pub(super) async fn report_connection_finished(&self, connection_id: NetworkConnectionId) { + pub(super) fn report_connection_finished(&self, connection_id: NetworkConnectionId) { // Get channel sender let sender = { let mut inner = self.arc.inner.lock(); @@ -680,7 +681,7 @@ impl ConnectionManager { } } } - let _ = sender.send_async(ConnectionManagerEvent::Dead(conn)).await; + let _ = sender.send(ConnectionManagerEvent::Dead(conn)); } } diff --git a/veilid-core/src/network_manager/native/mod.rs b/veilid-core/src/network_manager/native/mod.rs index 13ed2a9c..2facc059 100644 --- a/veilid-core/src/network_manager/native/mod.rs +++ b/veilid-core/src/network_manager/native/mod.rs @@ -636,10 +636,6 @@ impl Network { ) -> EyreResult> { let _guard = self.startup_lock.enter()?; - let connection_initial_timeout_us = self - .config() - .with(|c| c.network.connection_initial_timeout_ms as u64 * 1000); - self.record_dial_info_failure( dial_info.clone(), async move { @@ -656,12 +652,10 @@ impl Network { )); } }; - let flow = network_result_try!(debug_duration( - || { ph.send_message(data, peer_socket_addr) }, - Some(connection_initial_timeout_us * 2) - ) - .await - .wrap_err("failed to send data to dial info")?); + let flow = network_result_try!(ph + .send_message(data, peer_socket_addr) + .await + .wrap_err("failed to send data to dial info")?); unique_flow = UniqueFlow { flow, connection_id: None, @@ -670,19 +664,10 @@ impl Network { // Handle connection-oriented protocols let connmgr = self.network_manager().connection_manager(); let conn = network_result_try!( - debug_duration( - || { connmgr.get_or_create_connection(dial_info.clone()) }, - Some(connection_initial_timeout_us * 2) - ) - .await? + connmgr.get_or_create_connection(dial_info.clone()).await? 
); - if let ConnectionHandleSendResult::NotSent(_) = debug_duration( - || conn.send_async(data), - Some(connection_initial_timeout_us * 2), - ) - .await - { + if let ConnectionHandleSendResult::NotSent(_) = conn.send_async(data).await { return Ok(NetworkResult::NoConnection(io::Error::new( io::ErrorKind::ConnectionReset, "failed to send", diff --git a/veilid-core/src/network_manager/network_connection.rs b/veilid-core/src/network_manager/network_connection.rs index d52a43a2..552571f7 100644 --- a/veilid-core/src/network_manager/network_connection.rs +++ b/veilid-core/src/network_manager/network_connection.rs @@ -480,20 +480,21 @@ impl NetworkConnection { } } - veilid_log!(registry trace - "Connection loop finished flow={:?}", - flow - ); // Let the connection manager know the receive loop exited connection_manager - .report_connection_finished(connection_id) - .await; + .report_connection_finished(connection_id); // Close the low level socket if let Err(e) = protocol_connection.close().await { veilid_log!(registry debug "Protocol connection close error: {}", e); } + + veilid_log!(registry trace + "Connection loop exited flow={:?}", + flow + ); + }.in_current_span()) } diff --git a/veilid-tools/src/async_tag_lock.rs b/veilid-tools/src/async_tag_lock.rs index 0b8306ea..ff892c95 100644 --- a/veilid-tools/src/async_tag_lock.rs +++ b/veilid-tools/src/async_tag_lock.rs @@ -10,7 +10,7 @@ where { table: AsyncTagLockTable, tag: T, - _guard: AsyncMutexGuardArc<()>, + guard: Option>, } impl AsyncTagLockGuard @@ -21,7 +21,7 @@ where Self { table, tag, - _guard: guard, + guard: Some(guard), } } } @@ -45,7 +45,8 @@ where if guards == 0 { inner.table.remove(&self.tag).unwrap(); } - // Proceed with releasing _guard, which may cause some concurrent tag lock to acquire + // Proceed with releasing guard, which may cause some concurrent tag lock to acquire + drop(self.guard.take()); } } @@ -153,7 +154,7 @@ where } std::collections::hash_map::Entry::Vacant(v) => { let mutex = Arc::new(AsyncMutex::new(())); - let guard = asyncmutex_try_lock_arc!(mutex)?; + let guard = asyncmutex_try_lock_arc!(mutex).unwrap(); v.insert(AsyncTagLockTableEntry { mutex, guards: 1 }); guard } From 8f521099bd55c7131d39390b601de907900429fd Mon Sep 17 00:00:00 2001 From: Christien Rioux Date: Tue, 4 Mar 2025 21:56:55 -0500 Subject: [PATCH 09/17] rpc cleanup --- veilid-core/src/logging/facilities.rs | 88 +++++++++++++++++++ .../src/network_manager/connection_manager.rs | 8 +- veilid-core/src/routing_table/bucket_entry.rs | 7 +- veilid-core/src/routing_table/mod.rs | 16 ++-- .../src/routing_table/route_spec_store/mod.rs | 4 +- .../src/routing_table/tasks/bootstrap.rs | 4 +- veilid-core/src/rpc_processor/mod.rs | 29 ++++-- .../src/rpc_processor/operation_waiter.rs | 77 ++++++++-------- veilid-core/src/rpc_processor/rpc_worker.rs | 2 +- veilid-core/src/storage_manager/mod.rs | 2 +- veilid-tools/src/network_result.rs | 82 ----------------- 11 files changed, 174 insertions(+), 145 deletions(-) diff --git a/veilid-core/src/logging/facilities.rs b/veilid-core/src/logging/facilities.rs index c9ed341f..7963175b 100644 --- a/veilid-core/src/logging/facilities.rs +++ b/veilid-core/src/logging/facilities.rs @@ -289,3 +289,91 @@ macro_rules! veilid_log { $($k).+ = $($fields)* )}; } + +#[macro_export] +macro_rules! 
network_result_value_or_log { + ($self:ident $r:expr => $f:expr) => { + network_result_value_or_log!($self target: self::__VEILID_LOG_FACILITY, $r => [ "" ] $f ) + }; + ($self:ident $r:expr => [ $d:expr ] $f:expr) => { + network_result_value_or_log!($self target: self::__VEILID_LOG_FACILITY, $r => [ $d ] $f ) + }; + ($self:ident target: $target:expr, $r:expr => $f:expr) => { + network_result_value_or_log!($self target: $target, $r => [ "" ] $f ) + }; + ($self:ident target: $target:expr, $r:expr => [ $d:expr ] $f:expr) => { { + let __extra_message = if debug_target_enabled!("network_result") { + $d.to_string() + } else { + "".to_string() + }; + match $r { + NetworkResult::Timeout => { + veilid_log!($self debug target: $target, + "{} at {}@{}:{} in {}{}", + "Timeout", + file!(), + line!(), + column!(), + fn_name::uninstantiated!(), + __extra_message + ); + $f + } + NetworkResult::ServiceUnavailable(ref s) => { + veilid_log!($self debug target: $target, + "{}({}) at {}@{}:{} in {}{}", + "ServiceUnavailable", + s, + file!(), + line!(), + column!(), + fn_name::uninstantiated!(), + __extra_message + ); + $f + } + NetworkResult::NoConnection(ref e) => { + veilid_log!($self debug target: $target, + "{}({}) at {}@{}:{} in {}{}", + "No connection", + e.to_string(), + file!(), + line!(), + column!(), + fn_name::uninstantiated!(), + __extra_message + ); + $f + } + NetworkResult::AlreadyExists(ref e) => { + veilid_log!($self debug target: $target, + "{}({}) at {}@{}:{} in {}{}", + "Already exists", + e.to_string(), + file!(), + line!(), + column!(), + fn_name::uninstantiated!(), + __extra_message + ); + $f + } + NetworkResult::InvalidMessage(ref s) => { + veilid_log!($self debug target: $target, + "{}({}) at {}@{}:{} in {}{}", + "Invalid message", + s, + file!(), + line!(), + column!(), + fn_name::uninstantiated!(), + __extra_message + ); + $f + } + NetworkResult::Value(v) => v, + } + } }; + +} diff --git a/veilid-core/src/network_manager/connection_manager.rs b/veilid-core/src/network_manager/connection_manager.rs index 8cb1bdb7..c63e34be 100644 --- a/veilid-core/src/network_manager/connection_manager.rs +++ b/veilid-core/src/network_manager/connection_manager.rs @@ -461,8 +461,8 @@ impl ConnectionManager { let mut retry_count = NEW_CONNECTION_RETRY_COUNT; let network_manager = self.network_manager(); - let prot_conn = network_result_try!(loop { - veilid_log!(self debug "get_or_create_connection connect({}) {:?} -> {}", retry_count, preferred_local_address, dial_info); + let nres = loop { + veilid_log!(self trace "== get_or_create_connection connect({}) {:?} -> {}", retry_count, preferred_local_address, dial_info); let result_net_res = ProtocolNetworkConnection::connect( self.registry(), preferred_local_address, @@ -493,6 +493,10 @@ impl ConnectionManager { // // Release the preferred local address if things can't connect due to a low-level collision we dont have a record of // preferred_local_address = None; sleep(NEW_CONNECTION_RETRY_DELAY_MS).await; + }; + + let prot_conn = network_result_value_or_log!(self target:"network_result", nres => [ format!("== get_or_create_connection failed {:?} -> {}", preferred_local_address, dial_info) ] { + network_result_raise!(nres); }); // Add to the connection table diff --git a/veilid-core/src/routing_table/bucket_entry.rs b/veilid-core/src/routing_table/bucket_entry.rs index c0f81088..d97ffcca 100644 --- a/veilid-core/src/routing_table/bucket_entry.rs +++ b/veilid-core/src/routing_table/bucket_entry.rs @@ -1011,7 +1011,12 @@ impl BucketEntryInner { match 
latest_contact_time { None => { - error!("Peer is reliable, but not seen!"); + // Peer may be appear reliable from a previous attach/detach + // But reliability uses last_seen_ts not the last_outbound_contact_time + // Regardless, if we haven't pinged it, we need to ping it. + // But it it was reliable before, and pings successfully then it can + // stay reliable, so we don't make it unreliable just because we haven't + // contacted it yet during this attachment. true } Some(latest_contact_time) => { diff --git a/veilid-core/src/routing_table/mod.rs b/veilid-core/src/routing_table/mod.rs index be252fe0..7fe044bb 100644 --- a/veilid-core/src/routing_table/mod.rs +++ b/veilid-core/src/routing_table/mod.rs @@ -1041,7 +1041,7 @@ impl RoutingTable { #[instrument(level = "trace", skip(self), err)] pub async fn find_nodes_close_to_node_id( &self, - node_ref: NodeRef, + node_ref: FilteredNodeRef, node_id: TypedKey, capabilities: Vec, ) -> EyreResult>> { @@ -1049,11 +1049,7 @@ impl RoutingTable { let res = network_result_try!( rpc_processor - .rpc_call_find_node( - Destination::direct(node_ref.default_filtered()), - node_id, - capabilities - ) + .rpc_call_find_node(Destination::direct(node_ref), node_id, capabilities) .await? ); @@ -1069,7 +1065,7 @@ impl RoutingTable { pub async fn find_nodes_close_to_self( &self, crypto_kind: CryptoKind, - node_ref: NodeRef, + node_ref: FilteredNodeRef, capabilities: Vec, ) -> EyreResult>> { let self_node_id = self.node_id(crypto_kind); @@ -1083,7 +1079,7 @@ impl RoutingTable { pub async fn find_nodes_close_to_node_ref( &self, crypto_kind: CryptoKind, - node_ref: NodeRef, + node_ref: FilteredNodeRef, capabilities: Vec, ) -> EyreResult>> { let Some(target_node_id) = node_ref.node_ids().get(crypto_kind) else { @@ -1104,7 +1100,7 @@ impl RoutingTable { capabilities: Vec, ) { // Ask node for nodes closest to our own node - let closest_nodes = network_result_value_or_log!(self match pin_future!(self.find_nodes_close_to_self(crypto_kind, node_ref.clone(), capabilities.clone())).await { + let closest_nodes = network_result_value_or_log!(self match pin_future!(self.find_nodes_close_to_self(crypto_kind, node_ref.sequencing_filtered(Sequencing::PreferOrdered), capabilities.clone())).await { Err(e) => { veilid_log!(self error "find_self failed for {:?}: {:?}", @@ -1120,7 +1116,7 @@ impl RoutingTable { // Ask each node near us to find us as well if wide { for closest_nr in closest_nodes { - network_result_value_or_log!(self match pin_future!(self.find_nodes_close_to_self(crypto_kind, closest_nr.clone(), capabilities.clone())).await { + network_result_value_or_log!(self match pin_future!(self.find_nodes_close_to_self(crypto_kind, closest_nr.sequencing_filtered(Sequencing::PreferOrdered), capabilities.clone())).await { Err(e) => { veilid_log!(self error "find_self failed for {:?}: {:?}", diff --git a/veilid-core/src/routing_table/route_spec_store/mod.rs b/veilid-core/src/routing_table/route_spec_store/mod.rs index 45623c11..ad3b39b2 100644 --- a/veilid-core/src/routing_table/route_spec_store/mod.rs +++ b/veilid-core/src/routing_table/route_spec_store/mod.rs @@ -716,7 +716,7 @@ impl RouteSpecStore { }; let Some(rsid) = inner.content.get_id_by_key(&public_key.value) else { - veilid_log!(self debug "route id does not exist: {:?}", public_key.value); + veilid_log!(self debug target: "network_result", "route id does not exist: {:?}", public_key.value); return None; }; let Some(rssd) = inner.content.get_detail(&rsid) else { @@ -753,7 +753,7 @@ impl RouteSpecStore { return None; } 
Err(e) => { - veilid_log!(self debug "errir verifying signature for hop {} at {} on private route {}: {}", hop_n, hop_public_key, public_key, e); + veilid_log!(self debug "error verifying signature for hop {} at {} on private route {}: {}", hop_n, hop_public_key, public_key, e); return None; } } diff --git a/veilid-core/src/routing_table/tasks/bootstrap.rs b/veilid-core/src/routing_table/tasks/bootstrap.rs index 8ab20079..31dc5089 100644 --- a/veilid-core/src/routing_table/tasks/bootstrap.rs +++ b/veilid-core/src/routing_table/tasks/bootstrap.rs @@ -289,7 +289,7 @@ impl RoutingTable { // Get what contact method would be used for contacting the bootstrap let bsdi = match network_manager - .get_node_contact_method(nr.default_filtered()) + .get_node_contact_method(nr.sequencing_filtered(Sequencing::PreferOrdered)) { Ok(Some(ncm)) if ncm.is_direct() => ncm.direct_dial_info().unwrap(), Ok(v) => { @@ -307,7 +307,7 @@ impl RoutingTable { // Need VALID signed peer info, so ask bootstrap to find_node of itself // which will ensure it has the bootstrap's signed peer info as part of the response - let _ = routing_table.find_nodes_close_to_node_ref(crypto_kind, nr.clone(), vec![]).await; + let _ = routing_table.find_nodes_close_to_node_ref(crypto_kind, nr.sequencing_filtered(Sequencing::PreferOrdered), vec![]).await; // Ensure we got the signed peer info if !nr.signed_node_info_has_valid_signature(routing_domain) { diff --git a/veilid-core/src/rpc_processor/mod.rs b/veilid-core/src/rpc_processor/mod.rs index b623f5ba..68bab229 100644 --- a/veilid-core/src/rpc_processor/mod.rs +++ b/veilid-core/src/rpc_processor/mod.rs @@ -1471,11 +1471,24 @@ impl RPCProcessor { let operation = match self.decode_rpc_operation(&encoded_msg) { Ok(v) => v, Err(e) => { - // Debug on error - veilid_log!(self debug "Dropping routed RPC: {}", e); + match e { + // Invalid messages that should be punished + RPCError::Protocol(_) | RPCError::InvalidFormat(_) => { + veilid_log!(self debug "Invalid routed RPC Operation: {}", e); + + // XXX: Punish routes that send routed undecodable crap + // self.network_manager().address_filter().punish_route_id(xxx, PunishmentReason::FailedToDecodeRoutedMessage); + } + // Ignored messages that should be dropped + RPCError::Ignore(_) | RPCError::Network(_) | RPCError::TryAgain(_) => { + veilid_log!(self trace "Dropping routed RPC Operation: {}", e); + } + // Internal errors that deserve louder logging + RPCError::Unimplemented(_) | RPCError::Internal(_) => { + veilid_log!(self error "Error decoding routed RPC operation: {}", e); + } + }; - // XXX: Punish routes that send routed undecodable crap - // self.network_manager().address_filter().punish_route_id(xxx, PunishmentReason::FailedToDecodeRoutedMessage); return Ok(NetworkResult::invalid_message(e)); } }; @@ -1593,16 +1606,16 @@ impl RPCProcessor { if let Err(e) = self.waiting_rpc_table.complete_op_waiter(op_id, msg) { match e { RPCError::Unimplemented(_) | RPCError::Internal(_) => { - veilid_log!(self error "Could not complete rpc operation: id = {}: {}", op_id, e); + veilid_log!(self error "Error in RPC operation: id = {}: {}", op_id, e); } RPCError::InvalidFormat(_) | RPCError::Protocol(_) | RPCError::Network(_) | RPCError::TryAgain(_) => { - veilid_log!(self debug "Could not complete rpc operation: id = {}: {}", op_id, e); + veilid_log!(self debug "Could not complete RPC operation: id = {}: {}", op_id, e); } - RPCError::Ignore(_) => { - veilid_log!(self debug "Answer late: id = {}", op_id); + RPCError::Ignore(e) => { + veilid_log!(self 
debug "RPC operation ignored: id = {}: {}", op_id, e); } }; // Don't throw an error here because it's okay if the original operation timed out diff --git a/veilid-core/src/rpc_processor/operation_waiter.rs b/veilid-core/src/rpc_processor/operation_waiter.rs index 205fa339..7e4ee05c 100644 --- a/veilid-core/src/rpc_processor/operation_waiter.rs +++ b/veilid-core/src/rpc_processor/operation_waiter.rs @@ -8,7 +8,7 @@ where { waiter: OperationWaiter, op_id: OperationId, - result_receiver: Option>, + result_receiver: flume::Receiver<(Span, T)>, } impl OperationWaitHandle @@ -27,9 +27,7 @@ where C: Unpin + Clone, { fn drop(&mut self) { - if self.result_receiver.is_some() { - self.waiter.cancel_op_waiter(self.op_id); - } + self.waiter.cancel_op_waiter(self.op_id); } } @@ -106,7 +104,7 @@ where OperationWaitHandle { waiter: self.clone(), op_id, - result_receiver: Some(result_receiver), + result_receiver, } } @@ -125,65 +123,69 @@ where /// Get operation context pub fn get_op_context(&self, op_id: OperationId) -> Result { let inner = self.inner.lock(); - let Some(waiting_op) = inner.waiting_op_table.get(&op_id) else { - return Err(RPCError::ignore(format!( - "Missing operation id getting op context: id={}", - op_id - ))); + let res = { + let Some(waiting_op) = inner.waiting_op_table.get(&op_id) else { + return Err(RPCError::ignore(format!( + "Missing operation id getting op context: id={}", + op_id + ))); + }; + Ok(waiting_op.context.clone()) }; - Ok(waiting_op.context.clone()) + drop(inner); + res } /// Remove wait for op #[instrument(level = "trace", target = "rpc", skip_all)] fn cancel_op_waiter(&self, op_id: OperationId) { let mut inner = self.inner.lock(); - inner.waiting_op_table.remove(&op_id); + { + let waiting_op = inner.waiting_op_table.remove(&op_id); + drop(waiting_op); + } + drop(inner); } /// Complete the waiting op #[instrument(level = "trace", target = "rpc", skip_all)] pub fn complete_op_waiter(&self, op_id: OperationId, message: T) -> Result<(), RPCError> { - let waiting_op = { - let mut inner = self.inner.lock(); - inner - .waiting_op_table - .remove(&op_id) - .ok_or_else(RPCError::else_ignore(format!( - "Unmatched operation id: {}", - op_id - )))? 
+ let mut inner = self.inner.lock(); + let res = { + let waiting_op = + inner + .waiting_op_table + .remove(&op_id) + .ok_or_else(RPCError::else_ignore(format!( + "Unmatched operation id: {}", + op_id + )))?; + waiting_op + .result_sender + .send((Span::current(), message)) + .map_err(RPCError::ignore) }; - waiting_op - .result_sender - .send((Span::current(), message)) - .map_err(RPCError::ignore) + drop(inner); + res } /// Wait for operation to complete #[instrument(level = "trace", target = "rpc", skip_all)] pub async fn wait_for_op( &self, - mut handle: OperationWaitHandle, + handle: OperationWaitHandle, timeout_us: TimestampDuration, ) -> Result, RPCError> { let timeout_ms = us_to_ms(timeout_us.as_u64()).map_err(RPCError::internal)?; - // Take the receiver - // After this, we must manually cancel since the cancel on handle drop is disabled - let result_receiver = handle.result_receiver.take().unwrap(); - - let result_fut = result_receiver.recv_async().in_current_span(); + let result_fut = handle.result_receiver.recv_async().in_current_span(); // wait for eventualvalue let start_ts = Timestamp::now(); let res = timeout(timeout_ms, result_fut).await.into_timeout_or(); match res { - TimeoutOr::Timeout => { - self.cancel_op_waiter(handle.op_id); - Ok(TimeoutOr::Timeout) - } + TimeoutOr::Timeout => Ok(TimeoutOr::Timeout), TimeoutOr::Value(Ok((_span_id, ret))) => { let end_ts = Timestamp::now(); @@ -192,7 +194,10 @@ where Ok(TimeoutOr::Value((ret, end_ts.saturating_sub(start_ts)))) } - TimeoutOr::Value(Err(e)) => Err(RPCError::ignore(e)), + TimeoutOr::Value(Err(e)) => { + // + Err(RPCError::ignore(e)) + } } } } diff --git a/veilid-core/src/rpc_processor/rpc_worker.rs b/veilid-core/src/rpc_processor/rpc_worker.rs index 8141f965..194a0f2f 100644 --- a/veilid-core/src/rpc_processor/rpc_worker.rs +++ b/veilid-core/src/rpc_processor/rpc_worker.rs @@ -70,7 +70,7 @@ impl RPCProcessor { match request.kind { // Process RPC Message RPCWorkerRequestKind::Message { message_encoded } => { - network_result_value_or_log!(self match self + network_result_value_or_log!(self target:"network_result", match self .process_rpc_message(message_encoded).instrument(rpc_request_span) .await { diff --git a/veilid-core/src/storage_manager/mod.rs b/veilid-core/src/storage_manager/mod.rs index 73ea7ebf..722c41b9 100644 --- a/veilid-core/src/storage_manager/mod.rs +++ b/veilid-core/src/storage_manager/mod.rs @@ -1120,7 +1120,7 @@ impl StorageManager { let dest = rpc_processor .resolve_target_to_destination( vc.target, - SafetySelection::Unsafe(Sequencing::NoPreference), + SafetySelection::Unsafe(Sequencing::PreferOrdered), ) .await .map_err(VeilidAPIError::from)?; diff --git a/veilid-tools/src/network_result.rs b/veilid-tools/src/network_result.rs index 88201b60..89ccf1f8 100644 --- a/veilid-tools/src/network_result.rs +++ b/veilid-tools/src/network_result.rs @@ -278,85 +278,3 @@ macro_rules! network_result_try { } }; } - -#[macro_export] -macro_rules! 
network_result_value_or_log { - ($self:ident $r:expr => $f:expr) => { - network_result_value_or_log!($self $r => [ "" ] $f ) - }; - ($self:ident $r:expr => [ $d:expr ] $f:expr) => { { - let __extra_message = if debug_target_enabled!("network_result") { - $d.to_string() - } else { - "".to_string() - }; - match $r { - NetworkResult::Timeout => { - veilid_log!($self debug - "{} at {}@{}:{} in {}{}", - "Timeout", - file!(), - line!(), - column!(), - fn_name::uninstantiated!(), - __extra_message - ); - $f - } - NetworkResult::ServiceUnavailable(ref s) => { - veilid_log!($self debug - "{}({}) at {}@{}:{} in {}{}", - "ServiceUnavailable", - s, - file!(), - line!(), - column!(), - fn_name::uninstantiated!(), - __extra_message - ); - $f - } - NetworkResult::NoConnection(ref e) => { - veilid_log!($self debug - "{}({}) at {}@{}:{} in {}{}", - "No connection", - e.to_string(), - file!(), - line!(), - column!(), - fn_name::uninstantiated!(), - __extra_message - ); - $f - } - NetworkResult::AlreadyExists(ref e) => { - veilid_log!($self debug - "{}({}) at {}@{}:{} in {}{}", - "Already exists", - e.to_string(), - file!(), - line!(), - column!(), - fn_name::uninstantiated!(), - __extra_message - ); - $f - } - NetworkResult::InvalidMessage(ref s) => { - veilid_log!($self debug - "{}({}) at {}@{}:{} in {}{}", - "Invalid message", - s, - file!(), - line!(), - column!(), - fn_name::uninstantiated!(), - __extra_message - ); - $f - } - NetworkResult::Value(v) => v, - } - } }; - -} From 7f36cc009233e30589f66603bc0018758b6a5e5e Mon Sep 17 00:00:00 2001 From: Christien Rioux Date: Wed, 5 Mar 2025 15:23:50 -0500 Subject: [PATCH 10/17] [ci skip]revert rtnetlink upgrade --- Cargo.lock | 439 ++++++++++++++-------------- veilid-cli/Cargo.toml | 16 +- veilid-core/Cargo.toml | 62 ++-- veilid-flutter/rust/Cargo.toml | 26 +- veilid-server/Cargo.toml | 30 +- veilid-tools/Cargo.toml | 49 ++-- veilid-tools/src/assembly_buffer.rs | 7 +- 7 files changed, 315 insertions(+), 314 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index b246c049..815eb980 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -96,9 +96,9 @@ checksum = "85965b6739a430150bdd138e2374a98af0c3ee0d030b3bb7fc3bddff58d0102e" [[package]] name = "android_log-sys" -version = "0.3.1" +version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ecc8056bf6ab9892dcd53216c83d1597487d7dacac16c8df6b877d127df9937" +checksum = "84521a3cf562bc62942e294181d9eef17eb38ceb8c68677bc49f144e4c3d4f8d" [[package]] name = "android_logger" @@ -118,7 +118,7 @@ version = "0.13.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c494134f746c14dc653a35a4ea5aca24ac368529da5370ecf41fe0341c35772f" dependencies = [ - "android_log-sys 0.3.1", + "android_log-sys 0.3.2", "env_logger 0.10.2", "log", "once_cell", @@ -203,9 +203,9 @@ dependencies = [ [[package]] name = "anyhow" -version = "1.0.95" +version = "1.0.97" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "34ac096ce696dc2fcabef30516bb13c0a68a11d30131d3df6f04711467681b04" +checksum = "dcfed56ad506cb2c684a14971b8861fdc3baaaae314b9e5f9bb532cbe3ba7a4f" [[package]] name = "arboard" @@ -471,7 +471,7 @@ checksum = "c7c24de15d275a1ecfd47a380fb4d5ec9bfe0933f309ed5e705b775596a3574d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.99", ] [[package]] @@ -495,13 +495,13 @@ dependencies = [ [[package]] name = "async-trait" -version = "0.1.86" +version = "0.1.87" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"644dd749086bf3771a2fbc5f256fdb982d53f011c7d5d560304eafeecebce79d" +checksum = "d556ec1359574147ec0c4fc5eb525f3f23263a592b1a9c07e0a75b427de55c97" dependencies = [ "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.99", ] [[package]] @@ -559,7 +559,7 @@ dependencies = [ "futures-task", "futures-timer", "futures-util", - "pin-project 1.1.9", + "pin-project 1.1.10", "rustc_version", "tokio", "wasm-bindgen-futures", @@ -634,7 +634,7 @@ dependencies = [ "async-trait", "axum-core", "bitflags 1.3.2", - "bytes 1.10.0", + "bytes 1.10.1", "futures-util", "http 0.2.12", "http-body", @@ -660,7 +660,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "759fa577a247914fd3f7f76d62972792636412fbfd634cd452f6a385a74d2d2c" dependencies = [ "async-trait", - "bytes 1.10.0", + "bytes 1.10.1", "futures-util", "http 0.2.12", "http-body", @@ -728,9 +728,9 @@ checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" [[package]] name = "bitflags" -version = "2.8.0" +version = "2.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f68f53c83ab957f72c32642f3868eec03eb974d1fb82e453128456482613d36" +checksum = "5c8214115b7bf84099f1309324e63141d4c5d7cc26862f97a0a857dbefe165bd" [[package]] name = "bitmaps" @@ -749,16 +749,15 @@ dependencies = [ [[package]] name = "blake3" -version = "1.6.0" +version = "1.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1230237285e3e10cde447185e8975408ae24deaa67205ce684805c25bc0c7937" +checksum = "675f87afced0413c9bb02843499dbbd3882a237645883f71a2b59644a6d2f753" dependencies = [ "arrayref", "arrayvec", "cc", "cfg-if 1.0.0", "constant_time_eq", - "memmap2", ] [[package]] @@ -769,7 +768,7 @@ checksum = "e0b121a9fe0df916e362fb3271088d071159cdf11db0e4182d02152850756eff" dependencies = [ "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.99", ] [[package]] @@ -857,9 +856,9 @@ checksum = "0e4cec68f03f32e44924783795810fa50a7035d8c8ebe78580ad7e6c703fba38" [[package]] name = "bytes" -version = "1.10.0" +version = "1.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f61dac84819c6588b558454b194026eb1f09c293b9036ae9b159e74e73ab6cf9" +checksum = "d71b6127be86fdcfddb610f7182ac57211d4b18a3e9c82eb2d17662f2227ad6a" [[package]] name = "capnp" @@ -881,9 +880,9 @@ dependencies = [ [[package]] name = "cc" -version = "1.2.14" +version = "1.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c3d1b2e905a3a7b00a6141adb0e4c0bb941d11caf55349d863942a1cc44e3c9" +checksum = "be714c154be609ec7f5dad223a33bf1482fff90472de28f7362806e6d4832b8c" dependencies = [ "shlex", ] @@ -947,16 +946,16 @@ dependencies = [ [[package]] name = "chrono" -version = "0.4.39" +version = "0.4.40" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7e36cc9d416881d2e24f9a963be5fb1cd90966419ac844274161d10488b3e825" +checksum = "1a7964611d71df112cb1730f2ee67324fcf4d0fc6606acbbe9bfe06df124637c" dependencies = [ "android-tzdata", "iana-time-zone", "js-sys", "num-traits", "wasm-bindgen", - "windows-targets 0.52.6", + "windows-link", ] [[package]] @@ -1007,9 +1006,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.5.30" +version = "4.5.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "92b7b18d71fad5313a1e320fa9897994228ce274b60faa4d694fe0ea89cd9e6d" +checksum = "027bb0d98429ae334a8698531da7077bdf906419543a35a55c2cb1b66437d767" dependencies = [ "clap_builder", "clap_derive", @@ -1017,9 +1016,9 @@ 
dependencies = [ [[package]] name = "clap_builder" -version = "4.5.30" +version = "4.5.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a35db2071778a7344791a4fb4f95308b5673d219dee3ae348b86642574ecc90c" +checksum = "5589e0cba072e0f3d23791efac0fd8627b49c829c196a492e88168e6a669d863" dependencies = [ "anstream", "anstyle", @@ -1037,7 +1036,7 @@ dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.99", ] [[package]] @@ -1095,7 +1094,7 @@ version = "4.6.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ba5a308b75df32fe02788e748662718f03fde005016435c444eea572398219fd" dependencies = [ - "bytes 1.10.0", + "bytes 1.10.1", "memchr", ] @@ -1137,21 +1136,21 @@ dependencies = [ [[package]] name = "config" -version = "0.15.8" +version = "0.15.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8cf9dc8d4ef88e27a8cb23e85cb116403dedd57f7971964dc4b18ccead548901" +checksum = "fb07d21d12f9f0bc5e7c3e97ccc78b2341b9b4a4604eac3ed7c1d0d6e2c3b23e" dependencies = [ "pathdiff", "serde", "winnow 0.7.3", - "yaml-rust2 0.9.0", + "yaml-rust2 0.10.0", ] [[package]] name = "console" -version = "0.15.10" +version = "0.15.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea3c6ecd8059b57859df5c69830340ed3c41d30e3da0c1cbed90a96ac853041b" +checksum = "054ccb5b10f9f2cbf51eb355ca1d05c2d279ce1804688d0db74b4733a5aeafd8" dependencies = [ "encode_unicode", "libc", @@ -1281,7 +1280,7 @@ version = "0.27.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f476fe445d41c9e991fd07515a6f463074b782242ccf4a5b7b1d1012e70824df" dependencies = [ - "bitflags 2.8.0", + "bitflags 2.9.0", "crossterm_winapi", "libc", "mio 0.8.11", @@ -1297,7 +1296,7 @@ version = "0.28.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "829d955a0bb380ef178a640b91779e3987da38c9aea133b20614cfed8cdea9c6" dependencies = [ - "bitflags 2.8.0", + "bitflags 2.9.0", "crossterm_winapi", "futures-core", "mio 1.0.3", @@ -1345,7 +1344,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "32a2785755761f3ddc1492979ce1e48d2c00d09311c39e4466429188f3dd6501" dependencies = [ "quote", - "syn 2.0.98", + "syn 2.0.99", ] [[package]] @@ -1458,7 +1457,7 @@ checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.99", ] [[package]] @@ -1515,7 +1514,7 @@ dependencies = [ "proc-macro2", "quote", "strsim 0.11.1", - "syn 2.0.98", + "syn 2.0.99", ] [[package]] @@ -1537,7 +1536,7 @@ checksum = "d336a2a514f6ccccaa3e09b02d41d35330c07ddf03a62165fcec10bb561c7806" dependencies = [ "darling_core 0.20.10", "quote", - "syn 2.0.98", + "syn 2.0.99", ] [[package]] @@ -1638,14 +1637,14 @@ checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.99", ] [[package]] name = "dyn-clone" -version = "1.0.18" +version = "1.0.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "feeef44e73baff3a26d371801df019877a9866a8c493d315ab00177843314f35" +checksum = "1c7a8fb8a9fbf66c1f703fe16184d10ca0ee9d23be5b4436400408ba54a95005" [[package]] name = "ed25519" @@ -1675,9 +1674,9 @@ dependencies = [ [[package]] name = "either" -version = "1.13.0" +version = "1.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"60b1af1c220855b6ceac025d3f6ecdd2b7c4894bfe9cd9bda4fbb4bc7c0d4cf0" +checksum = "b7914353092ddf589ad78f25c5c1c21b7f80b0ff8621e7c814c3485b5306da9d" [[package]] name = "embedded-io" @@ -1715,7 +1714,7 @@ dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.99", ] [[package]] @@ -1735,7 +1734,7 @@ checksum = "f282cfdfe92516eb26c2af8589c274c7c17681f5ecc03c18255fe741c6aa64eb" dependencies = [ "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.99", ] [[package]] @@ -1778,7 +1777,7 @@ dependencies = [ "darling 0.20.10", "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.99", ] [[package]] @@ -1922,12 +1921,12 @@ dependencies = [ [[package]] name = "flate2" -version = "1.0.35" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c936bfdafb507ebbf50b8074c54fa31c5be9a1e7e5f467dd659697041407d07c" +checksum = "11faaf5a5236997af9848be0bef4db95824b1d534ebc64d0f0c6cf3e67bd38dc" dependencies = [ "crc32fast", - "miniz_oxide 0.8.4", + "miniz_oxide 0.8.5", ] [[package]] @@ -1972,6 +1971,12 @@ version = "1.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" +[[package]] +name = "foldhash" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a0d2fde1f7b3d48b8395d5f2de76c18a528bd6a9cdde438df747bfcba3e05d6f" + [[package]] name = "foreign-types" version = "0.3.2" @@ -2090,7 +2095,7 @@ checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" dependencies = [ "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.99", ] [[package]] @@ -2295,7 +2300,7 @@ version = "0.3.26" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "81fe527a889e1532da5c525686d96d4c2e74cdd345badf8dfef9f6b39dd5f5e8" dependencies = [ - "bytes 1.10.0", + "bytes 1.10.1", "fnv", "futures-core", "futures-sink", @@ -2362,6 +2367,9 @@ name = "hashbrown" version = "0.15.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bf151400ff0baff5465007dd2f3e717f3fe502074ca563069ce3a6629d07b289" +dependencies = [ + "foldhash", +] [[package]] name = "hashlink" @@ -2374,11 +2382,11 @@ dependencies = [ [[package]] name = "hashlink" -version = "0.9.1" +version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ba4ff7128dee98c7dc9794b6a411377e1404dba1c97deb8d1a55297bd25d8af" +checksum = "7382cf6263419f2d8df38c55d7da83da5c18aef87fc7a7fc1fb1e344edfe14c1" dependencies = [ - "hashbrown 0.14.5", + "hashbrown 0.15.2", ] [[package]] @@ -2553,7 +2561,7 @@ version = "0.2.12" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "601cbb57e577e2f5ef5be8e7b83f0f63994f25aa94d673e54a92d5c516d101f1" dependencies = [ - "bytes 1.10.0", + "bytes 1.10.1", "fnv", "itoa", ] @@ -2564,7 +2572,7 @@ version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f16ca2af56261c99fba8bac40a10251ce8188205a4c448fbb745a2e4daa76fea" dependencies = [ - "bytes 1.10.0", + "bytes 1.10.1", "fnv", "itoa", ] @@ -2575,16 +2583,16 @@ version = "0.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7ceab25649e9960c0311ea418d17bee82c0dcec1bd053b5f9a66e265a693bed2" dependencies = [ - "bytes 1.10.0", + "bytes 1.10.1", "http 0.2.12", "pin-project-lite", ] [[package]] name = "httparse" -version = "1.10.0" +version = "1.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"f2d708df4e7140240a16cd6ab0ab65c972d7433ab77819ea693fde9c43811e2a" +checksum = "6dbf3de79e51f3d586ab4cb9d5c3e2c14aa28ed23d180cf89b4df0454a69cc87" [[package]] name = "httpdate" @@ -2604,7 +2612,7 @@ version = "0.14.32" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "41dfc780fdec9373c01bae43289ea34c972e40ee3c9f6b3c8801a35f35586ce7" dependencies = [ - "bytes 1.10.0", + "bytes 1.10.1", "futures-channel", "futures-core", "futures-util", @@ -2640,7 +2648,7 @@ version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d6183ddfa99b85da61a140bea0efc93fdf56ceaa041b37d553518030827f9905" dependencies = [ - "bytes 1.10.0", + "bytes 1.10.1", "hyper", "native-tls", "tokio", @@ -2785,7 +2793,7 @@ checksum = "1ec89e9337638ecdc08744df490b221a7399bf8d164eb52a665454e60e075ad6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.99", ] [[package]] @@ -2882,9 +2890,9 @@ dependencies = [ [[package]] name = "inout" -version = "0.1.3" +version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a0c10553d664a4d0bcff9f4215d0aac67a639cc68ef660840afe309b807bc9f5" +checksum = "879f10e63c20629ecabbb64a8010319738c66a5cd0c29b02d63d272b03751d01" dependencies = [ "generic-array", ] @@ -2965,9 +2973,9 @@ dependencies = [ [[package]] name = "itoa" -version = "1.0.14" +version = "1.0.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d75a2a4b1b190afb6f5425f10f6a8f959d2ea0b9c2b1d79553551850539e4674" +checksum = "4a5f13b858c8d314ee3e8f639011f7ccefe71f97f96e50151fb991f267928e2c" [[package]] name = "jni" @@ -3107,9 +3115,9 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" [[package]] name = "libc" -version = "0.2.169" +version = "0.2.170" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b5aba8db14291edd000dfcc4d620c7ebfb122c613afb886ca8803fa4e128a20a" +checksum = "875b3680cb2f8f71bdcf9a30f38d48282f5d3c95cbf9b3fa57269bb5d5c06828" [[package]] name = "libc-print" @@ -3136,9 +3144,9 @@ version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c0ff37bd590ca25063e35af745c343cb7a0271906fb7b37e4813e8f79f00268d" dependencies = [ - "bitflags 2.8.0", + "bitflags 2.9.0", "libc", - "redox_syscall 0.5.8", + "redox_syscall 0.5.10", ] [[package]] @@ -3184,9 +3192,9 @@ checksum = "d26c52dbd32dccf2d10cac7725f8eae5296885fb5703b261f7d0a0739ec807ab" [[package]] name = "litemap" -version = "0.7.4" +version = "0.7.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ee93343901ab17bd981295f2cf0026d4ad018c7c31ba84549a4ddbb47a45104" +checksum = "23fb14cb19457329c82206317a5663005a4d404783dc74f4252769b0d5f42856" [[package]] name = "lock_api" @@ -3200,9 +3208,9 @@ dependencies = [ [[package]] name = "log" -version = "0.4.25" +version = "0.4.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "04cbf5b083de1c7e0222a7a51dbfdba1cbe1c6ab0b15e29fff3f6c077fd9cd9f" +checksum = "30bde2b3dc3671ae49d8e2e9f044c7c005836e7a023ee57cffa25ab82764bb9e" dependencies = [ "value-bag", ] @@ -3270,15 +3278,6 @@ version = "2.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "78ca9ab1a0babb1e7d5695e3530886289c18cf2f87ec19a575a0abdce112e3a3" -[[package]] -name = "memmap2" -version = "0.9.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fd3f7eed9d3848f8b98834af67102b720745c4ec028fcd0aa0239277e7de374f" -dependencies = [ - 
"libc", -] - [[package]] name = "memoffset" version = "0.6.5" @@ -3327,9 +3326,9 @@ dependencies = [ [[package]] name = "miniz_oxide" -version = "0.8.4" +version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b3b1c9bd4fe1f0f8b387f6eb9eb3b4a1aa26185e5750efb9140301703f62cd1b" +checksum = "8e3e04debbb59698c15bacbb6d93584a8c0ca9cc3213cb423d31f760d8843ce5" dependencies = [ "adler2", ] @@ -3505,12 +3504,12 @@ version = "0.11.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "72452e012c2f8d612410d89eea01e2d9b56205274abb35d53f60200b2ec41d60" dependencies = [ - "bytes 1.10.0", + "bytes 1.10.1", "futures", "log", "netlink-packet-core", "netlink-sys", - "thiserror 2.0.11", + "thiserror 2.0.12", ] [[package]] @@ -3520,7 +3519,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6471bf08e7ac0135876a9581bf3217ef0333c191c128d34878079f42ee150411" dependencies = [ "async-io 1.13.0", - "bytes 1.10.0", + "bytes 1.10.1", "futures", "libc", "log", @@ -3557,7 +3556,7 @@ version = "0.27.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2eb04e9c688eff1c89d72b407f168cf79bb9e867a9d3323ed6c01519eb9cc053" dependencies = [ - "bitflags 2.8.0", + "bitflags 2.9.0", "cfg-if 1.0.0", "libc", ] @@ -3568,7 +3567,7 @@ version = "0.29.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "71e2746dc3a24dd78b3cfcb7be93368c6de9963d30f43a6a73998a9cf4b17b46" dependencies = [ - "bitflags 2.8.0", + "bitflags 2.9.0", "cfg-if 1.0.0", "cfg_aliases", "libc", @@ -3734,7 +3733,7 @@ version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e4e89ad9e3d7d297152b17d39ed92cd50ca8063a89a9fa569046d41568891eff" dependencies = [ - "bitflags 2.8.0", + "bitflags 2.9.0", "block2", "libc", "objc2", @@ -3750,7 +3749,7 @@ version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "617fbf49e071c178c0b24c080767db52958f716d9eabdf0890523aeae54773ef" dependencies = [ - "bitflags 2.8.0", + "bitflags 2.9.0", "block2", "objc2", "objc2-foundation", @@ -3780,7 +3779,7 @@ version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0ee638a5da3799329310ad4cfa62fbf045d5f56e3ef5ba4149e7452dcf89d5a8" dependencies = [ - "bitflags 2.8.0", + "bitflags 2.9.0", "block2", "libc", "objc2", @@ -3792,7 +3791,7 @@ version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dd0cba1276f6023976a406a14ffa85e1fdd19df6b0f737b063b95f6c8c7aadd6" dependencies = [ - "bitflags 2.8.0", + "bitflags 2.9.0", "block2", "objc2", "objc2-foundation", @@ -3804,7 +3803,7 @@ version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e42bee7bff906b14b167da2bac5efe6b6a07e6f7c0a21a7308d40c960242dc7a" dependencies = [ - "bitflags 2.8.0", + "bitflags 2.9.0", "block2", "objc2", "objc2-foundation", @@ -3838,7 +3837,7 @@ version = "0.10.71" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5e14130c6a98cd258fdcb0fb6d744152343ff729cbfcb28c656a9d12b999fbcd" dependencies = [ - "bitflags 2.8.0", + "bitflags 2.9.0", "cfg-if 1.0.0", "foreign-types", "libc", @@ -3855,7 +3854,7 @@ checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.99", ] [[package]] @@ -4163,7 +4162,7 @@ checksum = "1e401f977ab385c9e4e3ab30627d6f26d00e2c73eef317493c4ec6d468726cf8" dependencies = [ "cfg-if 
1.0.0", "libc", - "redox_syscall 0.5.8", + "redox_syscall 0.5.10", "smallvec", "windows-targets 0.52.6", ] @@ -4224,11 +4223,11 @@ dependencies = [ [[package]] name = "pin-project" -version = "1.1.9" +version = "1.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dfe2e71e1471fe07709406bf725f710b02927c9c54b2b5b2ec0e8087d97c327d" +checksum = "677f1add503faace112b9f1373e43e9e054bfdd22ff1a63c1bc485eaec6a6a8a" dependencies = [ - "pin-project-internal 1.1.9", + "pin-project-internal 1.1.10", ] [[package]] @@ -4244,13 +4243,13 @@ dependencies = [ [[package]] name = "pin-project-internal" -version = "1.1.9" +version = "1.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f6e859e6e5bd50440ab63c47e3ebabc90f26251f7c73c3d3e837b74a1cc3fa67" +checksum = "6e918e4ff8c4549eb882f14b3a4bc8c8bc93de829416eacf579f1207a8fbf861" dependencies = [ "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.99", ] [[package]] @@ -4288,9 +4287,9 @@ dependencies = [ [[package]] name = "pkg-config" -version = "0.3.31" +version = "0.3.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "953ec861398dccce10c670dfeaf3ec4911ca479e9c02154b3a215178c5f566f2" +checksum = "7edddbd0b52d732b21ad9a5fab5c704c14cd949e5e9a1ec5929a24fded1b904c" [[package]] name = "polling" @@ -4336,9 +4335,9 @@ dependencies = [ [[package]] name = "portable-atomic" -version = "1.10.0" +version = "1.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "280dc24453071f1b63954171985a0b0d30058d287960968b9b2aca264c8d4ee6" +checksum = "350e9b48cbc6b0e028b0473b114454c6316e57336ee184ceab6e53f72c178b3e" [[package]] name = "portable-atomic-util" @@ -4415,14 +4414,14 @@ dependencies = [ "proc-macro-error-attr2", "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.99", ] [[package]] name = "proc-macro2" -version = "1.0.93" +version = "1.0.94" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "60946a68e5f9d28b0dc1c21bb8a97ee7d018a8b322fa57838ba31cc878e22d99" +checksum = "a31971752e70b8b2686d7e46ec17fb38dad4051d94024c88df49b667caea9c84" dependencies = [ "unicode-ident", ] @@ -4433,7 +4432,7 @@ version = "0.11.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0b82eaa1d779e9a4bc1c3217db8ffbeabaae1dca241bf70183242128d48681cd" dependencies = [ - "bytes 1.10.0", + "bytes 1.10.1", "prost-derive 0.11.9", ] @@ -4443,7 +4442,7 @@ version = "0.12.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "deb1435c188b76130da55f17a466d252ff7b1418b2ad3e037d127b94e3411f29" dependencies = [ - "bytes 1.10.0", + "bytes 1.10.1", "prost-derive 0.12.6", ] @@ -4470,7 +4469,7 @@ dependencies = [ "itertools 0.12.1", "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.99", ] [[package]] @@ -4496,9 +4495,9 @@ checksum = "a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0" [[package]] name = "quote" -version = "1.0.38" +version = "1.0.39" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0e4dccaaaf89514f546c693ddc140f729f958c247918a13380cccc6078391acc" +checksum = "c1f1914ce909e1658d9907913b4b91947430c7d9be598b15a1912935b8c04801" dependencies = [ "proc-macro2", ] @@ -4571,11 +4570,11 @@ dependencies = [ [[package]] name = "redox_syscall" -version = "0.5.8" +version = "0.5.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "03a862b389f93e68874fbf580b9de08dd02facb9a788ebadaf4a3fd33cf58834" +checksum = 
"0b8c0c260b63a8219631167be35e6a988e9554dbd323f8bd08439c8ed1302bd1" dependencies = [ - "bitflags 2.8.0", + "bitflags 2.9.0", ] [[package]] @@ -4640,7 +4639,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dd67538700a17451e7cba03ac727fb961abb7607553461627b97de0b89cf4a62" dependencies = [ "base64", - "bytes 1.10.0", + "bytes 1.10.1", "encoding_rs", "futures-core", "futures-util", @@ -4685,9 +4684,9 @@ dependencies = [ [[package]] name = "ring" -version = "0.17.9" +version = "0.17.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e75ec5e92c4d8aede845126adc388046234541629e76029599ed35a003c7ed24" +checksum = "da5349ae27d3887ca812fb375b45a4fbb36d8d12d2df394968cd86e35683fe73" dependencies = [ "cc", "cfg-if 1.0.0", @@ -4743,7 +4742,7 @@ version = "0.29.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "549b9d036d571d42e6e85d1c1425e2ac83491075078ca9a15be021c56b1641f2" dependencies = [ - "bitflags 2.8.0", + "bitflags 2.9.0", "fallible-iterator", "fallible-streaming-iterator", "hashlink 0.8.4", @@ -4792,7 +4791,7 @@ version = "0.38.44" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fdb5bc1ae2baa591800df16c9ca78619bf65c0488b41b96ccec5d11220d8c154" dependencies = [ - "bitflags 2.8.0", + "bitflags 2.9.0", "errno", "libc", "linux-raw-sys 0.4.15", @@ -4832,9 +4831,9 @@ dependencies = [ [[package]] name = "rustversion" -version = "1.0.19" +version = "1.0.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f7c45b9784283f1b2e7fb61b42047c2fd678ef0960d4f6f1eba131594cc369d4" +checksum = "eded382c5f5f786b989652c49544c4877d9f015cc22e145a5ea8ea66c2921cd2" [[package]] name = "rustyline-async" @@ -4844,18 +4843,18 @@ checksum = "6fa3f78c2ea57b827be4c11adbfed26e5fe1b49fb6fb7826e2a9eebbc2e8db10" dependencies = [ "crossterm 0.28.1", "futures-util", - "pin-project 1.1.9", + "pin-project 1.1.10", "thingbuf", - "thiserror 2.0.11", + "thiserror 2.0.12", "unicode-segmentation", "unicode-width 0.2.0", ] [[package]] name = "ryu" -version = "1.0.19" +version = "1.0.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ea1a2d0a644769cc99faa24c3ad26b379b786fe7c36fd3c546254801650e6dd" +checksum = "28d3b2b1366ec20994f1fd18c3c594f05c5dd4bc44d8bb0c1c632c8d6829481f" [[package]] name = "same-file" @@ -4896,9 +4895,9 @@ dependencies = [ [[package]] name = "schemars" -version = "0.8.21" +version = "0.8.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "09c024468a378b7e36765cd36702b7a90cc3cba11654f6685c8f233408e89e92" +checksum = "3fbf2ae1b8bc8e02df939598064d22402220cd5bbcca1c76f7d6a310974d5615" dependencies = [ "dyn-clone", "schemars_derive", @@ -4908,14 +4907,14 @@ dependencies = [ [[package]] name = "schemars_derive" -version = "0.8.21" +version = "0.8.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b1eee588578aff73f856ab961cd2f79e36bc45d7ded33a7562adba4667aecc0e" +checksum = "32e265784ad618884abaea0600a9adf15393368d840e0222d101a072f3f7534d" dependencies = [ "proc-macro2", "quote", "serde_derive_internals 0.29.1", - "syn 2.0.98", + "syn 2.0.99", ] [[package]] @@ -4972,7 +4971,7 @@ version = "2.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "897b2245f0b511c87893af39b033e5ca9cce68824c4d7e7630b5a1d339658d02" dependencies = [ - "bitflags 2.8.0", + "bitflags 2.9.0", "core-foundation", "core-foundation-sys", "libc", @@ -4991,9 +4990,9 @@ dependencies = [ 
[[package]] name = "semver" -version = "1.0.25" +version = "1.0.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f79dfe2d285b0488816f30e700a7438c5a73d816b5b7d3ac72fbc48b0d185e03" +checksum = "56e6fa9c48d24d85fb3de5ad847117517440f6beceb7798af16b4a87d616b8d0" [[package]] name = "send_wrapper" @@ -5012,9 +5011,9 @@ dependencies = [ [[package]] name = "serde" -version = "1.0.217" +version = "1.0.218" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "02fc4265df13d6fa1d00ecff087228cc0a2b5f3c0e87e258d8b94a156e984c70" +checksum = "e8dfc9d19bdbf6d17e22319da49161d5d0108e4188e8b680aef6299eed22df60" dependencies = [ "serde_derive", ] @@ -5052,9 +5051,9 @@ dependencies = [ [[package]] name = "serde_bytes" -version = "0.11.15" +version = "0.11.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "387cc504cb06bb40a96c8e04e951fe01854cf6bc921053c954e4a606d9675c6a" +checksum = "364fec0df39c49a083c9a8a18a23a6bcfd9af130fe9fe321d18520a0d113e09e" dependencies = [ "serde", ] @@ -5071,13 +5070,13 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.217" +version = "1.0.218" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a9bf7cf98d04a2b28aead066b7496853d4779c9cc183c440dbac457641e19a0" +checksum = "f09503e191f4e797cb8aac08e9a4a4695c5edf6a2e70e376d961ddd5c969f82b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.99", ] [[package]] @@ -5088,7 +5087,7 @@ checksum = "e578a843d40b4189a4d66bba51d7684f57da5bd7c304c64e14bd63efbef49509" dependencies = [ "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.99", ] [[package]] @@ -5099,14 +5098,14 @@ checksum = "18d26a20a969b9e3fdf2fc2d9f21eda6c40e2de84c9408bb5d3b05d499aae711" dependencies = [ "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.99", ] [[package]] name = "serde_json" -version = "1.0.138" +version = "1.0.140" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d434192e7da787e94a6ea7e9670b26a036d0ca41e0b7efb2676dd32bae872949" +checksum = "20068b6e96dc6c9bd23e01df8827e6c7e1f2fddd43c21810382803c136b99373" dependencies = [ "itoa", "memchr", @@ -5116,13 +5115,13 @@ dependencies = [ [[package]] name = "serde_repr" -version = "0.1.19" +version = "0.1.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6c64451ba24fc7a6a2d60fc75dd9c83c90903b19028d4eff35e88fc1e86564e9" +checksum = "175ee3e80ae9982737ca543e96133087cbd9a485eecc3bc4de9c1a37b47ea59c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.99", ] [[package]] @@ -5208,7 +5207,7 @@ checksum = "91d129178576168c589c9ec973feedf7d3126c01ac2bf08795109aa35b69fb8f" dependencies = [ "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.99", ] [[package]] @@ -5219,7 +5218,7 @@ checksum = "5d69265a08751de7844521fd15003ae0a888e035773ba05695c5c759a6f89eef" dependencies = [ "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.99", ] [[package]] @@ -5466,9 +5465,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.98" +version = "2.0.99" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "36147f1a48ae0ec2b5b3bc5b537d267457555a10dc06f3dbc8cb11ba3006d3b1" +checksum = "e02e925281e18ffd9d640e234264753c43edc62d64b2d4cf898f1bc5e75f3fc2" dependencies = [ "proc-macro2", "quote", @@ -5489,7 +5488,7 @@ checksum = "c8af7666ab7b6390ab78131fb5b0fce11d6b7a6951602017c35fa82800708971" dependencies = [ "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.99", ] [[package]] @@ -5576,7 +5575,7 @@ 
source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "662b54ef6f7b4e71f683dadc787bbb2d8e8ef2f91b682ebed3164a5a7abca905" dependencies = [ "parking_lot 0.12.3", - "pin-project 1.1.9", + "pin-project 1.1.10", ] [[package]] @@ -5590,11 +5589,11 @@ dependencies = [ [[package]] name = "thiserror" -version = "2.0.11" +version = "2.0.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d452f284b73e6d76dd36758a0c8684b1d5be31f92b89d07fd5822175732206fc" +checksum = "567b8a2dae586314f7be2a752ec7474332959c6460e02bde30d702a66d488708" dependencies = [ - "thiserror-impl 2.0.11", + "thiserror-impl 2.0.12", ] [[package]] @@ -5605,18 +5604,18 @@ checksum = "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1" dependencies = [ "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.99", ] [[package]] name = "thiserror-impl" -version = "2.0.11" +version = "2.0.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26afc1baea8a989337eeb52b6e72a039780ce45c3edfcc9c5b9d112feeb173c2" +checksum = "7f7cf42b4507d8ea322120659672cf1b9dbb93f8f2d4ecfd6e51350ff5b17a1d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.99", ] [[package]] @@ -5641,9 +5640,9 @@ dependencies = [ [[package]] name = "time" -version = "0.3.37" +version = "0.3.38" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "35e7868883861bd0e56d9ac6efcaaca0d6d5d82a2a7ec8209ff492c07cf37b21" +checksum = "bb041120f25f8fbe8fd2dbe4671c7c2ed74d83be2e7a77529bf7e0790ae3f472" dependencies = [ "deranged", "itoa", @@ -5658,15 +5657,15 @@ dependencies = [ [[package]] name = "time-core" -version = "0.1.2" +version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ef927ca75afb808a4d64dd374f00a2adf8d0fcff8e7b184af886c3c87ec4a3f3" +checksum = "765c97a5b985b7c11d7bc27fa927dc4fe6af3a6dfb021d28deb60d3bf51e76ef" [[package]] name = "time-macros" -version = "0.2.19" +version = "0.2.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2834e6017e3e5e4b9834939793b282bc03b37a3336245fa820e35e233e2a85de" +checksum = "e8093bc3e81c3bc5f7879de09619d06c9a5a5e45ca44dfeeb7225bae38005c5c" dependencies = [ "num-conv", "time-core", @@ -5684,9 +5683,9 @@ dependencies = [ [[package]] name = "tinyvec" -version = "1.8.1" +version = "1.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "022db8904dfa342efe721985167e9fcd16c29b226db4397ed752a761cfce81e8" +checksum = "09b3661f17e86524eccd4371ab0429194e0d7c008abb45f7a7495b1719463c71" dependencies = [ "tinyvec_macros", ] @@ -5704,7 +5703,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3d61fa4ffa3de412bfea335c6ecff681de2b609ba3c77ef3e00e521813a9ed9e" dependencies = [ "backtrace", - "bytes 1.10.0", + "bytes 1.10.1", "libc", "mio 1.0.3", "parking_lot 0.12.3", @@ -5734,7 +5733,7 @@ checksum = "6e06d43f1345a3bcd39f6a56dbb7dcab2ba47e68e8ac134855e7e2bdbaf8cab8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.99", ] [[package]] @@ -5764,7 +5763,7 @@ version = "0.7.13" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d7fcaa8d55a2bdd6b83ace262b016eca0d79ee02818c5c1bcdf0305114081078" dependencies = [ - "bytes 1.10.0", + "bytes 1.10.1", "futures-core", "futures-io", "futures-sink", @@ -5835,7 +5834,7 @@ dependencies = [ "async-trait", "axum", "base64", - "bytes 1.10.0", + "bytes 1.10.1", "futures-core", "futures-util", "h2", @@ -5844,7 +5843,7 @@ dependencies = 
[ "hyper", "hyper-timeout", "percent-encoding", - "pin-project 1.1.9", + "pin-project 1.1.10", "prost 0.11.9", "tokio", "tokio-stream", @@ -5864,14 +5863,14 @@ dependencies = [ "async-trait", "axum", "base64", - "bytes 1.10.0", + "bytes 1.10.1", "h2", "http 0.2.12", "http-body", "hyper", "hyper-timeout", "percent-encoding", - "pin-project 1.1.9", + "pin-project 1.1.10", "prost 0.12.6", "tokio", "tokio-stream", @@ -5890,7 +5889,7 @@ dependencies = [ "futures-core", "futures-util", "indexmap 1.9.3", - "pin-project 1.1.9", + "pin-project 1.1.10", "pin-project-lite", "rand", "slab", @@ -5945,7 +5944,7 @@ checksum = "395ae124c09f9e6918a2310af6038fba074bcf474ac352496d5910dd59a2226d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.99", ] [[package]] @@ -6069,7 +6068,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cf599f51530a7211f5aa92c84abdfc1137362d471f0473a4b1be8d625167fb0d" dependencies = [ "anyhow", - "bytes 1.10.0", + "bytes 1.10.1", "chrono", "prost 0.12.6", "rand", @@ -6147,7 +6146,7 @@ dependencies = [ "proc-macro2", "quote", "serde_derive_internals 0.28.0", - "syn 2.0.98", + "syn 2.0.99", ] [[package]] @@ -6157,7 +6156,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9e3dac10fd62eaf6617d3a904ae222845979aec67c615d1c842b4002c7666fb9" dependencies = [ "byteorder", - "bytes 1.10.0", + "bytes 1.10.1", "data-encoding", "http 0.2.12", "httparse", @@ -6176,7 +6175,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6e2e2ce1e47ed2994fd43b04c8f618008d4cabdd5ee34027cf14f9d918edd9c8" dependencies = [ "byteorder", - "bytes 1.10.0", + "bytes 1.10.1", "data-encoding", "http 1.2.0", "httparse", @@ -6194,7 +6193,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "18e5b8366ee7a95b16d32197d0b2604b43a0be89dc5fac9f8e96ccafbaedda8a" dependencies = [ "byteorder", - "bytes 1.10.0", + "bytes 1.10.1", "data-encoding", "http 1.2.0", "httparse", @@ -6213,9 +6212,9 @@ checksum = "1dccffe3ce07af9386bfd29e80c0ab1a8205a2fc34e4bcd40364df902cfa8f3f" [[package]] name = "unicode-ident" -version = "1.0.17" +version = "1.0.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "00e2473a93778eb0bad35909dff6a10d28e63f792f16ed15e404fca9d5eeedbe" +checksum = "5a5f39404a5da50712a4c1eecf25e90dd62b613502b7e925fd4e4d19b5c96512" [[package]] name = "unicode-segmentation" @@ -6331,7 +6330,7 @@ dependencies = [ "proc-macro-error2", "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.99", ] [[package]] @@ -6376,8 +6375,8 @@ dependencies = [ "async-tungstenite 0.23.0", "cfg-if 1.0.0", "chrono", - "clap 4.5.30", - "config 0.15.8", + "clap 4.5.31", + "config 0.15.9", "console", "crossbeam-channel", "cursive", @@ -6517,7 +6516,7 @@ name = "veilid-flutter" version = "0.4.3" dependencies = [ "allo-isolate", - "android_log-sys 0.3.1", + "android_log-sys 0.3.2", "async-std", "backtrace", "cfg-if 1.0.0", @@ -6580,7 +6579,7 @@ dependencies = [ "backtrace", "cfg-if 1.0.0", "chrono", - "clap 4.5.30", + "clap 4.5.31", "color-eyre", "config 0.14.1", "console-subscriber", @@ -6638,7 +6637,7 @@ dependencies = [ "backtrace", "cfg-if 1.0.0", "chrono", - "clap 4.5.30", + "clap 4.5.31", "console_error_panic_hook", "ctrlc", "eyre", @@ -6810,7 +6809,7 @@ dependencies = [ "log", "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.99", "wasm-bindgen-shared", ] @@ -6845,7 +6844,7 @@ checksum = "8ae87ea40c9f689fc23f209965b6fb8a99ad69aeeb0231408be24920604395de" dependencies = [ "proc-macro2", 
"quote", - "syn 2.0.98", + "syn 2.0.99", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -6880,7 +6879,7 @@ checksum = "17d5042cc5fa009658f9a7333ef24291b1291a25b6382dd68862a7f3b969f69b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.99", ] [[package]] @@ -7057,6 +7056,12 @@ dependencies = [ "windows-targets 0.52.6", ] +[[package]] +name = "windows-link" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6dccfd733ce2b1753b03b6d3c65edf020262ea35e20ccdf3e288043e6dd620e3" + [[package]] name = "windows-permissions" version = "0.2.4" @@ -7073,7 +7078,7 @@ version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "193cae8e647981c35bc947fdd57ba7928b1fa0d4a79305f6dd2dc55221ac35ac" dependencies = [ - "bitflags 2.8.0", + "bitflags 2.9.0", "widestring", "windows-sys 0.59.0", ] @@ -7326,7 +7331,7 @@ version = "0.33.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3268f3d866458b787f390cf61f4bbb563b922d091359f9608842999eaee3943c" dependencies = [ - "bitflags 2.8.0", + "bitflags 2.9.0", ] [[package]] @@ -7349,7 +7354,7 @@ checksum = "ed39ff9f8b2eda91bf6390f9f49eee93d655489e15708e3bb638c1c4f07cecb4" dependencies = [ "async-tungstenite 0.28.2", "async_io_stream", - "bitflags 2.8.0", + "bitflags 2.9.0", "futures-core", "futures-io", "futures-sink", @@ -7451,13 +7456,13 @@ dependencies = [ [[package]] name = "yaml-rust2" -version = "0.9.0" +version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2a1a1c0bc9823338a3bdf8c61f994f23ac004c6fa32c08cd152984499b445e8d" +checksum = "232bdb534d65520716bef0bbb205ff8f2db72d807b19c0bc3020853b92a0cd4b" dependencies = [ "arraydeque", "encoding_rs", - "hashlink 0.9.1", + "hashlink 0.10.0", ] [[package]] @@ -7480,7 +7485,7 @@ checksum = "2380878cad4ac9aac1e2435f3eb4020e8374b5f13c296cb75b4620ff8e229154" dependencies = [ "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.99", "synstructure", ] @@ -7537,27 +7542,27 @@ checksum = "fa4f8080344d4671fb4e831a13ad1e68092748387dfc4f55e356242fae12ce3e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.99", ] [[package]] name = "zerofrom" -version = "0.1.5" +version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cff3ee08c995dee1859d998dea82f7374f2826091dd9cd47def953cae446cd2e" +checksum = "50cc42e0333e05660c3587f3bf9d0478688e15d870fab3346451ce7f8c9fbea5" dependencies = [ "zerofrom-derive", ] [[package]] name = "zerofrom-derive" -version = "0.1.5" +version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "595eed982f7d355beb85837f651fa22e90b3c044842dc7f2c2842c086f295808" +checksum = "d71e5d6e06ab090c67b5e44993ec16b72dcbaabc526db883a360057678b48502" dependencies = [ "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.99", "synstructure", ] @@ -7578,7 +7583,7 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.99", ] [[package]] @@ -7600,7 +7605,7 @@ checksum = "6eafa6dfb17584ea3e2bd6e76e0cc15ad7af12b09abdd1ca55961bed9b1063c6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.99", ] [[package]] diff --git a/veilid-cli/Cargo.toml b/veilid-cli/Cargo.toml index d219ed1a..7535474c 100644 --- a/veilid-cli/Cargo.toml +++ b/veilid-cli/Cargo.toml @@ -27,12 +27,12 @@ rt-async-std = [ rt-tokio = ["tokio", "tokio-util", "veilid-tools/rt-tokio", "cursive/rt-tokio"] 
[dependencies] -async-std = { version = "1.12.0", features = [ +async-std = { version = "1.13.0", features = [ "unstable", "attributes", ], optional = true } -tokio = { version = "1.38.1", features = ["full", "tracing"], optional = true } -tokio-util = { version = "0.7.11", features = ["compat"], optional = true } +tokio = { version = "1.43.0", features = ["full", "tracing"], optional = true } +tokio-util = { version = "0.7.13", features = ["compat"], optional = true } async-tungstenite = { version = "^0.23" } cursive = { git = "https://gitlab.com/veilid/cursive.git", default-features = false, features = [ "crossterm", @@ -44,7 +44,7 @@ cursive_buffered_backend = { git = "https://gitlab.com/veilid/cursive-buffered-b # cursive-multiplex = "0.6.0" # cursive_tree_view = "0.6.0" cursive_table_view = { git = "https://gitlab.com/veilid/cursive-table-view.git" } -arboard = { version = "3.4.0", default-features = false } +arboard = { version = "3.4.1", default-features = false } # cursive-tabs = "0.5.0" clap = { version = "4", features = ["derive"] } directories = "^5" @@ -68,12 +68,12 @@ flume = { version = "^0", features = ["async"] } data-encoding = { version = "^2" } indent = { version = "0.1.1" } -chrono = "0.4.38" +chrono = "0.4.40" owning_ref = "0.4.1" -unicode-width = "0.1.13" +unicode-width = "0.1.14" lru = "0.10.1" -rustyline-async = "0.4.2" -console = "0.15.8" +rustyline-async = "0.4.5" +console = "0.15.11" [dev-dependencies] serial_test = "^2" diff --git a/veilid-core/Cargo.toml b/veilid-core/Cargo.toml index a4125dcf..5aefc1ee 100644 --- a/veilid-core/Cargo.toml +++ b/veilid-core/Cargo.toml @@ -71,8 +71,8 @@ veilid-tools = { version = "0.4.3", path = "../veilid-tools", features = [ "tracing", ], default-features = false } paste = "1.0.15" -once_cell = "1.19.0" -backtrace = "0.3.71" +once_cell = "1.20.3" +backtrace = "^0.3.71" num-traits = "0.2.19" shell-words = "1.1.0" static_assertions = "1.1.0" @@ -82,14 +82,14 @@ lazy_static = "1.5.0" directories = "5.0.1" # Logging -tracing = { version = "0.1.40", features = ["log", "attributes"] } +tracing = { version = "0.1.41", features = ["log", "attributes"] } tracing-subscriber = "0.3.19" -tracing-error = "0.2.0" +tracing-error = "0.2.1" eyre = "0.6.12" -thiserror = "1.0.63" +thiserror = "1.0.69" # Data structures -enumset = { version = "1.1.3", features = ["serde"] } +enumset = { version = "1.1.5", features = ["serde"] } keyvaluedb = "0.1.2" range-set-blaze = "0.1.16" weak-table = "0.3.2" @@ -98,10 +98,10 @@ hashlink = { package = "veilid-hashlink", version = "0.1.1", features = [ ] } # System -futures-util = { version = "0.3.30", default-features = false, features = [ +futures-util = { version = "0.3.31", default-features = false, features = [ "alloc", ] } -flume = { version = "0.11.0", features = ["async"] } +flume = { version = "0.11.1", features = ["async"] } parking_lot = "0.12.3" lock_api = "0.4.12" stop-token = { version = "0.7.0", default-features = false } @@ -124,23 +124,23 @@ curve25519-dalek = { version = "4.1.3", default-features = false, features = [ "zeroize", "precomputed-tables", ] } -blake3 = { version = "1.5.3" } +blake3 = { version = "1.6.1" } chacha20poly1305 = "0.10.1" chacha20 = "0.9.1" argon2 = "0.5.3" # Network -async-std-resolver = { version = "0.24.1", optional = true } -hickory-resolver = { version = "0.24.1", optional = true } +async-std-resolver = { version = "0.24.4", optional = true } +hickory-resolver = { version = "0.24.4", optional = true } # Serialization -capnp = { version = "0.19.6", default-features = 
false, features = ["alloc"] } -serde = { version = "1.0.214", features = ["derive", "rc"] } -serde_json = { version = "1.0.132" } +capnp = { version = "0.19.8", default-features = false, features = ["alloc"] } +serde = { version = "1.0.218", features = ["derive", "rc"] } +serde_json = { version = "1.0.140" } serde-big-array = "0.5.1" json = "0.12.4" -data-encoding = { version = "2.6.0" } -schemars = "0.8.21" +data-encoding = { version = "2.8.0" } +schemars = "0.8.22" lz4_flex = { version = "0.11.3", default-features = false, features = [ "safe-encode", "safe-decode", @@ -155,18 +155,18 @@ sanitize-filename = "0.5.0" # Tools config = { version = "0.13.4", default-features = false, features = ["yaml"] } bugsalot = { package = "veilid-bugsalot", version = "0.2.0" } -chrono = "0.4.38" -libc = "0.2.155" +chrono = "0.4.40" +libc = "0.2.170" nix = "0.27.1" maxminddb = { version = "0.24.0", optional = true } # System -async-std = { version = "1.12.0", features = ["unstable"], optional = true } +async-std = { version = "1.13.0", features = ["unstable"], optional = true } sysinfo = { version = "^0.30.13", default-features = false } -tokio = { version = "1.38.1", features = ["full"], optional = true } -tokio-util = { version = "0.7.11", features = ["compat"], optional = true } -tokio-stream = { version = "0.1.15", features = ["net"], optional = true } -futures-util = { version = "0.3.30", default-features = false, features = [ +tokio = { version = "1.43.0", features = ["full"], optional = true } +tokio-util = { version = "0.7.13", features = ["compat"], optional = true } +tokio-stream = { version = "0.1.17", features = ["net"], optional = true } +futures-util = { version = "0.3.31", default-features = false, features = [ "async-await", "sink", "std", @@ -201,9 +201,9 @@ async_executors = { version = "0.7.0", default-features = false, features = [ "bindgen", "timer", ] } -wasm-bindgen = "0.2.92" -js-sys = "0.3.69" -wasm-bindgen-futures = "0.4.42" +wasm-bindgen = "0.2.100" +js-sys = "0.3.77" +wasm-bindgen-futures = "0.4.50" send_wrapper = { version = "0.6.0", features = ["futures"] } serde_bytes = { version = "0.11", default-features = false, features = [ "alloc", @@ -223,7 +223,7 @@ keyvaluedb-web = "0.1.2" ### Configuration for WASM32 'web-sys' crate [target.'cfg(all(target_arch = "wasm32", target_os = "unknown"))'.dependencies.web-sys] -version = "0.3.69" +version = "0.3.77" features = [ 'Document', 'HtmlDocument', @@ -263,13 +263,13 @@ tracing-oslog = { version = "0.1.2", optional = true } [target.'cfg(not(all(target_arch = "wasm32", target_os = "unknown")))'.dev-dependencies] simplelog = { version = "0.12.2", features = ["test"] } serial_test = "2.0.0" -tracing-subscriber = { version = "0.3.18", features = ["env-filter"] } +tracing-subscriber = { version = "0.3.19", features = ["env-filter"] } [target.'cfg(all(target_arch = "wasm32", target_os = "unknown"))'.dev-dependencies] serial_test = { version = "2.0.0", default-features = false, features = [ "async", ] } -wasm-bindgen-test = "0.3.42" +wasm-bindgen-test = "0.3.50" console_error_panic_hook = "0.1.7" wee_alloc = "0.4.5" wasm-logger = "0.2.0" @@ -278,8 +278,8 @@ wasm-logger = "0.2.0" [build-dependencies] capnpc = "0.19.0" -glob = "0.3.1" -filetime = "0.2.23" +glob = "0.3.2" +filetime = "0.2.25" sha2 = "0.10.8" hex = "0.4.3" reqwest = { version = "0.11", features = ["blocking"], optional = true } diff --git a/veilid-flutter/rust/Cargo.toml b/veilid-flutter/rust/Cargo.toml index ec297ff8..aa75d2f4 100644 --- a/veilid-flutter/rust/Cargo.toml +++ 
b/veilid-flutter/rust/Cargo.toml @@ -35,17 +35,17 @@ debug-load = ["dep:ctor", "dep:libc-print", "dep:android_log-sys", "dep:oslog"] [dependencies] veilid-core = { path = "../../veilid-core", default-features = false } -tracing = { version = "0.1.40", features = ["log", "attributes"] } +tracing = { version = "0.1.41", features = ["log", "attributes"] } tracing-subscriber = "0.3.19" parking_lot = "0.12.3" -backtrace = "0.3.71" -serde_json = "1.0.120" -serde = "1.0.204" -futures-util = { version = "0.3.30", default-features = false, features = [ +backtrace = "^0.3.71" +serde_json = "1.0.140" +serde = "1.0.218" +futures-util = { version = "0.3.31", default-features = false, features = [ "alloc", ] } cfg-if = "1.0.0" -data-encoding = { version = "2.6.0" } +data-encoding = { version = "2.8.0" } tracing-flame = "0.2.0" # Dependencies for native builds only @@ -55,15 +55,15 @@ tracing-opentelemetry = "0.21" opentelemetry = { version = "0.20" } opentelemetry-otlp = { version = "0.13" } opentelemetry-semantic-conventions = "0.12" -async-std = { version = "1.12.0", features = ["unstable"], optional = true } -tokio = { version = "1.38.1", features = ["full"], optional = true } -tokio-stream = { version = "0.1.15", features = ["net"], optional = true } -tokio-util = { version = "0.7.11", features = ["compat"], optional = true } -allo-isolate = "0.1.25" +async-std = { version = "1.13.0", features = ["unstable"], optional = true } +tokio = { version = "1.43.0", features = ["full"], optional = true } +tokio-stream = { version = "0.1.17", features = ["net"], optional = true } +tokio-util = { version = "0.7.13", features = ["compat"], optional = true } +allo-isolate = "0.1.26" ffi-support = "0.4.4" lazy_static = "1.5.0" hostname = "0.3.1" -ctor = { version = "0.2.8", optional = true } +ctor = { version = "0.2.9", optional = true } libc-print = { version = "0.1.23", optional = true } @@ -74,7 +74,7 @@ libc-print = { version = "0.1.23", optional = true } [target.'cfg(target_os = "android")'.dependencies] jni = "0.21.1" paranoid-android = "0.2.2" -android_log-sys = { version = "0.3.1", optional = true } +android_log-sys = { version = "0.3.2", optional = true } # Dependencies for iOS builds only [target.'cfg(target_os = "ios")'.dependencies] diff --git a/veilid-server/Cargo.toml b/veilid-server/Cargo.toml index fc2612c6..aff56bf4 100644 --- a/veilid-server/Cargo.toml +++ b/veilid-server/Cargo.toml @@ -53,8 +53,8 @@ geolocation = ["veilid-core/geolocation"] [dependencies] veilid-core = { path = "../veilid-core", default-features = false } -tracing = { version = "^0.1.40", features = ["log", "attributes"] } -tracing-subscriber = { version = "^0.3.18", features = ["env-filter", "time"] } +tracing = { version = "^0.1.41", features = ["log", "attributes"] } +tracing-subscriber = { version = "^0.3.19", features = ["env-filter", "time"] } tracing-appender = "^0.2.3" tracing-opentelemetry = "^0.24.0" # Buggy: tracing-error = "^0" @@ -62,21 +62,21 @@ opentelemetry = { version = "^0.23" } opentelemetry-otlp = { version = "^0.16.0", default-features = false, optional = true } opentelemetry_sdk = "0.23.0" opentelemetry-semantic-conventions = "^0.16.0" -async-std = { version = "^1.12.0", features = ["unstable"], optional = true } -tokio = { version = "^1.38.1", features = ["full", "tracing"], optional = true } -tokio-stream = { version = "^0.1.15", features = ["net"], optional = true } -tokio-util = { version = "^0.7.11", features = ["compat"], optional = true } +async-std = { version = "^1.13.0", features = ["unstable"], 
optional = true } +tokio = { version = "^1.43.0", features = ["full", "tracing"], optional = true } +tokio-stream = { version = "^0.1.17", features = ["net"], optional = true } +tokio-util = { version = "^0.7.13", features = ["compat"], optional = true } console-subscriber = { version = "^0.3.0", optional = true } async-tungstenite = { version = "^0.27.0", features = ["async-tls"] } color-eyre = { version = "^0.6.3", default-features = false } backtrace = "^0.3.71" -clap = { version = "^4.5.9", features = ["derive", "string", "wrap_help"] } +clap = { version = "^4.5.31", features = ["derive", "string", "wrap_help"] } directories = "^5.0.1" parking_lot = "^0.12.3" -config = { version = "^0.14.0", default-features = false, features = ["yaml"] } +config = { version = "^0.14.1", default-features = false, features = ["yaml"] } cfg-if = "^1.0.0" -serde = "^1.0.204" -serde_derive = "^1.0.204" +serde = "^1.0.218" +serde_derive = "^1.0.218" serde_yaml = { package = "serde_yaml_ng", version = "^0.10.0" } json = "^0" futures-util = { version = "^0", default-features = false, features = [ @@ -91,10 +91,10 @@ rpassword = "^7" hostname = "^0" stop-token = { version = "^0", default-features = false } sysinfo = { version = "^0.30.13", default-features = false } -wg = { version = "^0.9.1", features = ["future"] } +wg = { version = "^0.9.2", features = ["future"] } tracing-flame = { version = "0.2.0", optional = true } -time = { version = "0.3.36", features = ["local-offset"] } -chrono = "0.4.38" +time = { version = "0.3.38", features = ["local-offset"] } +chrono = "0.4.40" [target.'cfg(windows)'.dependencies] windows-service = "^0" @@ -108,10 +108,10 @@ nix = "^0.29.0" tracing-perfetto = { version = "0.1.5", optional = true } [target.'cfg(target_os = "linux")'.dependencies] -tracing-journald = "^0.3.0" +tracing-journald = "^0.3.1" [dev-dependencies] -serial_test = "^3.1.1" +serial_test = "^3.2.0" [lints] workspace = true diff --git a/veilid-tools/Cargo.toml b/veilid-tools/Cargo.toml index 1da65745..2ccbd97c 100644 --- a/veilid-tools/Cargo.toml +++ b/veilid-tools/Cargo.toml @@ -68,66 +68,65 @@ virtual-router-bin = [ ] [dependencies] -tracing = { version = "0.1.40", features = [ +tracing = { version = "0.1.41", features = [ "log", "attributes", ], optional = true } -tracing-subscriber = { version = "0.3.18", features = [ +tracing-subscriber = { version = "0.3.19", features = [ "env-filter", "time", ], optional = true } -log = { version = "0.4.22" } +log = { version = "0.4.26" } eyre = "0.6.12" static_assertions = "1.1.0" -serde = { version = "1.0.214", features = ["derive", "rc"] } -postcard = { version = "1.0.10", features = ["use-std"] } +serde = { version = "1.0.218", features = ["derive", "rc"] } +postcard = { version = "1.1.1", features = ["use-std"] } cfg-if = "1.0.0" -thiserror = "1.0.63" -futures-util = { version = "0.3.30", default-features = false, features = [ +thiserror = "1.0.69" +futures-util = { version = "0.3.31", default-features = false, features = [ "alloc", ] } futures_codec = "0.4.1" parking_lot = "0.12.3" async-lock = "3.4.0" -once_cell = "1.19.0" +once_cell = "1.20.3" stop-token = { version = "0.7.0", default-features = false } rand = "0.8.5" rand_core = "0.6.4" -backtrace = "0.3.71" +backtrace = "^0.3.71" fn_name = "0.1.0" range-set-blaze = "0.1.16" -flume = { version = "0.11.0", features = ["async"] } +flume = { version = "0.11.1", features = ["async"] } imbl = { version = "3.0.0", features = ["serde"] } - # Dependencies for native builds only # Linux, Windows, Mac, iOS, Android 
[target.'cfg(not(all(target_arch = "wasm32", target_os = "unknown")))'.dependencies] async-io = { version = "1.13.0" } -async-std = { version = "1.12.0", features = ["unstable"], optional = true } +async-std = { version = "1.13.0", features = ["unstable"], optional = true } bugsalot = { package = "veilid-bugsalot", version = "0.2.0", optional = true } -time = { version = "0.3.36", features = [ +time = { version = "0.3.38", features = [ "local-offset", "formatting", ], optional = true } -chrono = "0.4.38" +chrono = "0.4.40" ctrlc = "^3" -futures-util = { version = "0.3.30", default-features = false, features = [ +futures-util = { version = "0.3.31", default-features = false, features = [ "async-await", "sink", "std", "io", ] } indent = { version = "0.1.1", optional = true } -libc = "0.2.155" +libc = "0.2.170" nix = { version = "0.27.1", features = ["user"] } -socket2 = { version = "0.5.7", features = ["all"] } -tokio = { version = "1.38.1", features = ["full"], optional = true } -tokio-util = { version = "0.7.11", features = ["compat"], optional = true } -tokio-stream = { version = "0.1.15", features = ["net"], optional = true } +socket2 = { version = "0.5.8", features = ["all"] } +tokio = { version = "1.43.0", features = ["full"], optional = true } +tokio-util = { version = "0.7.13", features = ["compat"], optional = true } +tokio-stream = { version = "0.1.17", features = ["net"], optional = true } ws_stream_tungstenite = { version = "0.14.0", optional = true } -async-tungstenite = { version = "0.28.0", optional = true } +async-tungstenite = { version = "0.28.2", optional = true } clap = { version = "4", features = ["derive"], optional = true } ipnet = { version = "2", features = ["serde"], optional = true } serde_yaml = { package = "serde_yaml_ng", version = "^0.10.0", optional = true } @@ -136,9 +135,9 @@ rand_chacha = { version = "0.3.1", optional = true } # Dependencies for WASM builds only [target.'cfg(all(target_arch = "wasm32", target_os = "unknown"))'.dependencies] -wasm-bindgen = "0.2.92" -js-sys = "0.3.70" -wasm-bindgen-futures = "0.4.42" +wasm-bindgen = "0.2.100" +js-sys = "0.3.77" +wasm-bindgen-futures = "0.4.50" async_executors = { version = "0.7.0", default-features = false } getrandom = { version = "0.2", features = ["js"] } ws_stream_wasm = { version = "0.7.4", optional = true } @@ -184,7 +183,7 @@ serial_test = { version = "2.0.0", default-features = false, features = [ "async", ] } console_error_panic_hook = "0.1.7" -wasm-bindgen-test = "0.3.42" +wasm-bindgen-test = "0.3.50" wee_alloc = "0.4.5" wasm-logger = "0.2.0" tracing-wasm = { version = "0.2.1" } diff --git a/veilid-tools/src/assembly_buffer.rs b/veilid-tools/src/assembly_buffer.rs index 339a9ae2..23a5d948 100644 --- a/veilid-tools/src/assembly_buffer.rs +++ b/veilid-tools/src/assembly_buffer.rs @@ -14,9 +14,6 @@ type SequenceType = u16; const HEADER_LEN: usize = 8; const MAX_LEN: usize = LengthType::MAX as usize; -// XXX: keep statistics on all drops and why we dropped them -// XXX: move to config eventually? - /// The hard-coded maximum fragment size used by AssemblyBuffer /// /// Eventually this should parameterized and made configurable. 
@@ -119,7 +116,7 @@ impl PeerMessages { let mut assembly = MessageAssembly { timestamp, seq, - data: vec![0u8; len as usize], + data: unsafe { unaligned_u8_vec_uninit(len as usize) }, parts: RangeSetBlaze::from_iter([part_start..=part_end]), }; assembly.data[part_start as usize..=part_end as usize].copy_from_slice(chunk); @@ -229,6 +226,7 @@ struct AssemblyBufferUnlockedInner { /// * No sequencing of packets. Packets may still be delivered to the application out of order, but this guarantees that only whole packets will be delivered if all of their fragments are received. #[derive(Clone)] +#[must_use] pub struct AssemblyBuffer { inner: Arc>, unlocked_inner: Arc, @@ -247,7 +245,6 @@ impl AssemblyBuffer { } } - #[must_use] pub fn new() -> Self { Self { inner: Arc::new(Mutex::new(Self::new_inner())), From 9e79c03718fdb728d3af44f955459a561ff7a163 Mon Sep 17 00:00:00 2001 From: Brandon Vandegrift <798832-bmv437@users.noreply.gitlab.com> Date: Thu, 6 Mar 2025 10:42:38 -0500 Subject: [PATCH 11/17] Remove wee_alloc due to memory leak issues --- Cargo.lock | 21 --------------------- veilid-core/Cargo.toml | 1 - veilid-core/tests/web.rs | 4 ---- veilid-tools/Cargo.toml | 1 - veilid-tools/tests/web.rs | 4 ---- veilid-wasm/Cargo.toml | 1 - veilid-wasm/src/lib.rs | 5 ----- 7 files changed, 37 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 815eb980..f1b9df07 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3287,12 +3287,6 @@ dependencies = [ "autocfg", ] -[[package]] -name = "memory_units" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8452105ba047068f40ff7093dd1d9da90898e63dd61736462e9cdda6a90ad3c3" - [[package]] name = "mime" version = "0.3.17" @@ -6503,7 +6497,6 @@ dependencies = [ "web-sys", "webpki", "webpki-roots 0.25.4", - "wee_alloc", "winapi", "windows 0.51.1", "windows-permissions", @@ -6694,7 +6687,6 @@ dependencies = [ "wasm-bindgen-futures", "wasm-bindgen-test", "wasm-logger", - "wee_alloc", "winapi", "ws_stream_tungstenite", "ws_stream_wasm", @@ -6736,7 +6728,6 @@ dependencies = [ "wasm-bindgen", "wasm-bindgen-futures", "wasm-bindgen-test", - "wee_alloc", ] [[package]] @@ -6944,18 +6935,6 @@ version = "0.25.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5f20c57d8d7db6d3b86154206ae5d8fba62dd39573114de97c2cb0578251f8e1" -[[package]] -name = "wee_alloc" -version = "0.4.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dbb3b5a6b2bb17cb6ad44a2e68a43e8d2722c997da10e928665c72ec6c0a0b8e" -dependencies = [ - "cfg-if 0.1.10", - "libc", - "memory_units", - "winapi", -] - [[package]] name = "wg" version = "0.9.2" diff --git a/veilid-core/Cargo.toml b/veilid-core/Cargo.toml index 5aefc1ee..df969792 100644 --- a/veilid-core/Cargo.toml +++ b/veilid-core/Cargo.toml @@ -271,7 +271,6 @@ serial_test = { version = "2.0.0", default-features = false, features = [ ] } wasm-bindgen-test = "0.3.50" console_error_panic_hook = "0.1.7" -wee_alloc = "0.4.5" wasm-logger = "0.2.0" ### BUILD OPTIONS diff --git a/veilid-core/tests/web.rs b/veilid-core/tests/web.rs index 1fe84138..2bbfbac5 100644 --- a/veilid-core/tests/web.rs +++ b/veilid-core/tests/web.rs @@ -10,10 +10,6 @@ use wasm_bindgen_test::*; wasm_bindgen_test_configure!(run_in_browser); -extern crate wee_alloc; -#[global_allocator] -static ALLOC: wee_alloc::WeeAlloc = wee_alloc::WeeAlloc::INIT; - static SETUP_ONCE: Once = Once::new(); pub fn setup() -> () { SETUP_ONCE.call_once(|| { diff --git a/veilid-tools/Cargo.toml b/veilid-tools/Cargo.toml 
index 2ccbd97c..60a4fde7 100644 --- a/veilid-tools/Cargo.toml +++ b/veilid-tools/Cargo.toml @@ -184,7 +184,6 @@ serial_test = { version = "2.0.0", default-features = false, features = [ ] } console_error_panic_hook = "0.1.7" wasm-bindgen-test = "0.3.50" -wee_alloc = "0.4.5" wasm-logger = "0.2.0" tracing-wasm = { version = "0.2.1" } diff --git a/veilid-tools/tests/web.rs b/veilid-tools/tests/web.rs index 26de66e7..f54309fd 100644 --- a/veilid-tools/tests/web.rs +++ b/veilid-tools/tests/web.rs @@ -9,10 +9,6 @@ use wasm_bindgen_test::*; wasm_bindgen_test_configure!(run_in_browser); -extern crate wee_alloc; -#[global_allocator] -static ALLOC: wee_alloc::WeeAlloc = wee_alloc::WeeAlloc::INIT; - static SETUP_ONCE: Once = Once::new(); pub fn setup() -> () { SETUP_ONCE.call_once(|| { diff --git a/veilid-wasm/Cargo.toml b/veilid-wasm/Cargo.toml index f032e59e..55aa92cb 100644 --- a/veilid-wasm/Cargo.toml +++ b/veilid-wasm/Cargo.toml @@ -28,7 +28,6 @@ tracing-subscriber = "^0" wasm-bindgen = { version = "^0", features = ["serde-serialize"] } console_error_panic_hook = "^0" -wee_alloc = "^0" cfg-if = "^1" wasm-bindgen-futures = "^0" js-sys = "^0" diff --git a/veilid-wasm/src/lib.rs b/veilid-wasm/src/lib.rs index 595bc65e..35bbc638 100644 --- a/veilid-wasm/src/lib.rs +++ b/veilid-wasm/src/lib.rs @@ -35,11 +35,6 @@ pub mod veilid_table_db_js; mod wasm_helpers; use wasm_helpers::*; -// Allocator -extern crate wee_alloc; -#[global_allocator] -static ALLOC: wee_alloc::WeeAlloc = wee_alloc::WeeAlloc::INIT; - // API Singleton lazy_static! { static ref VEILID_API: SendWrapper>> = From bd5492404b9600a82bf63d713d48f94edf335e99 Mon Sep 17 00:00:00 2001 From: Christien Rioux Date: Thu, 6 Mar 2025 14:29:45 -0500 Subject: [PATCH 12/17] fix python async context --- veilid-python/veilid/api.py | 59 ++++++++++++++++++++++++++++---- veilid-python/veilid/json_api.py | 20 ++++++++--- 2 files changed, 67 insertions(+), 12 deletions(-) diff --git a/veilid-python/veilid/api.py b/veilid-python/veilid/api.py index de043b67..b54786de 100644 --- a/veilid-python/veilid/api.py +++ b/veilid-python/veilid/api.py @@ -6,11 +6,20 @@ from .state import VeilidState class RoutingContext(ABC): + ref_count: int + + def __init__( + self, + ): + self.ref_count = 0 + async def __aenter__(self) -> Self: + self.ref_count += 1 return self async def __aexit__(self, *excinfo): - if not self.is_done(): + self.ref_count -= 1 + if self.ref_count == 0 and not self.is_done(): await self.release() @abstractmethod @@ -109,13 +118,22 @@ class RoutingContext(ABC): class TableDbTransaction(ABC): + ref_count: int + + def __init__( + self, + ): + self.ref_count = 0 + async def __aenter__(self) -> Self: + self.ref_count += 1 return self async def __aexit__(self, *excinfo): - if not self.is_done(): - await self.rollback() - + self.ref_count -= 1 + if self.ref_count == 0 and not self.is_done(): + await self.release() + @abstractmethod def is_done(self) -> bool: pass @@ -138,11 +156,20 @@ class TableDbTransaction(ABC): class TableDb(ABC): + ref_count: int + + def __init__( + self, + ): + self.ref_count = 0 + async def __aenter__(self) -> Self: + self.ref_count += 1 return self async def __aexit__(self, *excinfo): - if not self.is_done(): + self.ref_count -= 1 + if self.ref_count == 0 and not self.is_done(): await self.release() @abstractmethod @@ -179,11 +206,20 @@ class TableDb(ABC): class CryptoSystem(ABC): + ref_count: int + + def __init__( + self, + ): + self.ref_count = 0 + async def __aenter__(self) -> Self: + self.ref_count += 1 return self async def 
__aexit__(self, *excinfo): - if not self.is_done(): + self.ref_count -= 1 + if self.ref_count == 0 and not self.is_done(): await self.release() @abstractmethod @@ -306,11 +342,20 @@ class CryptoSystem(ABC): class VeilidAPI(ABC): + ref_count: int + + def __init__( + self, + ): + self.ref_count = 0 + async def __aenter__(self) -> Self: + self.ref_count += 1 return self async def __aexit__(self, *excinfo): - if not self.is_done(): + self.ref_count -= 1 + if self.ref_count == 0 and not self.is_done(): await self.release() @abstractmethod diff --git a/veilid-python/veilid/json_api.py b/veilid-python/veilid/json_api.py index 5969c5ea..7b75d6ed 100644 --- a/veilid-python/veilid/json_api.py +++ b/veilid-python/veilid/json_api.py @@ -99,6 +99,8 @@ class _JsonVeilidAPI(VeilidAPI): update_callback: Callable[[VeilidUpdate], Awaitable], validate_schema: bool = True, ): + super().__init__() + self.reader = reader self.writer = writer self.update_callback = update_callback @@ -308,7 +310,7 @@ class _JsonVeilidAPI(VeilidAPI): # Validate if we have a validator if response["op"] != req["op"]: - raise ValueError("Response op does not match request op") + raise ValueError(f"Response op does not match request op: {response['op']} != {req['op']}") if validate is not None: validate(req, response) @@ -459,7 +461,7 @@ class _JsonVeilidAPI(VeilidAPI): def validate_rc_op(request: dict, response: dict): if response["rc_op"] != request["rc_op"]: - raise ValueError("Response rc_op does not match request rc_op") + raise ValueError(f"Response rc_op does not match request rc_op: {response["rc_op"]} != {request["rc_op"]}") class _JsonRoutingContext(RoutingContext): @@ -468,6 +470,8 @@ class _JsonRoutingContext(RoutingContext): done: bool def __init__(self, api: _JsonVeilidAPI, rc_id: int): + super().__init__() + self.api = api self.rc_id = rc_id self.done = False @@ -728,7 +732,7 @@ class _JsonRoutingContext(RoutingContext): def validate_tx_op(request: dict, response: dict): if response["tx_op"] != request["tx_op"]: - raise ValueError("Response tx_op does not match request tx_op") + raise ValueError(f"Response tx_op does not match request tx_op: {response['tx_op']} != {request['tx_op']}") class _JsonTableDbTransaction(TableDbTransaction): @@ -737,6 +741,8 @@ class _JsonTableDbTransaction(TableDbTransaction): done: bool def __init__(self, api: _JsonVeilidAPI, tx_id: int): + super().__init__() + self.api = api self.tx_id = tx_id self.done = False @@ -810,7 +816,7 @@ class _JsonTableDbTransaction(TableDbTransaction): def validate_db_op(request: dict, response: dict): if response["db_op"] != request["db_op"]: - raise ValueError("Response db_op does not match request db_op") + raise ValueError(f"Response db_op does not match request db_op: {response['db_op']} != {request['db_op']}") class _JsonTableDb(TableDb): @@ -819,6 +825,8 @@ class _JsonTableDb(TableDb): done: bool def __init__(self, api: _JsonVeilidAPI, db_id: int): + super().__init__() + self.api = api self.db_id = db_id self.done = False @@ -929,7 +937,7 @@ class _JsonTableDb(TableDb): def validate_cs_op(request: dict, response: dict): if response["cs_op"] != request["cs_op"]: - raise ValueError("Response cs_op does not match request cs_op") + raise ValueError(f"Response cs_op does not match request cs_op: {response['cs_op']} != {request['cs_op']}") class _JsonCryptoSystem(CryptoSystem): @@ -938,6 +946,8 @@ class _JsonCryptoSystem(CryptoSystem): done: bool def __init__(self, api: _JsonVeilidAPI, cs_id: int): + super().__init__() + self.api = api self.cs_id = cs_id 
self.done = False From 67eeb87c280aa6a39cc196c371dc3d4c94c70f70 Mon Sep 17 00:00:00 2001 From: Christien Rioux Date: Tue, 11 Mar 2025 09:31:15 -0400 Subject: [PATCH 13/17] validate types for python api calls add more stress tests to python fix deadlock in veilid_api duration testing correct offline_subkey_writes inflight reporting --- veilid-core/src/logging/api_tracing_layer.rs | 28 +- veilid-core/src/storage_manager/mod.rs | 105 ++++++-- veilid-python/tests/test_dht.py | 257 ++++++++++++++++--- veilid-python/veilid/json_api.py | 165 ++++++++++++ veilid-python/veilid/types.py | 34 ++- 5 files changed, 499 insertions(+), 90 deletions(-) diff --git a/veilid-core/src/logging/api_tracing_layer.rs b/veilid-core/src/logging/api_tracing_layer.rs index 812e8d08..c2498a64 100644 --- a/veilid-core/src/logging/api_tracing_layer.rs +++ b/veilid-core/src/logging/api_tracing_layer.rs @@ -193,16 +193,13 @@ impl registry::LookupSpan<'a>> Layer for ApiTracingLa attrs.record(&mut new_debug_record); if let Some(span_ref) = ctx.span(id) { - span_ref - .extensions_mut() - .insert::(new_debug_record); + let mut extensions_mut = span_ref.extensions_mut(); + extensions_mut.insert::(new_debug_record); if crate::DURATION_LOG_FACILITIES.contains(&attrs.metadata().target()) { - span_ref - .extensions_mut() - .insert::(SpanDuration { - start: Timestamp::now(), - end: Timestamp::default(), - }); + extensions_mut.insert::(SpanDuration { + start: Timestamp::now(), + end: Timestamp::default(), + }); } } } @@ -213,14 +210,14 @@ impl registry::LookupSpan<'a>> Layer for ApiTracingLa return; } if let Some(span_ref) = ctx.span(&id) { - if let Some(span_duration) = span_ref.extensions_mut().get_mut::() { + let mut extensions_mut = span_ref.extensions_mut(); + if let Some(span_duration) = extensions_mut.get_mut::() { span_duration.end = Timestamp::now(); let duration = span_duration.end.saturating_sub(span_duration.start); let meta = span_ref.metadata(); - let mut extensions = span_ref.extensions_mut(); let log_key = - if let Some(span_ksr) = extensions.get_mut::() { + if let Some(span_ksr) = extensions_mut.get_mut::() { span_ksr.log_key() } else { "" @@ -254,10 +251,9 @@ impl registry::LookupSpan<'a>> Layer for ApiTracingLa return; } if let Some(span_ref) = ctx.span(id) { - if let Some(debug_record) = span_ref - .extensions_mut() - .get_mut::() - { + let mut extensions_mut = span_ref.extensions_mut(); + + if let Some(debug_record) = extensions_mut.get_mut::() { values.record(debug_record); } } diff --git a/veilid-core/src/storage_manager/mod.rs b/veilid-core/src/storage_manager/mod.rs index 722c41b9..4502bb1b 100644 --- a/veilid-core/src/storage_manager/mod.rs +++ b/veilid-core/src/storage_manager/mod.rs @@ -59,6 +59,8 @@ struct StorageManagerInner { pub remote_record_store: Option>, /// Record subkeys that have not been pushed to the network because they were written to offline pub offline_subkey_writes: HashMap, + /// Record subkeys that are currently being written to in the foreground + pub active_subkey_writes: HashMap, /// Storage manager metadata that is persistent, including copy of offline subkey writes pub metadata_db: Option, /// Background processing task (not part of attachment manager tick tree so it happens when detached too) @@ -73,6 +75,7 @@ impl fmt::Debug for StorageManagerInner { .field("local_record_store", &self.local_record_store) .field("remote_record_store", &self.remote_record_store) .field("offline_subkey_writes", &self.offline_subkey_writes) + .field("active_subkey_writes", 
&self.active_subkey_writes) //.field("metadata_db", &self.metadata_db) //.field("tick_future", &self.tick_future) .finish() @@ -736,7 +739,21 @@ impl StorageManager { ) .await?; - if !self.dht_is_online() { + // Note that we are writing this subkey actively + // If it appears we are already doing this, then put it to the offline queue + let already_writing = { + let asw = inner.active_subkey_writes.entry(key).or_default(); + if asw.contains(subkey) { + veilid_log!(self debug "Already writing to this subkey: {}:{}", key, subkey); + true + } else { + // Add to our list of active subkey writes + asw.insert(subkey); + false + } + }; + + if already_writing || !self.dht_is_online() { veilid_log!(self debug "Writing subkey offline: {}:{} len={}", key, subkey, signed_value_data.value_data().data().len() ); // Add to offline writes to flush Self::add_offline_subkey_write_inner(&mut inner, key, subkey, safety_selection); @@ -764,41 +781,68 @@ impl StorageManager { // Failed to write, try again later let mut inner = self.inner.lock().await; Self::add_offline_subkey_write_inner(&mut inner, key, subkey, safety_selection); + + // Remove from active subkey writes + let asw = inner.active_subkey_writes.get_mut(&key).unwrap(); + if !asw.remove(subkey) { + panic!("missing active subkey write: {}:{}", key, subkey); + } + if asw.is_empty() { + inner.active_subkey_writes.remove(&key); + } return Err(e); } }; - // Wait for the first result - let Ok(result) = res_rx.recv_async().await else { - apibail_internal!("failed to receive results"); + let process = || async { + // Wait for the first result + let Ok(result) = res_rx.recv_async().await else { + apibail_internal!("failed to receive results"); + }; + let result = result?; + let partial = result.fanout_result.kind.is_partial(); + + // Process the returned result + let out = self + .process_outbound_set_value_result( + key, + subkey, + signed_value_data.value_data().clone(), + safety_selection, + result, + ) + .await?; + + // If there's more to process, do it in the background + if partial { + self.process_deferred_outbound_set_value_result( + res_rx, + key, + subkey, + out.clone() + .unwrap_or_else(|| signed_value_data.value_data().clone()), + safety_selection, + ); + } + + Ok(out) }; - let result = result?; - let partial = result.fanout_result.kind.is_partial(); - // Process the returned result - let out = self - .process_outbound_set_value_result( - key, - subkey, - signed_value_data.value_data().clone(), - safety_selection, - result, - ) - .await?; + let out = process().await; - // If there's more to process, do it in the background - if partial { - self.process_deferred_outbound_set_value_result( - res_rx, - key, - subkey, - out.clone() - .unwrap_or_else(|| signed_value_data.value_data().clone()), - safety_selection, - ); + // Remove active subkey write + let mut inner = self.inner.lock().await; + + // Remove from active subkey writes + let asw = inner.active_subkey_writes.get_mut(&key).unwrap(); + if !asw.remove(subkey) { + panic!("missing active subkey write: {}:{}", key, subkey); + } + if asw.is_empty() { + inner.active_subkey_writes.remove(&key); } - Ok(out) + out } /// Create,update or cancel an outbound watch to a DHT value @@ -1019,11 +1063,18 @@ impl StorageManager { ); // Get the offline subkeys for this record still only returning the ones we're inspecting + // Merge in the currently offline in-flight records and the actively written records as well + let active_subkey_writes = inner + .active_subkey_writes + .get(&key) + .cloned() + 
.unwrap_or_default(); let offline_subkey_writes = inner .offline_subkey_writes .get(&key) .map(|o| o.subkeys.union(&o.subkeys_in_flight)) .unwrap_or_default() + .union(&active_subkey_writes) .intersect(&subkeys); // If this is the maximum scope we're interested in, return the report diff --git a/veilid-python/tests/test_dht.py b/veilid-python/tests/test_dht.py index ce02662b..7cd46c62 100644 --- a/veilid-python/tests/test_dht.py +++ b/veilid-python/tests/test_dht.py @@ -1,6 +1,6 @@ # Routing context veilid tests -from typing import Awaitable, Callable +from typing import Any, Awaitable, Callable, Optional import pytest import asyncio import time @@ -374,13 +374,13 @@ async def test_inspect_dht_record(api_connection: veilid.VeilidAPI): rr = await rc.inspect_dht_record(rec.key, [], veilid.DHTReportScope.LOCAL) print("rr: {}", rr.__dict__) - assert rr.subkeys == [[0,1]] + assert rr.subkeys == [(0,1)] assert rr.local_seqs == [0, 0xFFFFFFFF] assert rr.network_seqs == [] rr2 = await rc.inspect_dht_record(rec.key, [], veilid.DHTReportScope.SYNC_GET) print("rr2: {}", rr2.__dict__) - assert rr2.subkeys == [[0,1]] + assert rr2.subkeys == [(0,1)] assert rr2.local_seqs == [0, 0xFFFFFFFF] assert rr2.network_seqs == [0, 0xFFFFFFFF] @@ -390,42 +390,28 @@ async def test_inspect_dht_record(api_connection: veilid.VeilidAPI): -async def _run_test_schema_limit(api_connection: veilid.VeilidAPI, open_record: Callable[[veilid.RoutingContext, int], Awaitable[tuple[veilid.TypedKey, veilid.PublicKey, veilid.SecretKey]]], count: int, test_data: bytes, ): +async def _run_test_schema_limit(api_connection: veilid.VeilidAPI, open_record: Callable[[veilid.RoutingContext, int], Awaitable[tuple[veilid.DHTRecordDescriptor, Optional[veilid.KeyPair]]]], count: int, test_data: bytes): rc = await api_connection.new_routing_context() async with rc: - (key, owner, secret) = await open_record(rc, count) - print(f'{key} {owner}:{secret}') + (desc, writer) = await open_record(rc, count) + print(f'{desc.key} {writer}') # write dht records on server 0 records = [] print(f'writing {count} subkeys') for n in range(count): - await rc.set_dht_value(key, ValueSubkey(n), test_data) + await rc.set_dht_value(desc.key, ValueSubkey(n), test_data) print(f' {n}') - print('syncing records to the network') + await sync(rc, [desc]) - while True: - donerecords = set() - subkeysleft = 0 - - rr = await rc.inspect_dht_record(key, []) - left = 0; [left := left + (x[1]-x[0]+1) for x in rr.offline_subkeys] - if left == 0: - break - print(f' {left} subkeys left') - time.sleep(1) - - await rc.close_dht_record(key) - - await api_connection.debug("record purge local") - await api_connection.debug("record purge remote") + await rc.close_dht_record(desc.key) # read dht records on server 0 print(f'reading {count} subkeys') - desc1 = await rc.open_dht_record(key) + desc1 = await rc.open_dht_record(desc.key) for n in range(count): - vd0 = await rc.get_dht_value(key, ValueSubkey(n), force_refresh=True) + vd0 = await rc.get_dht_value(desc1.key, ValueSubkey(n)) assert vd0.data == test_data print(f' {n}') @@ -433,10 +419,10 @@ async def _run_test_schema_limit(api_connection: veilid.VeilidAPI, open_record: @pytest.mark.asyncio async def test_schema_limit_dflt(api_connection: veilid.VeilidAPI): - async def open_record(rc: veilid.RoutingContext, count: int) -> tuple[veilid.TypedKey, veilid.PublicKey, veilid.SecretKey]: + async def open_record(rc: veilid.RoutingContext, count: int) -> tuple[veilid.DHTRecordDescriptor, Optional[veilid.KeyPair]]: schema = 
veilid.DHTSchema.dflt(count) desc = await rc.create_dht_record(schema) - return (desc.key, desc.owner, desc.owner_secret) + return (desc, desc.owner_key_pair()) print("Test with maximum number of subkeys before lower limit hit") @@ -474,7 +460,7 @@ async def test_schema_limit_smpl(api_connection: veilid.VeilidAPI): desc = await rc.create_dht_record(schema) await rc.open_dht_record(desc.key, writer_keypair) - return (desc.key, writer_keypair.key(), writer_keypair.secret()) + return (desc, writer_keypair) print("Test with maximum number of subkeys before lower limit hit") TEST_DATA = b"A" * 32768 @@ -545,18 +531,7 @@ async def test_dht_integration_writer_reader(): await rc0.set_dht_value(desc.key, ValueSubkey(0), TEST_DATA) - print('syncing records to the network') - recleft = len(records) - for desc in records: - while True: - rr = await rc0.inspect_dht_record(desc.key, []) - left = 0; [left := left + (x[1]-x[0]+1) for x in rr.offline_subkeys] - if left == 0: - await rc0.close_dht_record(desc.key) - break - print(f' {recleft} records {left} subkeys left') - time.sleep(0.1) - recleft-=1 + await sync(rc0, records) # read dht records on server 1 print(f'reading {COUNT} records') @@ -636,6 +611,96 @@ async def test_dht_write_read_local(): print(f' {n}') n += 1 + +@pytest.mark.skipif(os.getenv("STRESS") != "1", reason="stress test takes a long time") +@pytest.mark.asyncio +async def test_dht_write_read_full_subkeys_local(): + + async def null_update_callback(update: veilid.VeilidUpdate): + pass + + try: + api0 = await veilid.api_connector(null_update_callback, 0) + except veilid.VeilidConnectionError: + pytest.skip("Unable to connect to veilid-server 0.") + + async with api0: + # purge local and remote record stores to ensure we start fresh + await api0.debug("record purge local") + await api0.debug("record purge remote") + + # make routing contexts + rc0 = await api0.new_routing_context() + async with rc0: + + # Number of records + COUNT = 8 + # Number of subkeys per record + SUBKEY_COUNT = 32 + # Nonce to encrypt test data + NONCE = veilid.Nonce.from_bytes(b"A"*24) + # Secret to encrypt test data + SECRET = veilid.SharedSecret.from_bytes(b"A"*32) + # Max subkey size + MAX_SUBKEY_SIZE = min(32768, 1024*1024/SUBKEY_COUNT) + # MAX_SUBKEY_SIZE = 256 + + # write dht records on server 0 + records = [] + subkey_data_list = [] + schema = veilid.DHTSchema.dflt(SUBKEY_COUNT) + print(f'writing {COUNT} records with full subkeys') + init_futures = set() + for n in range(COUNT): + + # Make encrypted data that is consistent and hard to compress + subkey_data = bytes(chr(ord("A")+n)*MAX_SUBKEY_SIZE, 'ascii') + print(f"subkey_data({n}):len={len(subkey_data)}") + + cs = await api0.best_crypto_system() + async with cs: + subkey_data = await cs.crypt_no_auth(subkey_data, NONCE, SECRET) + subkey_data_list.append(subkey_data) + + + desc = await rc0.create_dht_record(schema) + records.append(desc) + + for i in range(SUBKEY_COUNT): + init_futures.add(rc0.set_dht_value(desc.key, ValueSubkey(i), subkey_data)) + + print(f' {n}: {desc.key} {desc.owner}:{desc.owner_secret}') + + # Wait for all records to synchronize, with progress bars + await sync_win(rc0, records, SUBKEY_COUNT, init_futures) + + for desc0 in records: + await rc0.close_dht_record(desc0.key) + + await api0.debug("record purge local") + await api0.debug("record purge remote") + + # read dht records on server 0 + print(f'reading {COUNT} records') + for n, desc0 in enumerate(records): + desc1 = await rc0.open_dht_record(desc0.key) + + for i in 
range(SUBKEY_COUNT): + vd0 = None + while vd0 == None: + vd0 = await rc0.get_dht_value(desc1.key, ValueSubkey(i), force_refresh=True) + if vd0 != None: + assert vd0.data == subkey_data_list[n] + break + time.sleep(1) + print(f"retrying record {n} subkey {i}") + + + await rc0.close_dht_record(desc1.key) + + print(f' {n}') + + async def sync(rc: veilid.RoutingContext, records: list[veilid.DHTRecordDescriptor]): print('syncing records to the network') syncrecords = records.copy() @@ -646,9 +711,119 @@ async def sync(rc: veilid.RoutingContext, records: list[veilid.DHTRecordDescript rr = await rc.inspect_dht_record(desc.key, []) left = 0; [left := left + (x[1]-x[0]+1) for x in rr.offline_subkeys] if left == 0: - donerecords.add(desc) + if veilid.ValueSeqNum.NONE not in rr.local_seqs: + donerecords.add(desc) else: subkeysleft += left syncrecords = [x for x in syncrecords if x not in donerecords] print(f' {len(syncrecords)} records {subkeysleft} subkeys left') time.sleep(1) + + +async def sync_win( + rc: veilid.RoutingContext, + records: list[veilid.DHTRecordDescriptor], + subkey_count: int, + init_futures: set[Awaitable[Any]] + ): + import curses + + screen = curses.initscr() + + curses.start_color() + curses.init_pair(1, curses.COLOR_BLACK, curses.COLOR_BLUE) + curses.init_pair(2, curses.COLOR_BLACK, curses.COLOR_CYAN) + curses.init_pair(3, curses.COLOR_BLACK, curses.COLOR_YELLOW) + curses.init_pair(4, curses.COLOR_BLACK, curses.COLOR_GREEN) + + HEIGHT=len(records) + 3 + GRAPHWIDTH = subkey_count + WIDTH=GRAPHWIDTH + 4 + 1 + 43 + 2 + + cur_lines = curses.LINES + cur_cols = curses.COLS + win = curses.newwin(HEIGHT, WIDTH, + max(0, int(cur_lines/2) - int(HEIGHT/2)), + max(0, int(cur_cols/2) - int(WIDTH/2))) + win.clear() + win.border(0,0,0,0) + win.nodelay(True) + + # Record inspection and completion state + + # Records we are done inspecting and have finished sync + donerecords: set[veilid.TypedKey] = set() + # Records we are currently inspecting that are in the futures set + futurerecords: set[veilid.TypedKey] = set() + # All the futures we are waiting for + futures = set() + # The record report state + recordreports: dict[veilid.TypedKey, veilid.DHTRecordReport] = dict() + + # Add initial futures with None key + for fut in init_futures: + async def _do_init_fut(fut): + return (None, await fut) + futures.add(asyncio.create_task(_do_init_fut(fut))) + + # Loop until all records are completed + while len(donerecords) != len(records): + + # Update the futures with inspects for unfinished records + for n, desc in enumerate(records): + if desc.key in donerecords or desc.key in futurerecords: + continue + async def _do_inspect(key: veilid.TypedKey): + return (key, await rc.inspect_dht_record(key, [])) + futures.add(asyncio.create_task(_do_inspect(desc.key))) + futurerecords.add(desc.key) + + # Wait for some futures to finish + done, futures = await asyncio.wait(futures, return_when = asyncio.FIRST_COMPLETED) + + # Process finished futures into the state + for rr_fut in done: + key: veilid.TypedKey + rr: veilid.DHTRecordReport + key, rr = await rr_fut + if key is not None: + futurerecords.remove(key) + + if len(rr.subkeys) == 1 and rr.subkeys[0] == (0, subkey_count-1) and veilid.ValueSeqNum.NONE not in rr.local_seqs and len(rr.offline_subkeys) == 0: + if key in recordreports: + del recordreports[key] + donerecords.add(key) + else: + recordreports[key] = rr + + # Re-render the state + if cur_lines != curses.LINES or cur_cols != curses.COLS: + cur_lines = curses.LINES + cur_cols = curses.COLS + win.move( 
+ max(0, int(cur_lines/2) - int(HEIGHT/2)), + max(0, int(cur_cols/2) - int(WIDTH/2))) + win.border(0,0,0,0) + win.addstr(1, 1, "syncing records to the network", curses.color_pair(0)) + for n, rr in enumerate(records): + key = rr.key + win.addstr(n+2, GRAPHWIDTH+1, key, curses.color_pair(0)) + + if key in donerecords: + win.addstr(n+2, 1, " " * subkey_count, curses.color_pair(4)) + elif key in recordreports: + rr = recordreports[key] + win.addstr(n+2, 1, " " * subkey_count, curses.color_pair(1)) + for (a,b) in rr.subkeys: + for m in range(a, b+1): + if rr.local_seqs[m] != veilid.ValueSeqNum.NONE: + win.addstr(n+2, m+1, " ", curses.color_pair(2)) + for (a,b) in rr.offline_subkeys: + win.addstr(n+2, a+1, " " * (b-a+1), curses.color_pair(3)) + else: + win.addstr(n+2, 1, " " * subkey_count, curses.color_pair(1)) + + win.refresh() + + curses.endwin() + diff --git a/veilid-python/veilid/json_api.py b/veilid-python/veilid/json_api.py index 7b75d6ed..0a88051c 100644 --- a/veilid-python/veilid/json_api.py +++ b/veilid-python/veilid/json_api.py @@ -338,6 +338,12 @@ class _JsonVeilidAPI(VeilidAPI): async def new_custom_private_route( self, kinds: list[CryptoKind], stability: Stability, sequencing: Sequencing ) -> tuple[RouteId, bytes]: + assert isinstance(kinds, list) + for k in kinds: + assert isinstance(k, CryptoKind) + assert isinstance(stability, Stability) + assert isinstance(sequencing, Sequencing) + return NewPrivateRouteResult.from_json( raise_api_result( await self.send_ndjson_request( @@ -350,6 +356,8 @@ class _JsonVeilidAPI(VeilidAPI): ).to_tuple() async def import_remote_private_route(self, blob: bytes) -> RouteId: + assert isinstance(blob, bytes) + return RouteId( raise_api_result( await self.send_ndjson_request(Operation.IMPORT_REMOTE_PRIVATE_ROUTE, blob=blob) @@ -357,11 +365,16 @@ class _JsonVeilidAPI(VeilidAPI): ) async def release_private_route(self, route_id: RouteId): + assert isinstance(route_id, RouteId) + raise_api_result( await self.send_ndjson_request(Operation.RELEASE_PRIVATE_ROUTE, route_id=route_id) ) async def app_call_reply(self, call_id: OperationId, message: bytes): + assert isinstance(call_id, OperationId) + assert isinstance(message, bytes) + raise_api_result( await self.send_ndjson_request( Operation.APP_CALL_REPLY, call_id=call_id, message=message @@ -373,6 +386,9 @@ class _JsonVeilidAPI(VeilidAPI): return _JsonRoutingContext(self, rc_id) async def open_table_db(self, name: str, column_count: int) -> TableDb: + assert isinstance(name, str) + assert isinstance(column_count, int) + db_id = raise_api_result( await self.send_ndjson_request( Operation.OPEN_TABLE_DB, name=name, column_count=column_count @@ -381,11 +397,15 @@ class _JsonVeilidAPI(VeilidAPI): return _JsonTableDb(self, db_id) async def delete_table_db(self, name: str) -> bool: + assert isinstance(name, str) + return raise_api_result( await self.send_ndjson_request(Operation.DELETE_TABLE_DB, name=name) ) async def get_crypto_system(self, kind: CryptoKind) -> CryptoSystem: + assert isinstance(kind, CryptoKind) + cs_id = raise_api_result( await self.send_ndjson_request(Operation.GET_CRYPTO_SYSTEM, kind=kind) ) @@ -398,6 +418,13 @@ class _JsonVeilidAPI(VeilidAPI): async def verify_signatures( self, node_ids: list[TypedKey], data: bytes, signatures: list[TypedSignature] ) -> Optional[list[TypedKey]]: + assert isinstance(node_ids, list) + for ni in node_ids: + assert isinstance(ni, TypedKey) + assert isinstance(data, bytes) + for sig in signatures: + assert isinstance(sig, TypedSignature) + out = raise_api_result( 
await self.send_ndjson_request( Operation.VERIFY_SIGNATURES, @@ -418,6 +445,11 @@ class _JsonVeilidAPI(VeilidAPI): async def generate_signatures( self, data: bytes, key_pairs: list[TypedKeyPair] ) -> list[TypedSignature]: + assert isinstance(data, bytes) + assert isinstance(key_pairs, list) + for kp in key_pairs: + assert isinstance(kp, TypedKeyPair) + return list( map( lambda x: TypedSignature(x), @@ -430,6 +462,8 @@ class _JsonVeilidAPI(VeilidAPI): ) async def generate_key_pair(self, kind: CryptoKind) -> list[TypedKeyPair]: + assert isinstance(kind, CryptoKind) + return list( map( lambda x: TypedKeyPair(x), @@ -443,6 +477,7 @@ class _JsonVeilidAPI(VeilidAPI): return Timestamp(raise_api_result(await self.send_ndjson_request(Operation.NOW))) async def debug(self, command: str) -> str: + assert isinstance(command, str) return raise_api_result(await self.send_ndjson_request(Operation.DEBUG, command=command)) async def veilid_version_string(self) -> str: @@ -501,6 +536,8 @@ class _JsonRoutingContext(RoutingContext): self.done = True async def with_default_safety(self, release=True) -> Self: + assert isinstance(release, bool) + new_rc_id = raise_api_result( await self.api.send_ndjson_request( Operation.ROUTING_CONTEXT, @@ -514,6 +551,9 @@ class _JsonRoutingContext(RoutingContext): return self.__class__(self.api, new_rc_id) async def with_safety(self, safety_selection: SafetySelection, release=True) -> Self: + assert isinstance(safety_selection, SafetySelection) + assert isinstance(release, bool) + new_rc_id = raise_api_result( await self.api.send_ndjson_request( Operation.ROUTING_CONTEXT, @@ -528,6 +568,9 @@ class _JsonRoutingContext(RoutingContext): return self.__class__(self.api, new_rc_id) async def with_sequencing(self, sequencing: Sequencing, release=True) -> Self: + assert isinstance(sequencing, Sequencing) + assert isinstance(release, bool) + new_rc_id = raise_api_result( await self.api.send_ndjson_request( Operation.ROUTING_CONTEXT, @@ -555,6 +598,9 @@ class _JsonRoutingContext(RoutingContext): ) ) async def app_call(self, target: TypedKey | RouteId, message: bytes) -> bytes: + assert isinstance(target, TypedKey) or isinstance(target, RouteId) + assert isinstance(message, bytes) + return urlsafe_b64decode_no_pad( raise_api_result( await self.api.send_ndjson_request( @@ -569,6 +615,9 @@ class _JsonRoutingContext(RoutingContext): ) async def app_message(self, target: TypedKey | RouteId, message: bytes): + assert isinstance(target, TypedKey) or isinstance(target, RouteId) + assert isinstance(message, bytes) + raise_api_result( await self.api.send_ndjson_request( Operation.ROUTING_CONTEXT, @@ -583,6 +632,10 @@ class _JsonRoutingContext(RoutingContext): async def create_dht_record( self, schema: DHTSchema, owner: Optional[KeyPair] = None, kind: Optional[CryptoKind] = None ) -> DHTRecordDescriptor: + assert isinstance(schema, DHTSchema) + assert owner is None or isinstance(owner, KeyPair) + assert kind is None or isinstance(kind, CryptoKind) + return DHTRecordDescriptor.from_json( raise_api_result( await self.api.send_ndjson_request( @@ -600,6 +653,9 @@ class _JsonRoutingContext(RoutingContext): async def open_dht_record( self, key: TypedKey, writer: Optional[KeyPair] = None ) -> DHTRecordDescriptor: + assert isinstance(key, TypedKey) + assert writer is None or isinstance(writer, KeyPair) + return DHTRecordDescriptor.from_json( raise_api_result( await self.api.send_ndjson_request( @@ -614,6 +670,8 @@ class _JsonRoutingContext(RoutingContext): ) async def close_dht_record(self, key: 
TypedKey): + assert isinstance(key, TypedKey) + raise_api_result( await self.api.send_ndjson_request( Operation.ROUTING_CONTEXT, @@ -625,6 +683,8 @@ class _JsonRoutingContext(RoutingContext): ) async def delete_dht_record(self, key: TypedKey): + assert isinstance(key, TypedKey) + raise_api_result( await self.api.send_ndjson_request( Operation.ROUTING_CONTEXT, @@ -638,6 +698,10 @@ class _JsonRoutingContext(RoutingContext): async def get_dht_value( self, key: TypedKey, subkey: ValueSubkey, force_refresh: bool = False ) -> Optional[ValueData]: + assert isinstance(key, TypedKey) + assert isinstance(subkey, ValueSubkey) + assert isinstance(force_refresh, bool) + ret = raise_api_result( await self.api.send_ndjson_request( Operation.ROUTING_CONTEXT, @@ -654,6 +718,11 @@ class _JsonRoutingContext(RoutingContext): async def set_dht_value( self, key: TypedKey, subkey: ValueSubkey, data: bytes, writer: Optional[KeyPair] = None ) -> Optional[ValueData]: + assert isinstance(key, TypedKey) + assert isinstance(subkey, ValueSubkey) + assert isinstance(data, bytes) + assert writer is None or isinstance(writer, KeyPair) + ret = raise_api_result( await self.api.send_ndjson_request( Operation.ROUTING_CONTEXT, @@ -675,6 +744,15 @@ class _JsonRoutingContext(RoutingContext): expiration: Timestamp = 0, count: int = 0xFFFFFFFF, ) -> Timestamp: + assert isinstance(key, TypedKey) + assert isinstance(subkeys, list) + for s in subkeys: + assert isinstance(s, tuple) + assert isinstance(s[0], ValueSubkey) + assert isinstance(s[1], ValueSubkey) + assert isinstance(expiration, Timestamp) + assert isinstance(count, int) + return Timestamp( raise_api_result( await self.api.send_ndjson_request( @@ -693,6 +771,13 @@ class _JsonRoutingContext(RoutingContext): async def cancel_dht_watch( self, key: TypedKey, subkeys: list[tuple[ValueSubkey, ValueSubkey]] ) -> bool: + assert isinstance(key, TypedKey) + assert isinstance(subkeys, list) + for s in subkeys: + assert isinstance(s, tuple) + assert isinstance(s[0], ValueSubkey) + assert isinstance(s[1], ValueSubkey) + return raise_api_result( await self.api.send_ndjson_request( Operation.ROUTING_CONTEXT, @@ -710,6 +795,14 @@ class _JsonRoutingContext(RoutingContext): subkeys: list[tuple[ValueSubkey, ValueSubkey]], scope: DHTReportScope = DHTReportScope.LOCAL, ) -> DHTRecordReport: + assert isinstance(key, TypedKey) + assert isinstance(subkeys, list) + for s in subkeys: + assert isinstance(s, tuple) + assert isinstance(s[0], ValueSubkey) + assert isinstance(s[1], ValueSubkey) + assert isinstance(scope, DHTReportScope) + return DHTRecordReport.from_json( raise_api_result( await self.api.send_ndjson_request( @@ -790,6 +883,10 @@ class _JsonTableDbTransaction(TableDbTransaction): self.done = True async def store(self, key: bytes, value: bytes, col: int = 0): + assert isinstance(key, bytes) + assert isinstance(value, bytes) + assert isinstance(col, int) + await self.api.send_ndjson_request( Operation.TABLE_DB_TRANSACTION, validate=validate_tx_op, @@ -801,6 +898,9 @@ class _JsonTableDbTransaction(TableDbTransaction): ) async def delete(self, key: bytes, col: int = 0): + assert isinstance(key, bytes) + assert isinstance(col, int) + await self.api.send_ndjson_request( Operation.TABLE_DB_TRANSACTION, validate=validate_tx_op, @@ -866,6 +966,8 @@ class _JsonTableDb(TableDb): ) async def get_keys(self, col: int = 0) -> list[bytes]: + assert isinstance(col, int) + return list( map( lambda x: urlsafe_b64decode_no_pad(x), @@ -893,6 +995,10 @@ class _JsonTableDb(TableDb): return 
_JsonTableDbTransaction(self.api, tx_id) async def store(self, key: bytes, value: bytes, col: int = 0): + assert isinstance(key, bytes) + assert isinstance(value, bytes) + assert isinstance(col, int) + return raise_api_result( await self.api.send_ndjson_request( Operation.TABLE_DB, @@ -906,6 +1012,9 @@ class _JsonTableDb(TableDb): ) async def load(self, key: bytes, col: int = 0) -> Optional[bytes]: + assert isinstance(key, bytes) + assert isinstance(col, int) + res = raise_api_result( await self.api.send_ndjson_request( Operation.TABLE_DB, @@ -919,6 +1028,9 @@ class _JsonTableDb(TableDb): return None if res is None else urlsafe_b64decode_no_pad(res) async def delete(self, key: bytes, col: int = 0) -> Optional[bytes]: + assert isinstance(key, bytes) + assert isinstance(col, int) + res = raise_api_result( await self.api.send_ndjson_request( Operation.TABLE_DB, @@ -989,6 +1101,9 @@ class _JsonCryptoSystem(CryptoSystem): self.done = True async def cached_dh(self, key: PublicKey, secret: SecretKey) -> SharedSecret: + assert isinstance(key, PublicKey) + assert isinstance(secret, SecretKey) + return SharedSecret( raise_api_result( await self.api.send_ndjson_request( @@ -1003,6 +1118,9 @@ class _JsonCryptoSystem(CryptoSystem): ) async def compute_dh(self, key: PublicKey, secret: SecretKey) -> SharedSecret: + assert isinstance(key, PublicKey) + assert isinstance(secret, SecretKey) + return SharedSecret( raise_api_result( await self.api.send_ndjson_request( @@ -1017,6 +1135,10 @@ class _JsonCryptoSystem(CryptoSystem): ) async def generate_shared_secret(self, key: PublicKey, secret: SecretKey, domain: bytes) -> SharedSecret: + assert isinstance(key, PublicKey) + assert isinstance(secret, SecretKey) + assert isinstance(domain, bytes) + return SharedSecret( raise_api_result( await self.api.send_ndjson_request( @@ -1032,6 +1154,8 @@ class _JsonCryptoSystem(CryptoSystem): ) async def random_bytes(self, len: int) -> bytes: + assert isinstance(len, int) + return urlsafe_b64decode_no_pad( raise_api_result( await self.api.send_ndjson_request( @@ -1055,6 +1179,9 @@ class _JsonCryptoSystem(CryptoSystem): ) async def hash_password(self, password: bytes, salt: bytes) -> str: + assert isinstance(password, bytes) + assert isinstance(salt, bytes) + return raise_api_result( await self.api.send_ndjson_request( Operation.CRYPTO_SYSTEM, @@ -1067,6 +1194,9 @@ class _JsonCryptoSystem(CryptoSystem): ) async def verify_password(self, password: bytes, password_hash: str) -> bool: + assert isinstance(password, bytes) + assert isinstance(password_hash, str) + return raise_api_result( await self.api.send_ndjson_request( Operation.CRYPTO_SYSTEM, @@ -1079,6 +1209,9 @@ class _JsonCryptoSystem(CryptoSystem): ) async def derive_shared_secret(self, password: bytes, salt: bytes) -> SharedSecret: + assert isinstance(password, bytes) + assert isinstance(salt, bytes) + return SharedSecret( raise_api_result( await self.api.send_ndjson_request( @@ -1129,6 +1262,8 @@ class _JsonCryptoSystem(CryptoSystem): ) async def generate_hash(self, data: bytes) -> HashDigest: + assert isinstance(data, bytes) + return HashDigest( raise_api_result( await self.api.send_ndjson_request( @@ -1142,6 +1277,9 @@ class _JsonCryptoSystem(CryptoSystem): ) async def validate_key_pair(self, key: PublicKey, secret: SecretKey) -> bool: + assert isinstance(key, PublicKey) + assert isinstance(secret, SecretKey) + return raise_api_result( await self.api.send_ndjson_request( Operation.CRYPTO_SYSTEM, @@ -1154,6 +1292,9 @@ class _JsonCryptoSystem(CryptoSystem): ) async def 
validate_hash(self, data: bytes, hash_digest: HashDigest) -> bool: + assert isinstance(data, bytes) + assert isinstance(hash_digest, HashDigest) + return raise_api_result( await self.api.send_ndjson_request( Operation.CRYPTO_SYSTEM, @@ -1166,6 +1307,9 @@ class _JsonCryptoSystem(CryptoSystem): ) async def distance(self, key1: CryptoKey, key2: CryptoKey) -> CryptoKeyDistance: + assert isinstance(key1, CryptoKey) + assert isinstance(key2, CryptoKey) + return CryptoKeyDistance( raise_api_result( await self.api.send_ndjson_request( @@ -1180,6 +1324,10 @@ class _JsonCryptoSystem(CryptoSystem): ) async def sign(self, key: PublicKey, secret: SecretKey, data: bytes) -> Signature: + assert isinstance(key, PublicKey) + assert isinstance(secret, SecretKey) + assert isinstance(data, bytes) + return Signature( raise_api_result( await self.api.send_ndjson_request( @@ -1195,6 +1343,10 @@ class _JsonCryptoSystem(CryptoSystem): ) async def verify(self, key: PublicKey, data: bytes, signature: Signature): + assert isinstance(key, PublicKey) + assert isinstance(data, bytes) + assert isinstance(signature, Signature) + return raise_api_result( await self.api.send_ndjson_request( Operation.CRYPTO_SYSTEM, @@ -1224,6 +1376,11 @@ class _JsonCryptoSystem(CryptoSystem): shared_secret: SharedSecret, associated_data: Optional[bytes], ) -> bytes: + assert isinstance(body, bytes) + assert isinstance(nonce, Nonce) + assert isinstance(shared_secret, SharedSecret) + assert associated_data is None or isinstance(associated_data, bytes) + return urlsafe_b64decode_no_pad( raise_api_result( await self.api.send_ndjson_request( @@ -1246,6 +1403,11 @@ class _JsonCryptoSystem(CryptoSystem): shared_secret: SharedSecret, associated_data: Optional[bytes], ) -> bytes: + assert isinstance(body, bytes) + assert isinstance(nonce, Nonce) + assert isinstance(shared_secret, SharedSecret) + assert associated_data is None or isinstance(associated_data, bytes) + return urlsafe_b64decode_no_pad( raise_api_result( await self.api.send_ndjson_request( @@ -1262,6 +1424,9 @@ class _JsonCryptoSystem(CryptoSystem): ) async def crypt_no_auth(self, body: bytes, nonce: Nonce, shared_secret: SharedSecret) -> bytes: + assert isinstance(body, bytes) + assert isinstance(nonce, Nonce) + assert isinstance(shared_secret, SharedSecret) return urlsafe_b64decode_no_pad( raise_api_result( await self.api.send_ndjson_request( diff --git a/veilid-python/veilid/types.py b/veilid-python/veilid/types.py index 8ebad0fa..8ec6a782 100644 --- a/veilid-python/veilid/types.py +++ b/veilid-python/veilid/types.py @@ -2,7 +2,7 @@ import base64 import json from enum import StrEnum from functools import total_ordering -from typing import Any, Optional, Self, Tuple +from typing import Any, Optional, Self #################################################################### @@ -122,6 +122,7 @@ class EncodedString(str): @classmethod def from_bytes(cls, b: bytes) -> Self: + assert isinstance(b, bytes) return cls(urlsafe_b64encode_no_pad(b)) @@ -160,6 +161,8 @@ class Nonce(EncodedString): class KeyPair(str): @classmethod def from_parts(cls, key: PublicKey, secret: SecretKey) -> Self: + assert isinstance(key, PublicKey) + assert isinstance(secret, SecretKey) return cls(f"{key}:{secret}") def key(self) -> PublicKey: @@ -168,7 +171,7 @@ class KeyPair(str): def secret(self) -> SecretKey: return SecretKey(self.split(":", 1)[1]) - def to_parts(self) -> Tuple[PublicKey, SecretKey]: + def to_parts(self) -> tuple[PublicKey, SecretKey]: public, secret = self.split(":", 1) return (PublicKey(public), 
SecretKey(secret)) @@ -188,6 +191,8 @@ class CryptoTyped(str): class TypedKey(CryptoTyped): @classmethod def from_value(cls, kind: CryptoKind, value: PublicKey) -> Self: + assert isinstance(kind, CryptoKind) + assert isinstance(value, PublicKey) return cls(f"{kind}:{value}") def value(self) -> PublicKey: @@ -197,6 +202,8 @@ class TypedKey(CryptoTyped): class TypedSecret(CryptoTyped): @classmethod def from_value(cls, kind: CryptoKind, value: SecretKey) -> Self: + assert isinstance(kind, CryptoKind) + assert isinstance(value, SecretKey) return cls(f"{kind}:{value}") def value(self) -> SecretKey: @@ -206,6 +213,8 @@ class TypedSecret(CryptoTyped): class TypedKeyPair(CryptoTyped): @classmethod def from_value(cls, kind: CryptoKind, value: KeyPair) -> Self: + assert isinstance(kind, CryptoKind) + assert isinstance(value, KeyPair) return cls(f"{kind}:{value}") def value(self) -> KeyPair: @@ -215,6 +224,8 @@ class TypedKeyPair(CryptoTyped): class TypedSignature(CryptoTyped): @classmethod def from_value(cls, kind: CryptoKind, value: Signature) -> Self: + assert isinstance(kind, CryptoKind) + assert isinstance(value, Signature) return cls(f"{kind}:{value}") def value(self) -> Signature: @@ -226,7 +237,7 @@ class ValueSubkey(int): class ValueSeqNum(int): - pass + NONE = 4294967295 #################################################################### @@ -284,10 +295,13 @@ class NewPrivateRouteResult: blob: bytes def __init__(self, route_id: RouteId, blob: bytes): + assert isinstance(route_id, RouteId) + assert isinstance(blob, bytes) + self.route_id = route_id self.blob = blob - def to_tuple(self) -> Tuple[RouteId, bytes]: + def to_tuple(self) -> tuple[RouteId, bytes]: return (self.route_id, self.blob) @classmethod @@ -300,6 +314,9 @@ class DHTSchemaSMPLMember: m_cnt: int def __init__(self, m_key: PublicKey, m_cnt: int): + assert isinstance(m_key, PublicKey) + assert isinstance(m_cnt, int) + self.m_key = m_key self.m_cnt = m_cnt @@ -321,10 +338,15 @@ class DHTSchema: @classmethod def dflt(cls, o_cnt: int) -> Self: + assert isinstance(o_cnt, int) return cls(DHTSchemaKind.DFLT, o_cnt=o_cnt) @classmethod def smpl(cls, o_cnt: int, members: list[DHTSchemaSMPLMember]) -> Self: + assert isinstance(o_cnt, int) + assert isinstance(members, list) + for m in members: + assert isinstance(m, DHTSchemaSMPLMember) return cls(DHTSchemaKind.SMPL, o_cnt=o_cnt, members=members) @classmethod @@ -404,8 +426,8 @@ class DHTRecordReport: @classmethod def from_json(cls, j: dict) -> Self: return cls( - [[p[0], p[1]] for p in j["subkeys"]], - [[p[0], p[1]] for p in j["offline_subkeys"]], + [(p[0], p[1]) for p in j["subkeys"]], + [(p[0], p[1]) for p in j["offline_subkeys"]], [ValueSeqNum(s) for s in j["local_seqs"]], [ValueSeqNum(s) for s in j["network_seqs"]], ) From 6018d385e84143309a064643dfcd5713a77be29d Mon Sep 17 00:00:00 2001 From: Christien Rioux Date: Tue, 11 Mar 2025 13:30:12 -0400 Subject: [PATCH 14/17] [ci skip] fix veilid_api duration logging, add keyed spans to veilid-api tracing so they show up --- veilid-core/src/veilid_api/api.rs | 59 ++++++++------- veilid-core/src/veilid_api/routing_context.rs | 75 ++++++++++--------- veilid-python/tests/test_dht.py | 2 + 3 files changed, 77 insertions(+), 59 deletions(-) diff --git a/veilid-core/src/veilid_api/api.rs b/veilid-core/src/veilid_api/api.rs index e0a0790d..5e1fb4bb 100644 --- a/veilid-core/src/veilid_api/api.rs +++ b/veilid-core/src/veilid_api/api.rs @@ -1,5 +1,7 @@ use super::*; +impl_veilid_log_facility!("veilid_api"); + 
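The `assert isinstance(...)` checks added throughout the bindings and typed constructors above turn argument-type mistakes into immediate failures at the call site rather than malformed JSON requests. A small sketch of the calling convention this enforces (it assumes these wrapper types are re-exported at the `veilid` package top level, as the test suite uses them):

```python
import veilid

# Subkey indices are wrapped in ValueSubkey (an int subclass) so the new
# isinstance asserts pass; passing a bare int now raises AssertionError.
subkey = veilid.ValueSubkey(0)

# Sequence numbers use a sentinel for "no local value yet".
assert veilid.ValueSeqNum.NONE == 0xFFFFFFFF

# DHTRecordReport subkey ranges are (start, end) tuples rather than
# two-element lists, so they compare directly against literals like (0, 1).
assert (subkey, veilid.ValueSubkey(1)) == (0, 1)
```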
///////////////////////////////////////////////////////////////////////////////////////////////////// pub(super) struct VeilidAPIInner { @@ -41,10 +43,9 @@ pub struct VeilidAPI { } impl VeilidAPI { - #[instrument(target = "veilid_api", level = "debug", skip_all)] + #[instrument(target = "veilid_api", level = "debug", fields(__VEILID_LOG_KEY = context.log_key()), skip_all)] pub(crate) fn new(context: VeilidCoreContext) -> Self { - event!(target: "veilid_api", Level::DEBUG, - "VeilidAPI::new()"); + veilid_log!(context debug "VeilidAPI::new()"); Self { inner: Arc::new(Mutex::new(VeilidAPIInner { context: Some(context), @@ -59,10 +60,9 @@ impl VeilidAPI { } /// Shut down Veilid and terminate the API. - #[instrument(target = "veilid_api", level = "debug", skip_all)] + #[instrument(target = "veilid_api", level = "debug", fields(__VEILID_LOG_KEY = self.log_key()), skip_all)] pub async fn shutdown(self) { - event!(target: "veilid_api", Level::DEBUG, - "VeilidAPI::shutdown()"); + veilid_log!(self debug "VeilidAPI::shutdown()"); let context = { self.inner.lock().context.take() }; if let Some(context) = context { api_shutdown(context).await; @@ -152,6 +152,15 @@ impl VeilidAPI { callback(&mut inner.debug_cache) } + #[must_use] + pub(crate) fn log_key(&self) -> &str { + let inner = self.inner.lock(); + let Some(context) = &inner.context else { + return ""; + }; + context.log_key() + } + //////////////////////////////////////////////////////////////// // Attach/Detach @@ -174,9 +183,9 @@ impl VeilidAPI { } /// Connect to the network. - #[instrument(target = "veilid_api", level = "debug", skip_all, ret, err)] + #[instrument(target = "veilid_api", level = "debug", fields(__VEILID_LOG_KEY = self.log_key()), skip_all, ret, err)] pub async fn attach(&self) -> VeilidAPIResult<()> { - event!(target: "veilid_api", Level::DEBUG, + veilid_log!(self debug "VeilidAPI::attach()"); let attachment_manager = self.core_context()?.attachment_manager(); @@ -187,9 +196,9 @@ impl VeilidAPI { } /// Disconnect from the network. - #[instrument(target = "veilid_api", level = "debug", skip_all, ret, err)] + #[instrument(target = "veilid_api", level = "debug", fields(__VEILID_LOG_KEY = self.log_key()), skip_all, ret, err)] pub async fn detach(&self) -> VeilidAPIResult<()> { - event!(target: "veilid_api", Level::DEBUG, + veilid_log!(self debug "VeilidAPI::detach()"); let attachment_manager = self.core_context()?.attachment_manager(); @@ -203,9 +212,9 @@ impl VeilidAPI { // Routing Context /// Get a new `RoutingContext` object to use to send messages over the Veilid network with default safety, sequencing, and stability parameters. - #[instrument(target = "veilid_api", level = "debug", skip_all, err, ret)] + #[instrument(target = "veilid_api", level = "debug", fields(__VEILID_LOG_KEY = self.log_key()), skip_all, err, ret)] pub fn routing_context(&self) -> VeilidAPIResult { - event!(target: "veilid_api", Level::DEBUG, + veilid_log!(self debug "VeilidAPI::routing_context()"); RoutingContext::try_new(self.clone()) @@ -218,11 +227,11 @@ impl VeilidAPI { /// `VLD0:XmnGyJrjMJBRC5ayJZRPXWTBspdX36-pbLb98H3UMeE` but if the prefix is left off /// `XmnGyJrjMJBRC5ayJZRPXWTBspdX36-pbLb98H3UMeE` will be parsed with the 'best' cryptosystem /// available (at the time of this writing this is `VLD0`). 
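The kind-prefixed string form described above is the same convention the Python `TypedKey` wrapper uses: a four-character crypto kind, a colon, then the encoded key. A short sketch reusing the example value from the doc comment (the `CryptoKind("VLD0")` construction and the package-level exports are assumptions about veilid-python, not something shown in this patch):

```python
import veilid

# Full, kind-prefixed form as accepted by parse_as_target and the DHT calls.
typed_key = veilid.TypedKey("VLD0:XmnGyJrjMJBRC5ayJZRPXWTBspdX36-pbLb98H3UMeE")

# Equivalent construction from parts via the validated constructor.
kind = veilid.CryptoKind("VLD0")  # assumed spelling of the kind value
public_key = veilid.PublicKey("XmnGyJrjMJBRC5ayJZRPXWTBspdX36-pbLb98H3UMeE")
assert veilid.TypedKey.from_value(kind, public_key) == typed_key

# The bare key part can be recovered for APIs that only want the value.
assert typed_key.value() == public_key
```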
- #[instrument(target = "veilid_api", level = "debug", skip(self), fields(s=s.to_string()), ret, err)] + #[instrument(target = "veilid_api", level = "debug", skip(self), fields(__VEILID_LOG_KEY = self.log_key(), s=s.to_string()), ret, err)] pub fn parse_as_target(&self, s: S) -> VeilidAPIResult { let s = s.to_string(); - event!(target: "veilid_api", Level::DEBUG, + veilid_log!(self debug "VeilidAPI::parse_as_target(s: {:?})", s); // Is this a route id? @@ -272,14 +281,14 @@ impl VeilidAPI { /// /// Returns a route id and 'blob' that can be published over some means (DHT or otherwise) to be /// imported by another Veilid node. - #[instrument(target = "veilid_api", level = "debug", skip(self), ret)] + #[instrument(target = "veilid_api", level = "debug", fields(__VEILID_LOG_KEY = self.log_key()), skip(self), ret)] pub async fn new_custom_private_route( &self, crypto_kinds: &[CryptoKind], stability: Stability, sequencing: Sequencing, ) -> VeilidAPIResult<(RouteId, Vec)> { - event!(target: "veilid_api", Level::DEBUG, + veilid_log!(self debug "VeilidAPI::new_custom_private_route(crypto_kinds: {:?}, stability: {:?}, sequencing: {:?})", crypto_kinds, stability, @@ -336,9 +345,9 @@ impl VeilidAPI { /// Import a private route blob as a remote private route. /// /// Returns a route id that can be used to send private messages to the node creating this route. - #[instrument(target = "veilid_api", level = "debug", skip(self), ret, err)] + #[instrument(target = "veilid_api", level = "debug", fields(__VEILID_LOG_KEY = self.log_key()), skip(self), ret, err)] pub fn import_remote_private_route(&self, blob: Vec) -> VeilidAPIResult { - event!(target: "veilid_api", Level::DEBUG, + veilid_log!(self debug "VeilidAPI::import_remote_private_route(blob: {:?})", blob); let routing_table = self.core_context()?.routing_table(); let rss = routing_table.route_spec_store(); @@ -349,9 +358,9 @@ impl VeilidAPI { /// /// This will deactivate the route and free its resources and it can no longer be sent to /// or received from. - #[instrument(target = "veilid_api", level = "debug", skip(self), ret, err)] + #[instrument(target = "veilid_api", level = "debug", fields(__VEILID_LOG_KEY = self.log_key()), skip(self), ret, err)] pub fn release_private_route(&self, route_id: RouteId) -> VeilidAPIResult<()> { - event!(target: "veilid_api", Level::DEBUG, + veilid_log!(self debug "VeilidAPI::release_private_route(route_id: {:?})", route_id); let routing_table = self.core_context()?.routing_table(); let rss = routing_table.route_spec_store(); @@ -368,13 +377,13 @@ impl VeilidAPI { /// /// * `call_id` - specifies which call to reply to, and it comes from a [VeilidUpdate::AppCall], specifically the [VeilidAppCall::id()] value. /// * `message` - is an answer blob to be returned by the remote node's [RoutingContext::app_call()] function, and may be up to 32768 bytes. 
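Viewed from the Python bindings, the call/reply pair looks like the sketch below. It assumes the caller already holds a parsed `target` and that the responder has pulled the operation id out of its update callback; the exact `VeilidUpdate` field layout is not shown in this patch, so the id is passed in explicitly here:

```python
import veilid

async def ask(rc: veilid.RoutingContext, target) -> bytes:
    # Sends up to 32768 bytes and waits for the remote answer blob.
    return await rc.app_call(target, b"question")

async def answer(api: veilid.VeilidAPI, call_id, blob: bytes):
    # call_id comes from the VeilidUpdate delivered to the update callback;
    # blob is returned as the answer to the remote app_call() above.
    await api.app_call_reply(call_id, blob)
```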
- #[instrument(target = "veilid_api", level = "debug", skip(self), ret, err)] + #[instrument(target = "veilid_api", level = "debug", fields(__VEILID_LOG_KEY = self.log_key()), skip(self), ret, err)] pub async fn app_call_reply( &self, call_id: OperationId, message: Vec, ) -> VeilidAPIResult<()> { - event!(target: "veilid_api", Level::DEBUG, + veilid_log!(self debug "VeilidAPI::app_call_reply(call_id: {:?}, message: {:?})", call_id, message); let rpc_processor = self.core_context()?.rpc_processor(); @@ -387,7 +396,7 @@ impl VeilidAPI { // Tunnel Building #[cfg(feature = "unstable-tunnels")] - #[instrument(target = "veilid_api", level = "debug", skip(self), ret, err)] + #[instrument(target = "veilid_api", level = "debug", fields(__VEILID_LOG_KEY = self.log_key()), skip(self), ret, err)] pub async fn start_tunnel( &self, _endpoint_mode: TunnelMode, @@ -397,7 +406,7 @@ impl VeilidAPI { } #[cfg(feature = "unstable-tunnels")] - #[instrument(target = "veilid_api", level = "debug", skip(self), ret, err)] + #[instrument(target = "veilid_api", level = "debug", fields(__VEILID_LOG_KEY = self.log_key()), skip(self), ret, err)] pub async fn complete_tunnel( &self, _endpoint_mode: TunnelMode, @@ -408,7 +417,7 @@ impl VeilidAPI { } #[cfg(feature = "unstable-tunnels")] - #[instrument(target = "veilid_api", level = "debug", skip(self), ret, err)] + #[instrument(target = "veilid_api", level = "debug", fields(__VEILID_LOG_KEY = self.log_key()), skip(self), ret, err)] pub async fn cancel_tunnel(&self, _tunnel_id: TunnelId) -> VeilidAPIResult { panic!("unimplemented"); } diff --git a/veilid-core/src/veilid_api/routing_context.rs b/veilid-core/src/veilid_api/routing_context.rs index 5e212bf4..07d90a85 100644 --- a/veilid-core/src/veilid_api/routing_context.rs +++ b/veilid-core/src/veilid_api/routing_context.rs @@ -1,5 +1,7 @@ use super::*; +impl_veilid_log_facility!("veilid_api"); + /////////////////////////////////////////////////////////////////////////////////////// /// Valid destinations for a message sent over a routing context. @@ -62,6 +64,11 @@ impl RoutingContext { }) } + #[must_use] + pub(crate) fn log_key(&self) -> &str { + self.api.log_key() + } + /// Turn on sender privacy, enabling the use of safety routes. This is the default and /// calling this function is only necessary if you have previously disable safety or used other parameters. /// @@ -72,9 +79,9 @@ impl RoutingContext { /// * Sequencing default is to prefer ordered before unordered message delivery. /// /// To customize the safety selection in use, use [RoutingContext::with_safety()]. - #[instrument(target = "veilid_api", level = "debug", ret, err)] + #[instrument(target = "veilid_api", level = "debug", fields(__VEILID_LOG_KEY = self.log_key()), ret, err)] pub fn with_default_safety(self) -> VeilidAPIResult { - event!(target: "veilid_api", Level::DEBUG, + veilid_log!(self debug "RoutingContext::with_default_safety(self: {:?})", self); let config = self.api.config()?; @@ -89,9 +96,9 @@ impl RoutingContext { } /// Use a custom [SafetySelection]. Can be used to disable safety via [SafetySelection::Unsafe]. 
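The veilid-python routing context exposes the same knobs through its builder-style methods. A brief sketch (the `Sequencing.ENSURE_ORDERED` member name is an assumption about the Python enum; substitute whichever preference the application needs):

```python
import veilid

async def make_ordered_context(api: veilid.VeilidAPI) -> veilid.RoutingContext:
    # Contexts start with default safety; with_sequencing() returns a new
    # context (releasing the old one by default) with the given preference.
    rc = await api.new_routing_context()
    return await rc.with_sequencing(veilid.Sequencing.ENSURE_ORDERED)
```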
- #[instrument(target = "veilid_api", level = "debug", ret, err)] + #[instrument(target = "veilid_api", level = "debug", fields(__VEILID_LOG_KEY = self.log_key()), ret, err)] pub fn with_safety(self, safety_selection: SafetySelection) -> VeilidAPIResult { - event!(target: "veilid_api", Level::DEBUG, + veilid_log!(self debug "RoutingContext::with_safety(self: {:?}, safety_selection: {:?})", self, safety_selection); Ok(Self { @@ -101,9 +108,9 @@ impl RoutingContext { } /// Use a specified [Sequencing] preference, with or without privacy. - #[instrument(target = "veilid_api", level = "debug", ret)] + #[instrument(target = "veilid_api", level = "debug", fields(__VEILID_LOG_KEY = self.log_key()), ret)] pub fn with_sequencing(self, sequencing: Sequencing) -> Self { - event!(target: "veilid_api", Level::DEBUG, + veilid_log!(self debug "RoutingContext::with_sequencing(self: {:?}, sequencing: {:?})", self, sequencing); Self { @@ -140,9 +147,9 @@ impl RoutingContext { self.api.clone() } - #[instrument(target = "veilid_api", level = "debug", ret, err)] + #[instrument(target = "veilid_api", level = "debug", fields(__VEILID_LOG_KEY = self.log_key()), ret, err)] async fn get_destination(&self, target: Target) -> VeilidAPIResult { - event!(target: "veilid_api", Level::DEBUG, + veilid_log!(self debug "RoutingContext::get_destination(self: {:?}, target: {:?})", self, target); let rpc_processor = self.api.core_context()?.rpc_processor(); @@ -165,9 +172,9 @@ impl RoutingContext { /// * `message` - an arbitrary message blob of up to 32768 bytes. /// /// Returns an answer blob of up to 32768 bytes. - #[instrument(target = "veilid_api", level = "debug", ret, err)] + #[instrument(target = "veilid_api", level = "debug", fields(__VEILID_LOG_KEY = self.log_key()), ret, err)] pub async fn app_call(&self, target: Target, message: Vec) -> VeilidAPIResult> { - event!(target: "veilid_api", Level::DEBUG, + veilid_log!(self debug "RoutingContext::app_call(self: {:?}, target: {:?}, message: {:?})", self, target, message); let rpc_processor = self.api.core_context()?.rpc_processor(); @@ -199,9 +206,9 @@ impl RoutingContext { /// /// * `target` - can be either a direct node id or a private route. /// * `message` - an arbitrary message blob of up to 32768 bytes. - #[instrument(target = "veilid_api", level = "debug", ret, err)] + #[instrument(target = "veilid_api", level = "debug", fields(__VEILID_LOG_KEY = self.log_key()), ret, err)] pub async fn app_message(&self, target: Target, message: Vec) -> VeilidAPIResult<()> { - event!(target: "veilid_api", Level::DEBUG, + veilid_log!(self debug "RoutingContext::app_message(self: {:?}, target: {:?}, message: {:?})", self, target, message); let rpc_processor = self.api.core_context()?.rpc_processor(); @@ -230,14 +237,14 @@ impl RoutingContext { /// DHT Records /// Deterministicly builds the record key for a given schema and owner public key - #[instrument(target = "veilid_api", level = "debug", ret, err)] + #[instrument(target = "veilid_api", level = "debug", fields(__VEILID_LOG_KEY = self.log_key()), ret, err)] pub fn get_dht_record_key( &self, schema: DHTSchema, owner_key: &PublicKey, kind: Option, ) -> VeilidAPIResult { - event!(target: "veilid_api", Level::DEBUG, + veilid_log!(self debug "RoutingContext::get_dht_record_key(self: {:?}, schema: {:?}, owner_key: {:?}, kind: {:?})", self, schema, owner_key, kind); schema.validate()?; @@ -256,14 +263,14 @@ impl RoutingContext { /// Returns the newly allocated DHT record's key if successful. 
/// /// Note: if you pass in an owner keypair this call is a deterministic! This means that if you try to create a new record for a given owner and schema that already exists it *will* fail. - #[instrument(target = "veilid_api", level = "debug", ret, err)] + #[instrument(target = "veilid_api", level = "debug", fields(__VEILID_LOG_KEY = self.log_key()), ret, err)] pub async fn create_dht_record( &self, schema: DHTSchema, owner: Option, kind: Option, ) -> VeilidAPIResult { - event!(target: "veilid_api", Level::DEBUG, + veilid_log!(self debug "RoutingContext::create_dht_record(self: {:?}, schema: {:?}, owner: {:?}, kind: {:?})", self, schema, owner, kind); schema.validate()?; @@ -291,13 +298,13 @@ impl RoutingContext { /// safety selection. /// /// Returns the DHT record descriptor for the opened record if successful. - #[instrument(target = "veilid_api", level = "debug", ret, err)] + #[instrument(target = "veilid_api", level = "debug", fields(__VEILID_LOG_KEY = self.log_key()), ret, err)] pub async fn open_dht_record( &self, key: TypedKey, default_writer: Option, ) -> VeilidAPIResult { - event!(target: "veilid_api", Level::DEBUG, + veilid_log!(self debug "RoutingContext::open_dht_record(self: {:?}, key: {:?}, default_writer: {:?})", self, key, default_writer); Crypto::validate_crypto_kind(key.kind)?; @@ -311,9 +318,9 @@ impl RoutingContext { /// Closes a DHT record at a specific key that was opened with create_dht_record or open_dht_record. /// /// Closing a record allows you to re-open it with a different routing context. - #[instrument(target = "veilid_api", level = "debug", ret, err)] + #[instrument(target = "veilid_api", level = "debug", fields(__VEILID_LOG_KEY = self.log_key()), ret, err)] pub async fn close_dht_record(&self, key: TypedKey) -> VeilidAPIResult<()> { - event!(target: "veilid_api", Level::DEBUG, + veilid_log!(self debug "RoutingContext::close_dht_record(self: {:?}, key: {:?})", self, key); Crypto::validate_crypto_kind(key.kind)?; @@ -327,9 +334,9 @@ impl RoutingContext { /// If the record is opened, it must be closed before it is deleted. /// Deleting a record does not delete it from the network, but will remove the storage of the record /// locally, and will prevent its value from being refreshed on the network by this node. - #[instrument(target = "veilid_api", level = "debug", ret, err)] + #[instrument(target = "veilid_api", level = "debug", fields(__VEILID_LOG_KEY = self.log_key()), ret, err)] pub async fn delete_dht_record(&self, key: TypedKey) -> VeilidAPIResult<()> { - event!(target: "veilid_api", Level::DEBUG, + veilid_log!(self debug "RoutingContext::delete_dht_record(self: {:?}, key: {:?})", self, key); Crypto::validate_crypto_kind(key.kind)?; @@ -344,14 +351,14 @@ impl RoutingContext { /// /// Returns `None` if the value subkey has not yet been set. /// Returns `Some(data)` if the value subkey has valid data. - #[instrument(target = "veilid_api", level = "debug", ret, err)] + #[instrument(target = "veilid_api", level = "debug", fields(__VEILID_LOG_KEY = self.log_key()), ret, err)] pub async fn get_dht_value( &self, key: TypedKey, subkey: ValueSubkey, force_refresh: bool, ) -> VeilidAPIResult> { - event!(target: "veilid_api", Level::DEBUG, + veilid_log!(self debug "RoutingContext::get_dht_value(self: {:?}, key: {:?}, subkey: {:?}, force_refresh: {:?})", self, key, subkey, force_refresh); Crypto::validate_crypto_kind(key.kind)?; @@ -367,7 +374,7 @@ impl RoutingContext { /// /// Returns `None` if the value was successfully put. 
/// Returns `Some(data)` if the value put was older than the one available on the network. - #[instrument(target = "veilid_api", level = "debug", skip(data), fields(data = print_data(&data, Some(64))), ret, err)] + #[instrument(target = "veilid_api", level = "debug", skip(data), fields(__VEILID_LOG_KEY = self.log_key(), data = print_data(&data, Some(64))), ret, err)] pub async fn set_dht_value( &self, key: TypedKey, @@ -375,7 +382,7 @@ impl RoutingContext { data: Vec, writer: Option, ) -> VeilidAPIResult> { - event!(target: "veilid_api", Level::DEBUG, + veilid_log!(self debug "RoutingContext::set_dht_value(self: {:?}, key: {:?}, subkey: {:?}, data: len={}, writer: {:?})", self, key, subkey, data.len(), writer); Crypto::validate_crypto_kind(key.kind)?; @@ -404,7 +411,7 @@ impl RoutingContext { /// * If a member (either the owner or a SMPL schema member) has opened the key for writing (even if no writing is performed) then the watch will be signed and guaranteed network.dht.member_watch_limit per writer. /// /// Members can be specified via the SMPL schema and do not need to allocate writable subkeys in order to offer a member watch capability. - #[instrument(target = "veilid_api", level = "debug", ret, err)] + #[instrument(target = "veilid_api", level = "debug", fields(__VEILID_LOG_KEY = self.log_key()), ret, err)] pub async fn watch_dht_values( &self, key: TypedKey, @@ -412,7 +419,7 @@ impl RoutingContext { expiration: Timestamp, count: u32, ) -> VeilidAPIResult { - event!(target: "veilid_api", Level::DEBUG, + veilid_log!(self debug "RoutingContext::watch_dht_values(self: {:?}, key: {:?}, subkeys: {:?}, expiration: {}, count: {})", self, key, subkeys, expiration, count); Crypto::validate_crypto_kind(key.kind)?; @@ -430,13 +437,13 @@ impl RoutingContext { /// /// Returns Ok(true) if there is any remaining watch for this record. /// Returns Ok(false) if the entire watch has been cancelled. - #[instrument(target = "veilid_api", level = "debug", ret, err)] + #[instrument(target = "veilid_api", level = "debug", fields(__VEILID_LOG_KEY = self.log_key()), ret, err)] pub async fn cancel_dht_watch( &self, key: TypedKey, subkeys: ValueSubkeyRangeSet, ) -> VeilidAPIResult { - event!(target: "veilid_api", Level::DEBUG, + veilid_log!(self debug "RoutingContext::cancel_dht_watch(self: {:?}, key: {:?}, subkeys: {:?}", self, key, subkeys); Crypto::validate_crypto_kind(key.kind)?; @@ -484,14 +491,14 @@ impl RoutingContext { /// Useful for determine which subkeys would change with an SetValue operation. /// /// Returns a DHTRecordReport with the subkey ranges that were returned that overlapped the schema, and sequence numbers for each of the subkeys in the range. 
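In the Python bindings this report is what the tests poll while offline writes flush. A condensed sketch of that flow, assuming `rc` is an open routing context and `key` is an opened, writable record (the helper name is illustrative, not part of the patch):

```python
import asyncio
import veilid

async def write_and_flush(rc: veilid.RoutingContext, key: veilid.TypedKey, data: bytes) -> bytes | None:
    # Write one subkey, then poll the record report until the offline
    # subkey ranges have drained (i.e. the write reached the network).
    await rc.set_dht_value(key, veilid.ValueSubkey(0), data)
    while True:
        rr = await rc.inspect_dht_record(key, [])
        offline = sum(b - a + 1 for (a, b) in rr.offline_subkeys)
        if offline == 0:
            # The stress tests additionally require that no inspected subkey
            # reports ValueSeqNum.NONE once every subkey has been written.
            break
        await asyncio.sleep(1)
    # A forced refresh then fetches the flushed value back from the network.
    vd = await rc.get_dht_value(key, veilid.ValueSubkey(0), force_refresh=True)
    return None if vd is None else vd.data
```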
- #[instrument(target = "veilid_api", level = "debug", ret, err)] + #[instrument(target = "veilid_api", level = "debug", fields(__VEILID_LOG_KEY = self.log_key()), ret, err)] pub async fn inspect_dht_record( &self, key: TypedKey, subkeys: ValueSubkeyRangeSet, scope: DHTReportScope, ) -> VeilidAPIResult { - event!(target: "veilid_api", Level::DEBUG, + veilid_log!(self debug "RoutingContext::inspect_dht_record(self: {:?}, key: {:?}, subkeys: {:?}, scope: {:?})", self, key, subkeys, scope); Crypto::validate_crypto_kind(key.kind)?; @@ -504,13 +511,13 @@ impl RoutingContext { /// Block Store #[cfg(feature = "unstable-blockstore")] - #[instrument(target = "veilid_api", level = "debug", ret, err)] + #[instrument(target = "veilid_api", level = "debug", fields(__VEILID_LOG_KEY = self.log_key()), ret, err)] pub async fn find_block(&self, _block_id: PublicKey) -> VeilidAPIResult> { panic!("unimplemented"); } #[cfg(feature = "unstable-blockstore")] - #[instrument(target = "veilid_api", level = "debug", ret, err)] + #[instrument(target = "veilid_api", level = "debug", fields(__VEILID_LOG_KEY = self.log_key()), ret, err)] pub async fn supply_block(&self, _block_id: PublicKey) -> VeilidAPIResult { panic!("unimplemented"); } diff --git a/veilid-python/tests/test_dht.py b/veilid-python/tests/test_dht.py index 7cd46c62..7d9a2eae 100644 --- a/veilid-python/tests/test_dht.py +++ b/veilid-python/tests/test_dht.py @@ -824,6 +824,8 @@ async def sync_win( win.addstr(n+2, 1, " " * subkey_count, curses.color_pair(1)) win.refresh() + time.sleep(.5) + curses.endwin() From 3a8150c0627cbf5ca240762770db8d4bb18c6b87 Mon Sep 17 00:00:00 2001 From: Christien Rioux Date: Tue, 11 Mar 2025 21:39:56 -0400 Subject: [PATCH 15/17] [ci skip] fix wasm unit tests --- Cargo.lock | 15 +------ veilid-core/Cargo.toml | 2 +- .../record_store/inspect_cache.rs | 4 +- .../src/table_store/tests/test_table_store.rs | 45 +++++++++++++++++++ veilid-core/tests/web.rs | 4 +- veilid-tools/Cargo.toml | 2 +- veilid-tools/tests/web.rs | 4 +- 7 files changed, 55 insertions(+), 21 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index f1b9df07..a72c3ffe 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6090,17 +6090,6 @@ dependencies = [ "tracing-log 0.2.0", ] -[[package]] -name = "tracing-wasm" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4575c663a174420fa2d78f4108ff68f65bf2fbb7dd89f33749b6e826b3626e07" -dependencies = [ - "tracing", - "tracing-subscriber", - "wasm-bindgen", -] - [[package]] name = "triomphe" version = "0.1.14" @@ -6483,12 +6472,12 @@ dependencies = [ "tracing-error", "tracing-oslog", "tracing-subscriber", - "tracing-wasm", "tsify", "veilid-bugsalot", "veilid-hashlink", "veilid-igd", "veilid-tools", + "veilid-tracing-wasm", "wasm-bindgen", "wasm-bindgen-futures", "wasm-bindgen-test", @@ -6680,9 +6669,9 @@ dependencies = [ "tracing", "tracing-oslog", "tracing-subscriber", - "tracing-wasm", "validator", "veilid-bugsalot", + "veilid-tracing-wasm", "wasm-bindgen", "wasm-bindgen-futures", "wasm-bindgen-test", diff --git a/veilid-core/Cargo.toml b/veilid-core/Cargo.toml index df969792..e37c70dd 100644 --- a/veilid-core/Cargo.toml +++ b/veilid-core/Cargo.toml @@ -216,7 +216,6 @@ ws_stream_wasm = "0.7.4" # Logging wasm-logger = "0.2.0" -tracing-wasm = "0.2.1" # Data Structures keyvaluedb-web = "0.1.2" @@ -272,6 +271,7 @@ serial_test = { version = "2.0.0", default-features = false, features = [ wasm-bindgen-test = "0.3.50" console_error_panic_hook = "0.1.7" wasm-logger = "0.2.0" 
+veilid-tracing-wasm = "^0" ### BUILD OPTIONS diff --git a/veilid-core/src/storage_manager/record_store/inspect_cache.rs b/veilid-core/src/storage_manager/record_store/inspect_cache.rs index 12e2136e..01e28f5f 100644 --- a/veilid-core/src/storage_manager/record_store/inspect_cache.rs +++ b/veilid-core/src/storage_manager/record_store/inspect_cache.rs @@ -68,9 +68,9 @@ impl InspectCache { }; if idx < entry.1.seqs.len() { entry.1.seqs[idx] = seq; - } else if idx > entry.1.seqs.len() { + } else { panic!( - "representational error in l2 inspect cache: {} > {}", + "representational error in l2 inspect cache: {} >= {}", idx, entry.1.seqs.len() ) diff --git a/veilid-core/src/table_store/tests/test_table_store.rs b/veilid-core/src/table_store/tests/test_table_store.rs index edf72ae0..207b9e48 100644 --- a/veilid-core/src/table_store/tests/test_table_store.rs +++ b/veilid-core/src/table_store/tests/test_table_store.rs @@ -1,5 +1,6 @@ use crate::tests::test_veilid_config::*; use crate::*; +use futures_util::StreamExt as _; async fn startup() -> VeilidAPI { trace!("test_table_store: starting"); @@ -266,11 +267,55 @@ pub async fn test_protect_unprotect(vcrypto: &AsyncCryptoSystemGuard<'_>, ts: &T } } +pub async fn test_store_load_json_many(ts: &TableStore) { + trace!("test_json"); + + let _ = ts.delete("test").await; + let db = ts.open("test", 3).await.expect("should have opened"); + + let rows = 16; + let valuesize = 32768; + let parallel = 10; + + let value = vec!["ABCD".to_string(); valuesize]; + + let mut unord = FuturesUnordered::new(); + + let mut r = 0; + let start_ts = Timestamp::now(); + loop { + while r < rows && unord.len() < parallel { + let key = format!("key_{}", r); + r += 1; + + unord.push(Box::pin(async { + let key = key; + db.store_json(0, key.as_bytes(), &value) + .await + .expect("should store"); + let value2 = db + .load_json::>(0, key.as_bytes()) + .await + .expect("should load") + .expect("should exist"); + assert_eq!(value, value2); + })); + } + if unord.next().await.is_none() { + break; + } + } + let end_ts = Timestamp::now(); + trace!("test_store_load_json_many duration={}", (end_ts - start_ts)); +} + pub async fn test_all() { let api = startup().await; let crypto = api.crypto().unwrap(); let ts = api.table_store().unwrap(); + test_store_load_json_many(&ts).await; + for ck in VALID_CRYPTO_KINDS { let vcrypto = crypto.get_async(ck).unwrap(); test_protect_unprotect(&vcrypto, &ts).await; diff --git a/veilid-core/tests/web.rs b/veilid-core/tests/web.rs index 2bbfbac5..ff142536 100644 --- a/veilid-core/tests/web.rs +++ b/veilid-core/tests/web.rs @@ -18,8 +18,8 @@ pub fn setup() -> () { let config = veilid_tracing_wasm::WASMLayerConfig::new() .with_report_logs_in_timings(false) .with_max_level(Level::TRACE) - .with_console_config(tracing_wasm::ConsoleConfig::ReportWithoutConsoleColor); - tracing_wasm::set_as_global_default_with_config(config); + .with_console_config(veilid_tracing_wasm::ConsoleConfig::ReportWithoutConsoleColor); + veilid_tracing_wasm::set_as_global_default_with_config(config); }); } diff --git a/veilid-tools/Cargo.toml b/veilid-tools/Cargo.toml index 60a4fde7..68c6d5cb 100644 --- a/veilid-tools/Cargo.toml +++ b/veilid-tools/Cargo.toml @@ -185,7 +185,7 @@ serial_test = { version = "2.0.0", default-features = false, features = [ console_error_panic_hook = "0.1.7" wasm-bindgen-test = "0.3.50" wasm-logger = "0.2.0" -tracing-wasm = { version = "0.2.1" } +veilid-tracing-wasm = "^0" ### BUILD OPTIONS diff --git a/veilid-tools/tests/web.rs b/veilid-tools/tests/web.rs index 
f54309fd..0ff48b5b 100644 --- a/veilid-tools/tests/web.rs +++ b/veilid-tools/tests/web.rs @@ -18,8 +18,8 @@ pub fn setup() -> () { let config = veilid_tracing_wasm::WASMLayerConfig::new() .with_report_logs_in_timings(false); .with_max_level(Level::TRACE); - .with_console_config(tracing_wasm::ConsoleConfig::ReportWithoutConsoleColor); - tracing_wasm::set_as_global_default_with_config(config); + .with_console_config(veilid_tracing_wasm::ConsoleConfig::ReportWithoutConsoleColor); + veilid_tracing_wasm::set_as_global_default_with_config(config); } else { wasm_logger::init(wasm_logger::Config::default()); } From 85d0e1ea109008615365db49fcd4a09e63f31f88 Mon Sep 17 00:00:00 2001 From: carvilsi Date: Fri, 14 Mar 2025 12:19:54 +0100 Subject: [PATCH 16/17] updates README code snippet to work with current version --- README-pt_BR.md | 2 +- README.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/README-pt_BR.md b/README-pt_BR.md index 63713d17..f2ee033c 100644 --- a/README-pt_BR.md +++ b/README-pt_BR.md @@ -67,7 +67,7 @@ async fn main() { }; let veilid = veilid_core::api_startup_config(update_callback, config).await.unwrap(); - println!("Node ID: {}", veilid.config().unwrap().get_veilid_state().config.network.routing_table.node_id); + println!("Node ID: {}", veilid.config().unwrap().get().network.routing_table.node_id); veilid.attach().await.unwrap(); // Until CTRL+C is pressed, keep running tokio::signal::ctrl_c().await.unwrap(); diff --git a/README.md b/README.md index 64730488..bede35a7 100644 --- a/README.md +++ b/README.md @@ -66,7 +66,7 @@ async fn main() { }; let veilid = veilid_core::api_startup_config(update_callback, config).await.unwrap(); - println!("Node ID: {}", veilid.config().unwrap().get_veilid_state().config.network.routing_table.node_id); + println!("Node ID: {}", veilid.config().unwrap().get().network.routing_table.node_id); veilid.attach().await.unwrap(); // Until CTRL+C is pressed, keep running tokio::signal::ctrl_c().await.unwrap(); From 15f3c6cf779a2f0f0439b9628652070275ead0b7 Mon Sep 17 00:00:00 2001 From: Christien Rioux Date: Fri, 14 Mar 2025 13:35:12 -0400 Subject: [PATCH 17/17] [ci skip] simplify upnp code --- .../native/discovery_context.rs | 425 +++++++++--------- veilid-core/src/network_manager/send_data.rs | 10 +- veilid-flutter/example/pubspec.lock | 88 ++-- 3 files changed, 266 insertions(+), 257 deletions(-) diff --git a/veilid-core/src/network_manager/native/discovery_context.rs b/veilid-core/src/network_manager/native/discovery_context.rs index b2b221ed..7f8b87fc 100644 --- a/veilid-core/src/network_manager/native/discovery_context.rs +++ b/veilid-core/src/network_manager/native/discovery_context.rs @@ -7,9 +7,6 @@ use stop_token::future::FutureExt as _; impl_veilid_log_facility!("net"); -const PORT_MAP_VALIDATE_TRY_COUNT: usize = 3; -const PORT_MAP_VALIDATE_DELAY_MS: u32 = 500; -const PORT_MAP_TRY_COUNT: usize = 3; const EXTERNAL_INFO_NODE_COUNT: usize = 20; const EXTERNAL_INFO_CONCURRENCY: usize = 20; const EXTERNAL_INFO_VALIDATIONS: usize = 5; @@ -121,7 +118,8 @@ struct ExternalInfo { } struct DiscoveryContextInner { - external_info: Vec, + external_infos: Vec, + mapped_dial_info: Option, } pub(super) struct DiscoveryContextUnlockedInner { @@ -163,7 +161,8 @@ impl DiscoveryContext { registry, unlocked_inner: Arc::new(DiscoveryContextUnlockedInner { config, intf_addrs }), inner: Arc::new(Mutex::new(DiscoveryContextInner { - external_info: Vec::new(), + external_infos: Vec::new(), + mapped_dial_info: None, })), stop_token, } @@ -405,11 
+404,11 @@ impl DiscoveryContext { { let mut inner = self.inner.lock(); - inner.external_info = external_address_infos; + inner.external_infos = external_address_infos; veilid_log!(self debug "External Addresses ({:?}:{:?}):\n{}", protocol_type, address_type, - inner.external_info.iter().map(|x| format!(" {} <- {}",x.address, x.node)).collect::>().join("\n")); + inner.external_infos.iter().map(|x| format!(" {} <- {}",x.address, x.node)).collect::>().join("\n")); } true @@ -454,77 +453,87 @@ impl DiscoveryContext { AddressType::IPV4 => IGDAddressType::IPV4, }; - let external_1 = self.inner.lock().external_info.first().unwrap().clone(); - let igd_manager = self.network_manager().net().igd_manager.clone(); - let mut tries = 0; - loop { - tries += 1; - // Attempt a port mapping. If this doesn't succeed, it's not going to - let mapped_external_address = igd_manager - .map_any_port( - igd_protocol_type, - igd_address_type, - local_port, - Some(external_1.address.ip_addr()), - ) - .await?; + // Attempt a port mapping. If this doesn't succeed, it's not going to + let mapped_external_address = igd_manager + .map_any_port(igd_protocol_type, igd_address_type, local_port, None) + .await?; - // Make dial info from the port mapping - let external_mapped_dial_info = self.network_manager().net().make_dial_info( - SocketAddress::from_socket_addr(mapped_external_address), - protocol_type, - ); + // Make dial info from the port mapping + let external_mapped_dial_info = self.network_manager().net().make_dial_info( + SocketAddress::from_socket_addr(mapped_external_address), + protocol_type, + ); - // Attempt to validate the port mapping - let mut validate_tries = 0; - loop { - validate_tries += 1; + Some(external_mapped_dial_info) + } - // Ensure people can reach us. If we're firewalled off, this is useless - if self - .validate_dial_info( - external_1.node.clone(), - external_mapped_dial_info.clone(), - false, - ) - .await - { - return Some(external_mapped_dial_info); - } - - if validate_tries != PORT_MAP_VALIDATE_TRY_COUNT { - veilid_log!(self debug "UPNP port mapping succeeded but port {}/{} is still unreachable.\nretrying\n", - local_port, igd_protocol_type); - sleep(PORT_MAP_VALIDATE_DELAY_MS).await - } else { - break; - } - } - - // Release the mapping if we're still unreachable - let _ = igd_manager - .unmap_port( - igd_protocol_type, - igd_address_type, - external_1.address.port(), - ) - .await; - - if tries == PORT_MAP_TRY_COUNT { - veilid_log!(self warn "UPNP port mapping succeeded but port {}/{} is still unreachable.\nYou may need to add a local firewall allowed port on this machine.\n", - local_port, igd_protocol_type - ); - break; + fn matches_mapped_dial_info(&self, dial_info: &DialInfo) -> bool { + let mut skip = false; + if let Some(mapped_dial_info) = self.inner.lock().mapped_dial_info.as_ref() { + if mapped_dial_info == dial_info { + skip = true; } } - None + skip } /////// // Per-protocol discovery routines + // If we know we are not behind NAT, check our firewall status + #[instrument(level = "trace", skip(self), ret)] + fn protocol_process_mapped_dial_info( + &self, + all_possibilities: &mut DialInfoClassAllPossibilities, + unord: &mut FuturesUnordered>, + ) { + let (external_infos, mapped_dial_info) = { + let inner = self.inner.lock(); + let Some(mapped_dial_info) = inner.mapped_dial_info.clone() else { + return; + }; + + (inner.external_infos.clone(), mapped_dial_info) + }; + + // Have all the external validator nodes check us + for external_info in external_infos { + let possibilities = 
vec![(DialInfoClass::Mapped, 1)]; + all_possibilities.add(&possibilities); + + let this = self.clone(); + let mapped_dial_info = mapped_dial_info.clone(); + let do_no_nat_fut: PinBoxFutureStatic = Box::pin(async move { + // Do a validate_dial_info on the external address from a redirected node + if this + .validate_dial_info(external_info.node.clone(), mapped_dial_info.clone(), true) + .await + { + // Add public dial info with Direct dialinfo class + DetectionResultKind::Result { + possibilities, + result: DetectionResult { + config: this.config, + ddi: DetectedDialInfo::Detected(DialInfoDetail { + dial_info: mapped_dial_info.clone(), + class: DialInfoClass::Mapped, + }), + external_address_types: AddressTypeSet::only( + external_info.address.address_type(), + ), + }, + } + } else { + DetectionResultKind::Failure { possibilities } + } + }); + + unord.push(do_no_nat_fut); + } + } + // If we know we are not behind NAT, check our firewall status #[instrument(level = "trace", skip(self), ret)] fn protocol_process_no_nat( @@ -532,15 +541,20 @@ impl DiscoveryContext { all_possibilities: &mut DialInfoClassAllPossibilities, unord: &mut FuturesUnordered>, ) { - let external_infos = self.inner.lock().external_info.clone(); + let external_infos = self.inner.lock().external_infos.clone(); // Have all the external validator nodes check us for external_info in external_infos { - let this = self.clone(); + // If this is the same as an existing upnp mapping, skip it, since + // we are already validating that + if self.matches_mapped_dial_info(&external_info.dial_info) { + continue; + } let possibilities = vec![(DialInfoClass::Direct, 1), (DialInfoClass::Blocked, 1)]; all_possibilities.add(&possibilities); + let this = self.clone(); let do_no_nat_fut: PinBoxFutureStatic = Box::pin(async move { // Do a validate_dial_info on the external address from a redirected node if this @@ -597,7 +611,7 @@ impl DiscoveryContext { // Get the external dial info histogram for our use here let external_info = { let inner = self.inner.lock(); - inner.external_info.clone() + inner.external_infos.clone() }; let local_port = self.config.port; @@ -673,47 +687,51 @@ impl DiscoveryContext { // If we have no external address that matches our local port, then lets try that port // on our best external address and see if there's a port forward someone added manually /////////// - let this = self.clone(); if local_port_matching_external_info.is_none() && best_external_info.is_some() { let c_external_1 = best_external_info.as_ref().unwrap().clone(); - let c_this = this.clone(); - let possibilities = vec![(DialInfoClass::Direct, 1)]; - all_possibilities.add(&possibilities); + // Do a validate_dial_info on the external address, but with the same port as the local port of local interface, from a redirected node + // This test is to see if a node had manual port forwarding done with the same port number as the local listener + let mut external_1_dial_info_with_local_port = c_external_1.dial_info.clone(); + external_1_dial_info_with_local_port.set_port(local_port); - let do_manual_map_fut: PinBoxFutureStatic = Box::pin(async move { - // Do a validate_dial_info on the external address, but with the same port as the local port of local interface, from a redirected node - // This test is to see if a node had manual port forwarding done with the same port number as the local listener - let mut external_1_dial_info_with_local_port = c_external_1.dial_info.clone(); - external_1_dial_info_with_local_port.set_port(local_port); + // If this is the 
same as an existing upnp mapping, skip it, since + // we are already validating that + if !self.matches_mapped_dial_info(&external_1_dial_info_with_local_port) { + let possibilities = vec![(DialInfoClass::Direct, 1)]; + all_possibilities.add(&possibilities); - if this - .validate_dial_info( - c_external_1.node.clone(), - external_1_dial_info_with_local_port.clone(), - true, - ) - .await - { - // Add public dial info with Direct dialinfo class - return DetectionResultKind::Result { - possibilities, - result: DetectionResult { - config: c_this.config, - ddi: DetectedDialInfo::Detected(DialInfoDetail { - dial_info: external_1_dial_info_with_local_port, - class: DialInfoClass::Direct, - }), - external_address_types: AddressTypeSet::only( - c_external_1.address.address_type(), - ), - }, - }; - } + let c_this = self.clone(); + let do_manual_map_fut: PinBoxFutureStatic = + Box::pin(async move { + if c_this + .validate_dial_info( + c_external_1.node.clone(), + external_1_dial_info_with_local_port.clone(), + true, + ) + .await + { + // Add public dial info with Direct dialinfo class + return DetectionResultKind::Result { + possibilities, + result: DetectionResult { + config: c_this.config, + ddi: DetectedDialInfo::Detected(DialInfoDetail { + dial_info: external_1_dial_info_with_local_port, + class: DialInfoClass::Direct, + }), + external_address_types: AddressTypeSet::only( + c_external_1.address.address_type(), + ), + }, + }; + } - DetectionResultKind::Failure { possibilities } - }); - unord.push(do_manual_map_fut); + DetectionResultKind::Failure { possibilities } + }); + unord.push(do_manual_map_fut); + } } // NAT Detection @@ -724,86 +742,39 @@ impl DiscoveryContext { // Full Cone NAT Detection /////////// - let c_this = self.clone(); - let c_external_1 = external_info.first().cloned().unwrap(); let possibilities = vec![(DialInfoClass::FullConeNAT, 1)]; all_possibilities.add(&possibilities); - let do_full_cone_fut: PinBoxFutureStatic = Box::pin(async move { - let mut retry_count = retry_count; - - // Let's see what kind of NAT we have - // Does a redirected dial info validation from a different address and a random port find us? 
- loop { - if c_this - .validate_dial_info( - c_external_1.node.clone(), - c_external_1.dial_info.clone(), - true, - ) - .await - { - // Yes, another machine can use the dial info directly, so Full Cone - // Add public dial info with full cone NAT network class - - return DetectionResultKind::Result { - possibilities, - result: DetectionResult { - config: c_this.config, - ddi: DetectedDialInfo::Detected(DialInfoDetail { - dial_info: c_external_1.dial_info, - class: DialInfoClass::FullConeNAT, - }), - external_address_types: AddressTypeSet::only( - c_external_1.address.address_type(), - ), - }, - }; - } - if retry_count == 0 { - break; - } - retry_count -= 1; - } - - DetectionResultKind::Failure { possibilities } - }); - unord.push(do_full_cone_fut); let c_this = self.clone(); let c_external_1 = external_info.first().cloned().unwrap(); - let c_external_2 = external_info.get(1).cloned().unwrap(); - let possibilities = vec![ - (DialInfoClass::AddressRestrictedNAT, 1), - (DialInfoClass::PortRestrictedNAT, 1), - ]; - all_possibilities.add(&possibilities); - let do_restricted_cone_fut: PinBoxFutureStatic = - Box::pin(async move { + + // If this is the same as an existing upnp mapping, skip it, since + // we are already validating that + if !self.matches_mapped_dial_info(&c_external_1.dial_info) { + let do_full_cone_fut: PinBoxFutureStatic = Box::pin(async move { let mut retry_count = retry_count; - // We are restricted, determine what kind of restriction - - // If we're going to end up as a restricted NAT of some sort - // Address is the same, so it's address or port restricted - + // Let's see what kind of NAT we have + // Does a redirected dial info validation from a different address and a random port find us? loop { - // Do a validate_dial_info on the external address from a random port if c_this .validate_dial_info( - c_external_2.node.clone(), + c_external_1.node.clone(), c_external_1.dial_info.clone(), - false, + true, ) .await { - // Got a reply from a non-default port, which means we're only address restricted + // Yes, another machine can use the dial info directly, so Full Cone + // Add public dial info with full cone NAT network class + return DetectionResultKind::Result { possibilities, result: DetectionResult { config: c_this.config, ddi: DetectedDialInfo::Detected(DialInfoDetail { - dial_info: c_external_1.dial_info.clone(), - class: DialInfoClass::AddressRestrictedNAT, + dial_info: c_external_1.dial_info, + class: DialInfoClass::FullConeNAT, }), external_address_types: AddressTypeSet::only( c_external_1.address.address_type(), @@ -811,29 +782,83 @@ impl DiscoveryContext { }, }; } - if retry_count == 0 { break; } retry_count -= 1; } - // Didn't get a reply from a non-default port, which means we are also port restricted - DetectionResultKind::Result { - possibilities, - result: DetectionResult { - config: c_this.config, - ddi: DetectedDialInfo::Detected(DialInfoDetail { - dial_info: c_external_1.dial_info.clone(), - class: DialInfoClass::PortRestrictedNAT, - }), - external_address_types: AddressTypeSet::only( - c_external_1.address.address_type(), - ), - }, - } + DetectionResultKind::Failure { possibilities } }); - unord.push(do_restricted_cone_fut); + unord.push(do_full_cone_fut); + + let possibilities = vec![ + (DialInfoClass::AddressRestrictedNAT, 1), + (DialInfoClass::PortRestrictedNAT, 1), + ]; + all_possibilities.add(&possibilities); + + let c_this = self.clone(); + let c_external_1 = external_info.first().cloned().unwrap(); + let c_external_2 = 
external_info.get(1).cloned().unwrap(); + let do_restricted_cone_fut: PinBoxFutureStatic = + Box::pin(async move { + let mut retry_count = retry_count; + + // We are restricted, determine what kind of restriction + + // If we're going to end up as a restricted NAT of some sort + // Address is the same, so it's address or port restricted + + loop { + // Do a validate_dial_info on the external address from a random port + if c_this + .validate_dial_info( + c_external_2.node.clone(), + c_external_1.dial_info.clone(), + false, + ) + .await + { + // Got a reply from a non-default port, which means we're only address restricted + return DetectionResultKind::Result { + possibilities, + result: DetectionResult { + config: c_this.config, + ddi: DetectedDialInfo::Detected(DialInfoDetail { + dial_info: c_external_1.dial_info.clone(), + class: DialInfoClass::AddressRestrictedNAT, + }), + external_address_types: AddressTypeSet::only( + c_external_1.address.address_type(), + ), + }, + }; + } + + if retry_count == 0 { + break; + } + retry_count -= 1; + } + + // Didn't get a reply from a non-default port, which means we are also port restricted + DetectionResultKind::Result { + possibilities, + result: DetectionResult { + config: c_this.config, + ddi: DetectedDialInfo::Detected(DialInfoDetail { + dial_info: c_external_1.dial_info.clone(), + class: DialInfoClass::PortRestrictedNAT, + }), + external_address_types: AddressTypeSet::only( + c_external_1.address.address_type(), + ), + }, + } + }); + unord.push(do_restricted_cone_fut); + } } /// Run a discovery for a particular context @@ -861,34 +886,16 @@ impl DiscoveryContext { let enable_upnp = self.config().with(|c| c.network.upnp); if enable_upnp { - let this = self.clone(); + // Attempt a port mapping via all available and enabled mechanisms + // Try this before the direct mapping in the event that we are restarting + // and may not have recorded a mapping created the last time + if let Some(external_mapped_dial_info) = self.try_upnp_port_mapping().await { + // Got a port mapping, store it + self.inner.lock().mapped_dial_info = Some(external_mapped_dial_info); - let possibilities = vec![(DialInfoClass::Mapped, 1)]; - all_possibilities.add(&possibilities); - - let do_mapped_fut: PinBoxFutureStatic = Box::pin(async move { - // Attempt a port mapping via all available and enabled mechanisms - // Try this before the direct mapping in the event that we are restarting - // and may not have recorded a mapping created the last time - if let Some(external_mapped_dial_info) = this.try_upnp_port_mapping().await { - // Got a port mapping, let's use it - return DetectionResultKind::Result { - possibilities, - result: DetectionResult { - config: this.config, - ddi: DetectedDialInfo::Detected(DialInfoDetail { - dial_info: external_mapped_dial_info.clone(), - class: DialInfoClass::Mapped, - }), - external_address_types: AddressTypeSet::only( - external_mapped_dial_info.address_type(), - ), - }, - }; - } - DetectionResultKind::Failure { possibilities } - }); - unord.push(do_mapped_fut); + // And validate it + self.protocol_process_mapped_dial_info(&mut all_possibilities, &mut unord); + } } // NAT Detection @@ -898,7 +905,7 @@ impl DiscoveryContext { let local_address_in_external_info = self .inner .lock() - .external_info + .external_infos .iter() .find_map(|ei| self.intf_addrs.contains(&ei.address).then_some(true)) .unwrap_or_default(); diff --git a/veilid-core/src/network_manager/send_data.rs b/veilid-core/src/network_manager/send_data.rs index b3adddc5..0d8d1697 100644 
--- a/veilid-core/src/network_manager/send_data.rs +++ b/veilid-core/src/network_manager/send_data.rs @@ -367,14 +367,16 @@ impl NetworkManager { data }; - let connection_initial_timeout_us = self - .config() - .with(|c| c.network.connection_initial_timeout_ms as u64 * 1000); + let excessive_reverse_connect_duration_us = self.config().with(|c| { + (c.network.connection_initial_timeout_ms * 2 + + c.network.reverse_connection_receipt_time_ms) as u64 + * 1000 + }); let unique_flow = network_result_try!( pin_future!(debug_duration( || { self.do_reverse_connect(relay_nr.clone(), target_node_ref.clone(), data) }, - Some(connection_initial_timeout_us * 2) + Some(excessive_reverse_connect_duration_us) )) .await? ); diff --git a/veilid-flutter/example/pubspec.lock b/veilid-flutter/example/pubspec.lock index 5fa08dea..8f014dba 100644 --- a/veilid-flutter/example/pubspec.lock +++ b/veilid-flutter/example/pubspec.lock @@ -13,10 +13,10 @@ packages: dependency: transitive description: name: async - sha256: "947bfcf187f74dbc5e146c9eb9c0f10c9f8b30743e341481c1e2ed3ecc18c20c" + sha256: d2872f9c19731c2e5f10444b14686eb7cc85c76274bd6c16e1816bff9a3bab63 url: "https://pub.dev" source: hosted - version: "2.11.0" + version: "2.12.0" async_tools: dependency: transitive description: @@ -29,10 +29,10 @@ packages: dependency: transitive description: name: boolean_selector - sha256: "6cfb5af12253eaf2b368f07bacc5a80d1301a071c73360d746b7f2e32d762c66" + sha256: "8aab1771e1243a5063b8b0ff68042d67334e3feab9e95b9490f9a6ebf73b42ea" url: "https://pub.dev" source: hosted - version: "2.1.1" + version: "2.1.2" change_case: dependency: transitive description: @@ -45,10 +45,10 @@ packages: dependency: transitive description: name: characters - sha256: "04a925763edad70e8443c99234dc3328f442e811f1d8fd1a72f1c8ad0f69a605" + sha256: f71061c654a3380576a52b451dd5532377954cf9dbd272a78fc8479606670803 url: "https://pub.dev" source: hosted - version: "1.3.0" + version: "1.4.0" charcode: dependency: transitive description: @@ -61,18 +61,18 @@ packages: dependency: transitive description: name: clock - sha256: cb6d7f03e1de671e34607e909a7213e31d7752be4fb66a86d29fe1eb14bfb5cf + sha256: fddb70d9b5277016c77a80201021d40a2247104d9f4aa7bab7157b7e3f05b84b url: "https://pub.dev" source: hosted - version: "1.1.1" + version: "1.1.2" collection: dependency: transitive description: name: collection - sha256: a1ace0a119f20aabc852d165077c036cd864315bd99b7eaa10a60100341941bf + sha256: "2f5709ae4d3d59dd8f7cd309b4e023046b57d8a6c82130785d2b0e5868084e76" url: "https://pub.dev" source: hosted - version: "1.19.0" + version: "1.19.1" convert: dependency: transitive description: @@ -101,10 +101,10 @@ packages: dependency: transitive description: name: fake_async - sha256: "511392330127add0b769b75a987850d136345d9227c6b94c96a04cf4a391bf78" + sha256: "6a95e56b2449df2273fd8c45a662d6947ce1ebb7aafe80e550a3f68297f3cacc" url: "https://pub.dev" source: hosted - version: "1.3.1" + version: "1.3.2" ffi: dependency: transitive description: @@ -117,10 +117,10 @@ packages: dependency: transitive description: name: file - sha256: "5fc22d7c25582e38ad9a8515372cd9a93834027aacf1801cf01164dac0ffa08c" + sha256: a3b4f84adafef897088c160faf7dfffb7696046cb13ae90b508c2cbc95d3b8d4 url: "https://pub.dev" source: hosted - version: "7.0.0" + version: "7.0.1" fixnum: dependency: transitive description: @@ -195,18 +195,18 @@ packages: dependency: transitive description: name: leak_tracker - sha256: "7bb2830ebd849694d1ec25bf1f44582d6ac531a57a365a803a6034ff751d2d06" + sha256: 
c35baad643ba394b40aac41080300150a4f08fd0fd6a10378f8f7c6bc161acec url: "https://pub.dev" source: hosted - version: "10.0.7" + version: "10.0.8" leak_tracker_flutter_testing: dependency: transitive description: name: leak_tracker_flutter_testing - sha256: "9491a714cca3667b60b5c420da8217e6de0d1ba7a5ec322fab01758f6998f379" + sha256: f8b613e7e6a13ec79cfdc0e97638fddb3ab848452eff057653abd3edba760573 url: "https://pub.dev" source: hosted - version: "3.0.8" + version: "3.0.9" leak_tracker_testing: dependency: transitive description: @@ -243,10 +243,10 @@ packages: dependency: transitive description: name: matcher - sha256: d2323aa2060500f906aa31a895b4030b6da3ebdcc5619d14ce1aada65cd161cb + sha256: dc58c723c3c24bf8d3e2d3ad3f2f9d7bd9cf43ec6feaa64181775e60190153f2 url: "https://pub.dev" source: hosted - version: "0.12.16+1" + version: "0.12.17" material_color_utilities: dependency: transitive description: @@ -259,18 +259,18 @@ packages: dependency: transitive description: name: meta - sha256: bdb68674043280c3428e9ec998512fb681678676b3c54e773629ffe74419f8c7 + sha256: e3641ec5d63ebf0d9b41bd43201a66e3fc79a65db5f61fc181f04cd27aab950c url: "https://pub.dev" source: hosted - version: "1.15.0" + version: "1.16.0" path: dependency: "direct main" description: name: path - sha256: "087ce49c3f0dc39180befefc60fdb4acd8f8620e5682fe2476afd0b3688bb4af" + sha256: "75cca69d1490965be98c73ceaea117e8a04dd21217b37b292c9ddbec0d955bc5" url: "https://pub.dev" source: hosted - version: "1.9.0" + version: "1.9.1" path_provider: dependency: "direct main" description: @@ -323,10 +323,10 @@ packages: dependency: transitive description: name: platform - sha256: "9b71283fc13df574056616011fb138fd3b793ea47cc509c189a6c3fa5f8a1a65" + sha256: "5d6b1b0036a5f331ebc77c850ebc8506cbc1e9416c27e59b439f917a902a4984" url: "https://pub.dev" source: hosted - version: "3.1.5" + version: "3.1.6" plugin_platform_interface: dependency: transitive description: @@ -339,10 +339,10 @@ packages: dependency: transitive description: name: process - sha256: "21e54fd2faf1b5bdd5102afd25012184a6793927648ea81eea80552ac9405b32" + sha256: "107d8be718f120bbba9dcd1e95e3bd325b1b4a4f07db64154635ba03f2567a0d" url: "https://pub.dev" source: hosted - version: "5.0.2" + version: "5.0.3" quiver: dependency: transitive description: @@ -360,34 +360,34 @@ packages: dependency: transitive description: name: source_span - sha256: "53e943d4206a5e30df338fd4c6e7a077e02254531b138a15aec3bd143c1a8b3c" + sha256: "254ee5351d6cb365c859e20ee823c3bb479bf4a293c22d17a9f1bf144ce86f7c" url: "https://pub.dev" source: hosted - version: "1.10.0" + version: "1.10.1" stack_trace: dependency: transitive description: name: stack_trace - sha256: "9f47fd3630d76be3ab26f0ee06d213679aa425996925ff3feffdec504931c377" + sha256: "8b27215b45d22309b5cddda1aa2b19bdfec9df0e765f2de506401c071d38d1b1" url: "https://pub.dev" source: hosted - version: "1.12.0" + version: "1.12.1" stream_channel: dependency: transitive description: name: stream_channel - sha256: ba2aa5d8cc609d96bbb2899c28934f9e1af5cddbd60a827822ea467161eb54e7 + sha256: "969e04c80b8bcdf826f8f16579c7b14d780458bd97f56d107d3950fdbeef059d" url: "https://pub.dev" source: hosted - version: "2.1.2" + version: "2.1.4" string_scanner: dependency: transitive description: name: string_scanner - sha256: "688af5ed3402a4bde5b3a6c15fd768dbf2621a614950b17f04626c431ab3c4c3" + sha256: "921cd31725b72fe181906c6a94d987c78e3b98c2e205b397ea399d4054872b43" url: "https://pub.dev" source: hosted - version: "1.3.0" + version: "1.4.1" sync_http: dependency: transitive description: 
@@ -416,18 +416,18 @@ packages: dependency: transitive description: name: term_glyph - sha256: a29248a84fbb7c79282b40b8c72a1209db169a2e0542bce341da992fe1bc7e84 + sha256: "7f554798625ea768a7518313e58f83891c7f5024f88e46e7182a4558850a4b8e" url: "https://pub.dev" source: hosted - version: "1.2.1" + version: "1.2.2" test_api: dependency: transitive description: name: test_api - sha256: "664d3a9a64782fcdeb83ce9c6b39e78fd2971d4e37827b9b06c3aa1edc5e760c" + sha256: fb31f383e2ee25fbbfe06b40fe21e1e458d14080e3c67e7ba0acfde4df4e0bbd url: "https://pub.dev" source: hosted - version: "0.7.3" + version: "0.7.4" typed_data: dependency: transitive description: @@ -450,7 +450,7 @@ packages: path: ".." relative: true source: path - version: "0.4.1" + version: "0.4.3" veilid_test: dependency: "direct dev" description: @@ -462,10 +462,10 @@ packages: dependency: transitive description: name: vm_service - sha256: f6be3ed8bd01289b34d679c2b62226f63c0e69f9fd2e50a6b3c1c729a961041b + sha256: "0968250880a6c5fe7edc067ed0a13d4bae1577fe2771dcf3010d52c4a9d3ca14" url: "https://pub.dev" source: hosted - version: "14.3.0" + version: "14.3.1" webdriver: dependency: transitive description: @@ -499,5 +499,5 @@ packages: source: hosted version: "0.0.6" sdks: - dart: ">=3.5.0 <4.0.0" + dart: ">=3.7.0-0 <4.0.0" flutter: ">=3.24.0"
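One closing note on the send_data.rs hunk in this last patch: the "excessive duration" check around do_reverse_connect now budgets two connection setups plus the reverse-connection receipt delivery time, instead of a bare doubled initial timeout. A small illustrative calculation, using hypothetical config values rather than the project's actual defaults:

    fn main() {
        // Hypothetical values for illustration only; real ones come from the Veilid config.
        let connection_initial_timeout_ms: u64 = 2_000;
        let reverse_connection_receipt_time_ms: u64 = 5_000;

        // Previous threshold: 2x the initial connection timeout.
        let old_us = connection_initial_timeout_ms * 2 * 1_000; // 4_000_000 us

        // New threshold: two connection setups plus the receipt delivery budget.
        let new_us =
            (connection_initial_timeout_ms * 2 + reverse_connection_receipt_time_ms) * 1_000; // 9_000_000 us

        println!("old={old_us}us new={new_us}us");
    }

With these sample numbers the check trips at 9 s instead of 4 s, so a reverse connection that legitimately waits on receipt delivery is no longer reported as excessive.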