From c272c768fc6232fdabc172f0831d6488f7420c37 Mon Sep 17 00:00:00 2001 From: Christien Rioux Date: Fri, 17 May 2024 18:23:34 -0400 Subject: [PATCH 01/14] initial version of preempt for dht get_value --- veilid-core/src/rpc_processor/fanout_call.rs | 7 + veilid-core/src/storage_manager/get_value.rs | 392 ++++++++++++------ veilid-core/src/storage_manager/mod.rs | 94 +++-- .../storage_manager/storage_manager_inner.rs | 17 + .../src/storage_manager/watch_value.rs | 23 +- .../src/veilid_api/types/veilid_state.rs | 36 ++ veilid-tools/src/deferred_stream_processor.rs | 125 ++++++ veilid-tools/src/lib.rs | 3 + 8 files changed, 529 insertions(+), 168 deletions(-) create mode 100644 veilid-tools/src/deferred_stream_processor.rs diff --git a/veilid-core/src/rpc_processor/fanout_call.rs b/veilid-core/src/rpc_processor/fanout_call.rs index 9399a171..c64c688a 100644 --- a/veilid-core/src/rpc_processor/fanout_call.rs +++ b/veilid-core/src/rpc_processor/fanout_call.rs @@ -10,10 +10,16 @@ where #[derive(Debug, Copy, Clone)] pub(crate) enum FanoutResultKind { + Partial, Timeout, Finished, Exhausted, } +impl FanoutResultKind { + pub fn is_partial(&self) -> bool { + matches!(self, Self::Partial) + } +} #[derive(Debug, Clone)] pub(crate) struct FanoutResult { @@ -23,6 +29,7 @@ pub(crate) struct FanoutResult { pub(crate) fn debug_fanout_result(result: &FanoutResult) -> String { let kc = match result.kind { + FanoutResultKind::Partial => "P", FanoutResultKind::Timeout => "T", FanoutResultKind::Finished => "F", FanoutResultKind::Exhausted => "E", diff --git a/veilid-core/src/storage_manager/get_value.rs b/veilid-core/src/storage_manager/get_value.rs index 3cdf3210..9e0d6fee 100644 --- a/veilid-core/src/storage_manager/get_value.rs +++ b/veilid-core/src/storage_manager/get_value.rs @@ -10,6 +10,8 @@ struct OutboundGetValueContext { pub descriptor: Option>, /// The parsed schema from the descriptor if we have one pub schema: Option, + /// If we should send a partial update with the current 
contetx + pub send_partial_update: bool, } /// The result of the outbound_get_value operation @@ -29,7 +31,7 @@ impl StorageManager { subkey: ValueSubkey, safety_selection: SafetySelection, last_get_result: GetResult, - ) -> VeilidAPIResult { + ) -> VeilidAPIResult>> { let routing_table = rpc_processor.routing_table(); // Get the DHT parameters for 'GetValue' @@ -49,171 +51,301 @@ impl StorageManager { inner.get_value_nodes(key)?.unwrap_or_default() }; - // Make do-get-value answer context + // Parse the schema let schema = if let Some(d) = &last_get_result.opt_descriptor { Some(d.schema()?) } else { None }; + + // Make the return channel + let (out_tx, out_rx) = flume::unbounded::>(); + + // Make do-get-value answer context let context = Arc::new(Mutex::new(OutboundGetValueContext { value: last_get_result.opt_value, value_nodes: vec![], descriptor: last_get_result.opt_descriptor.clone(), schema, + send_partial_update: false, })); // Routine to call to generate fanout - let call_routine = |next_node: NodeRef| { - let rpc_processor = rpc_processor.clone(); + let call_routine = { let context = context.clone(); - let last_descriptor = last_get_result.opt_descriptor.clone(); - async move { - let gva = network_result_try!( - rpc_processor - .clone() - .rpc_call_get_value( - Destination::direct(next_node.clone()).with_safety(safety_selection), - key, - subkey, - last_descriptor.map(|x| (*x).clone()), - ) - .await? - ); + let rpc_processor = rpc_processor.clone(); + move |next_node: NodeRef| { + let context = context.clone(); + let rpc_processor = rpc_processor.clone(); + let last_descriptor = last_get_result.opt_descriptor.clone(); + async move { + let gva = network_result_try!( + rpc_processor + .clone() + .rpc_call_get_value( + Destination::direct(next_node.clone()) + .with_safety(safety_selection), + key, + subkey, + last_descriptor.map(|x| (*x).clone()), + ) + .await? + ); - // Keep the descriptor if we got one. 
If we had a last_descriptor it will - // already be validated by rpc_call_get_value - if let Some(descriptor) = gva.answer.descriptor { - let mut ctx = context.lock(); - if ctx.descriptor.is_none() && ctx.schema.is_none() { - let schema = match descriptor.schema() { - Ok(v) => v, - Err(e) => { - return Ok(NetworkResult::invalid_message(e)); - } + // Keep the descriptor if we got one. If we had a last_descriptor it will + // already be validated by rpc_call_get_value + if let Some(descriptor) = gva.answer.descriptor { + let mut ctx = context.lock(); + if ctx.descriptor.is_none() && ctx.schema.is_none() { + let schema = match descriptor.schema() { + Ok(v) => v, + Err(e) => { + return Ok(NetworkResult::invalid_message(e)); + } + }; + ctx.schema = Some(schema); + ctx.descriptor = Some(Arc::new(descriptor)); + } + } + + // Keep the value if we got one and it is newer and it passes schema validation + if let Some(value) = gva.answer.value { + log_dht!(debug "Got value back: len={} seq={}", value.value_data().data().len(), value.value_data().seq()); + let mut ctx = context.lock(); + + // Ensure we have a schema and descriptor + let (Some(descriptor), Some(schema)) = (&ctx.descriptor, &ctx.schema) + else { + // Got a value but no descriptor for it + // Move to the next node + return Ok(NetworkResult::invalid_message( + "Got value with no descriptor", + )); }; - ctx.schema = Some(schema); - ctx.descriptor = Some(Arc::new(descriptor)); - } - } - // Keep the value if we got one and it is newer and it passes schema validation - if let Some(value) = gva.answer.value { - log_dht!(debug "Got value back: len={} seq={}", value.value_data().data().len(), value.value_data().seq()); - let mut ctx = context.lock(); + // Validate with schema + if !schema.check_subkey_value_data( + descriptor.owner(), + subkey, + value.value_data(), + ) { + // Validation failed, ignore this value + // Move to the next node + return Ok(NetworkResult::invalid_message(format!( + "Schema validation failed on 
subkey {}", + subkey + ))); + } - // Ensure we have a schema and descriptor - let (Some(descriptor), Some(schema)) = (&ctx.descriptor, &ctx.schema) else { - // Got a value but no descriptor for it - // Move to the next node - return Ok(NetworkResult::invalid_message( - "Got value with no descriptor", - )); - }; + // If we have a prior value, see if this is a newer sequence number + if let Some(prior_value) = &ctx.value { + let prior_seq = prior_value.value_data().seq(); + let new_seq = value.value_data().seq(); - // Validate with schema - if !schema.check_subkey_value_data( - descriptor.owner(), - subkey, - value.value_data(), - ) { - // Validation failed, ignore this value - // Move to the next node - return Ok(NetworkResult::invalid_message(format!( - "Schema validation failed on subkey {}", - subkey - ))); - } - - // If we have a prior value, see if this is a newer sequence number - if let Some(prior_value) = &ctx.value { - let prior_seq = prior_value.value_data().seq(); - let new_seq = value.value_data().seq(); - - if new_seq == prior_seq { - // If sequence number is the same, the data should be the same - if prior_value.value_data() != value.value_data() { - // Move to the next node - return Ok(NetworkResult::invalid_message("value data mismatch")); + if new_seq == prior_seq { + // If sequence number is the same, the data should be the same + if prior_value.value_data() != value.value_data() { + // Move to the next node + return Ok(NetworkResult::invalid_message( + "value data mismatch", + )); + } + // Increase the consensus count for the existing value + ctx.value_nodes.push(next_node); + } else if new_seq > prior_seq { + // If the sequence number is greater, start over with the new value + ctx.value = Some(Arc::new(value)); + // One node has shown us this value so far + ctx.value_nodes = vec![next_node]; + // Send an update since the value changed + ctx.send_partial_update = true; + } else { + // If the sequence number is older, ignore it } - // Increase the 
consensus count for the existing value - ctx.value_nodes.push(next_node); - } else if new_seq > prior_seq { - // If the sequence number is greater, start over with the new value + } else { + // If we have no prior value, keep it ctx.value = Some(Arc::new(value)); // One node has shown us this value so far ctx.value_nodes = vec![next_node]; - } else { - // If the sequence number is older, ignore it + // Send an update since the value changed + ctx.send_partial_update = true; } - } else { - // If we have no prior value, keep it - ctx.value = Some(Arc::new(value)); - // One node has shown us this value so far - ctx.value_nodes = vec![next_node]; } + + // Return peers if we have some + log_network_result!(debug "GetValue fanout call returned peers {}", gva.answer.peers.len()); + + Ok(NetworkResult::value(gva.answer.peers)) } - - // Return peers if we have some - log_network_result!(debug "GetValue fanout call returned peers {}", gva.answer.peers.len()); - - Ok(NetworkResult::value(gva.answer.peers)) } }; // Routine to call to check if we're done at each step - let check_done = |_closest_nodes: &[NodeRef]| { - // If we have reached sufficient consensus, return done - let ctx = context.lock(); - if ctx.value.is_some() - && ctx.descriptor.is_some() - && ctx.value_nodes.len() >= consensus_count - { - return Some(()); + let check_done = { + let context = context.clone(); + let out_tx = out_tx.clone(); + move |_closest_nodes: &[NodeRef]| { + let mut ctx = context.lock(); + + // send partial update if desired + if ctx.send_partial_update { + ctx.send_partial_update=false; + + // return partial result + let fanout_result = FanoutResult { + kind: FanoutResultKind::Partial, + value_nodes: ctx.value_nodes.clone(), + }; + if let Err(e) = out_tx.send(Ok(OutboundGetValueResult { + fanout_result, + get_result: GetResult { + opt_value: ctx.value.clone(), + opt_descriptor: ctx.descriptor.clone(), + }, + })) { + log_dht!(debug "Sending partial GetValue result failed: {}", e); + } + } + 
+ // If we have reached sufficient consensus, return done + if ctx.value.is_some() + && ctx.descriptor.is_some() + && ctx.value_nodes.len() >= consensus_count + { + return Some(()); + } + None } - None }; - // Call the fanout - let fanout_call = FanoutCall::new( - routing_table.clone(), + // Call the fanout in a spawned task + spawn(Box::pin(async move { + let fanout_call = FanoutCall::new( + routing_table.clone(), + key, + key_count, + fanout, + timeout_us, + capability_fanout_node_info_filter(vec![CAP_DHT]), + call_routine, + check_done, + ); + + let kind = match fanout_call.run(init_fanout_queue).await { + // If we don't finish in the timeout (too much time passed checking for consensus) + TimeoutOr::Timeout => FanoutResultKind::Timeout, + // If we finished with or without consensus (enough nodes returning the same value) + TimeoutOr::Value(Ok(Some(()))) => FanoutResultKind::Finished, + // If we ran out of nodes before getting consensus) + TimeoutOr::Value(Ok(None)) => FanoutResultKind::Exhausted, + // Failed + TimeoutOr::Value(Err(e)) => { + // If we finished with an error, return that + log_dht!(debug "GetValue fanout error: {}", e); + if let Err(e) = out_tx.send(Err(e.into())) { + log_dht!(debug "Sending GetValue fanout error failed: {}", e); + } + return; + } + }; + + let ctx = context.lock(); + let fanout_result = FanoutResult { + kind, + value_nodes: ctx.value_nodes.clone(), + }; + log_network_result!(debug "GetValue Fanout: {:?}", fanout_result); + + if let Err(e) = out_tx.send(Ok(OutboundGetValueResult { + fanout_result, + get_result: GetResult { + opt_value: ctx.value.clone(), + opt_descriptor: ctx.descriptor.clone(), + }, + })) { + log_dht!(debug "Sending GetValue result failed: {}", e); + } + })) + .detach(); + + Ok(out_rx) + } + + pub(super) fn process_deferred_outbound_get_value_result_inner(&self, inner: &mut StorageManagerInner, res_rx: flume::Receiver>, key: TypedKey, subkey: ValueSubkey, last_seq: ValueSeqNum) { + let this = self.clone(); + 
inner.process_deferred_results( + res_rx, + Box::new( + move |result: VeilidAPIResult| -> SendPinBoxFuture { + let this = this.clone(); + Box::pin(async move { + let result = match result { + Ok(v) => v, + Err(e) => { + log_rtab!(debug "Deferred fanout error: {}", e); + return false; + } + }; + let is_partial = result.fanout_result.kind.is_partial(); + let value_data = match this.process_outbound_get_value_result(key, subkey, Some(last_seq), result).await { + Ok(Some(v)) => v, + Ok(None) => { + return is_partial; + } + Err(e) => { + log_rtab!(debug "Deferred fanout error: {}", e); + return false; + } + }; + if is_partial { + // If more partial results show up, don't send an update until we're done + return true; + } + // If we processed the final result, possibly send an update + // if the sequence number changed since our first partial update + // Send with a max count as this is not attached to any watch + if last_seq != value_data.seq() { + if let Err(e) = this.update_callback_value_change(key,ValueSubkeyRangeSet::single(subkey), u32::MAX, Some(value_data)).await { + log_rtab!(debug "Failed sending deferred fanout value change: {}", e); + } + } + + // Return done + false + }) + }, + ), + ); + } + + pub(super) async fn process_outbound_get_value_result(&self, key: TypedKey, subkey: ValueSubkey, opt_last_seq: Option, result: get_value::OutboundGetValueResult) -> Result, VeilidAPIError> { + // See if we got a value back + let Some(get_result_value) = result.get_result.opt_value else { + // If we got nothing back then we also had nothing beforehand, return nothing + return Ok(None); + }; + + // Keep the list of nodes that returned a value for later reference + let mut inner = self.lock().await?; + + inner.process_fanout_results( key, - key_count, - fanout, - timeout_us, - capability_fanout_node_info_filter(vec![CAP_DHT]), - call_routine, - check_done, + core::iter::once((subkey, &result.fanout_result)), + false, ); - let kind = match 
fanout_call.run(init_fanout_queue).await { - // If we don't finish in the timeout (too much time passed checking for consensus) - TimeoutOr::Timeout => FanoutResultKind::Timeout, - // If we finished with or without consensus (enough nodes returning the same value) - TimeoutOr::Value(Ok(Some(()))) => FanoutResultKind::Finished, - // If we ran out of nodes before getting consensus) - TimeoutOr::Value(Ok(None)) => FanoutResultKind::Exhausted, - // Failed - TimeoutOr::Value(Err(e)) => { - // If we finished with an error, return that - log_dht!(debug "GetValue Fanout Error: {}", e); - return Err(e.into()); - } - }; - - let ctx = context.lock(); - let fanout_result = FanoutResult { - kind, - value_nodes: ctx.value_nodes.clone(), - }; - log_network_result!(debug "GetValue Fanout: {:?}", fanout_result); - - Ok(OutboundGetValueResult { - fanout_result, - get_result: GetResult { - opt_value: ctx.value.clone(), - opt_descriptor: ctx.descriptor.clone(), - }, - }) + // If we got a new value back then write it to the opened record + if Some(get_result_value.value_data().seq()) != opt_last_seq { + inner + .handle_set_local_value( + key, + subkey, + get_result_value.clone(), + WatchUpdateMode::UpdateAll, + ) + .await?; + } + Ok(Some(get_result_value.value_data().clone())) } /// Handle a received 'Get Value' query diff --git a/veilid-core/src/storage_manager/mod.rs b/veilid-core/src/storage_manager/mod.rs index 7aa52ad9..b8088ba8 100644 --- a/veilid-core/src/storage_manager/mod.rs +++ b/veilid-core/src/storage_manager/mod.rs @@ -264,7 +264,7 @@ impl StorageManager { // No last descriptor, no last value // Use the safety selection we opened the record with let subkey: ValueSubkey = 0; - let result = self + let res_rx = self .outbound_get_value( rpc_processor, key, @@ -273,12 +273,24 @@ impl StorageManager { GetResult::default(), ) .await?; + // Wait for the first result + let Ok(result) = res_rx.recv_async().await else { + apibail_internal!("failed to receive results"); + }; + let 
result = result?; // If we got nothing back, the key wasn't found if result.get_result.opt_value.is_none() && result.get_result.opt_descriptor.is_none() { // No result apibail_key_not_found!(key); }; + let last_seq = result + .get_result + .opt_value + .as_ref() + .unwrap() + .value_data() + .seq(); // Reopen inner to store value we just got let mut inner = self.lock().await?; @@ -295,9 +307,16 @@ impl StorageManager { } // Open the new record - inner + let out = inner .open_new_record(key, writer, subkey, result.get_result, safety_selection) - .await + .await; + + if out.is_ok() { + self.process_deferred_outbound_get_value_result_inner( + &mut inner, res_rx, key, subkey, last_seq, + ); + } + out } /// Close an opened local record @@ -402,7 +421,7 @@ impl StorageManager { .opt_value .as_ref() .map(|v| v.value_data().seq()); - let result = self + let res_rx = self .outbound_get_value( rpc_processor, key, @@ -412,32 +431,33 @@ impl StorageManager { ) .await?; - // See if we got a value back - let Some(get_result_value) = result.get_result.opt_value else { - // If we got nothing back then we also had nothing beforehand, return nothing - return Ok(None); + // Wait for the first result + let Ok(result) = res_rx.recv_async().await else { + apibail_internal!("failed to receive results"); }; + let result = result?; + let partial = result.fanout_result.kind.is_partial(); - // Keep the list of nodes that returned a value for later reference - let mut inner = self.lock().await?; - inner.process_fanout_results( - key, - core::iter::once((subkey, &result.fanout_result)), - false, - ); + // Process the returned result + let out = self + .process_outbound_get_value_result(key, subkey, opt_last_seq, result) + .await?; - // If we got a new value back then write it to the opened record - if Some(get_result_value.value_data().seq()) != opt_last_seq { - inner - .handle_set_local_value( + if let Some(out) = &out { + // If there's more to process, do it in the background + if partial { 
+ let mut inner = self.lock().await?; + self.process_deferred_outbound_get_value_result_inner( + &mut inner, + res_rx, key, subkey, - get_result_value.clone(), - WatchUpdateMode::UpdateAll, - ) - .await?; + out.seq(), + ); + } } - Ok(Some(get_result_value.value_data().clone())) + + Ok(out) } /// Set the value of a subkey on an opened local record @@ -920,6 +940,31 @@ impl StorageManager { Ok(()) } + // Send a value change up through the callback + #[instrument(level = "trace", skip(self), err)] + async fn update_callback_value_change( + &self, + key: TypedKey, + subkeys: ValueSubkeyRangeSet, + count: u32, + value: Option, + ) -> Result<(), VeilidAPIError> { + let opt_update_callback = { + let inner = self.lock().await?; + inner.update_callback.clone() + }; + + if let Some(update_callback) = opt_update_callback { + update_callback(VeilidUpdate::ValueChange(Box::new(VeilidValueChange { + key, + subkeys, + count, + value, + }))); + } + Ok(()) + } + fn check_fanout_set_offline( &self, key: TypedKey, @@ -927,6 +972,7 @@ impl StorageManager { fanout_result: &FanoutResult, ) -> bool { match fanout_result.kind { + FanoutResultKind::Partial => false, FanoutResultKind::Timeout => { log_stor!(debug "timeout in set_value, adding offline subkey: {}:{}", key, subkey); true diff --git a/veilid-core/src/storage_manager/storage_manager_inner.rs b/veilid-core/src/storage_manager/storage_manager_inner.rs index 57ffdb1a..b43c78a1 100644 --- a/veilid-core/src/storage_manager/storage_manager_inner.rs +++ b/veilid-core/src/storage_manager/storage_manager_inner.rs @@ -32,6 +32,8 @@ pub(super) struct StorageManagerInner { pub tick_future: Option>, /// Update callback to send ValueChanged notification to pub update_callback: Option, + /// Deferred result processor + pub deferred_result_processor: DeferredStreamProcessor, /// The maximum consensus count set_consensus_count: usize, @@ -88,6 +90,7 @@ impl StorageManagerInner { opt_routing_table: Default::default(), tick_future: 
Default::default(), update_callback: None, + deferred_result_processor: DeferredStreamProcessor::default(), set_consensus_count, } } @@ -126,6 +129,9 @@ impl StorageManagerInner { self.load_metadata().await?; + // Start deferred results processors + self.deferred_result_processor.init().await; + // Schedule tick let tick_future = interval(1000, move || { let this = outer_self.clone(); @@ -151,6 +157,9 @@ impl StorageManagerInner { f.await; } + // Stop deferred result processor + self.deferred_result_processor.terminate().await; + // Final flush on record stores if let Some(mut local_record_store) = self.local_record_store.take() { if let Err(e) = local_record_store.flush().await { @@ -708,4 +717,12 @@ impl StorageManagerInner { subkeys: ValueSubkeyRangeSet::single(subkey), }); } + + pub fn process_deferred_results( + &mut self, + receiver: flume::Receiver, + handler: impl FnMut(T) -> SendPinBoxFuture + Send + 'static, + ) -> bool { + self.deferred_result_processor.add(receiver, handler) + } } diff --git a/veilid-core/src/storage_manager/watch_value.rs b/veilid-core/src/storage_manager/watch_value.rs index c8b3a77e..2de238e2 100644 --- a/veilid-core/src/storage_manager/watch_value.rs +++ b/veilid-core/src/storage_manager/watch_value.rs @@ -417,7 +417,7 @@ impl StorageManager { watch_id: u64, ) -> VeilidAPIResult> { // Update local record store with new value - let (is_value_seq_newer, opt_update_callback, value) = { + let (is_value_seq_newer, value) = { let mut inner = self.lock().await?; // Don't process update if the record is closed @@ -516,7 +516,7 @@ impl StorageManager { } } - (is_value_seq_newer, inner.update_callback.clone(), value) + (is_value_seq_newer, value) }; // Announce ValueChanged VeilidUpdate @@ -526,18 +526,13 @@ impl StorageManager { let do_update = is_value_seq_newer || subkeys.len() > 1 || count == 0; if do_update { - if let Some(update_callback) = opt_update_callback { - update_callback(VeilidUpdate::ValueChange(Box::new(VeilidValueChange { - 
key, - subkeys, - count, - value: if is_value_seq_newer { - Some(value.unwrap().value_data().clone()) - } else { - None - }, - }))); - } + let value = if is_value_seq_newer { + Some(value.unwrap().value_data().clone()) + } else { + None + }; + self.update_callback_value_change(key, subkeys, count, value) + .await?; } Ok(NetworkResult::value(())) diff --git a/veilid-core/src/veilid_api/types/veilid_state.rs b/veilid-core/src/veilid_api/types/veilid_state.rs index 41c3bc04..c751d096 100644 --- a/veilid-core/src/veilid_api/types/veilid_state.rs +++ b/veilid-core/src/veilid_api/types/veilid_state.rs @@ -52,58 +52,93 @@ impl TryFrom for AttachmentState { } } +/// Describe the attachment state of the Veilid node #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, JsonSchema)] #[cfg_attr(target_arch = "wasm32", derive(Tsify))] pub struct VeilidStateAttachment { + /// The overall quality of the routing table if attached, or the current state the attachment state machine. pub state: AttachmentState, + /// If attached and there are enough eachable nodes in the routing table to perform all the actions of the PublicInternet RoutingDomain, + /// including things like private/safety route allocation and DHT operations. pub public_internet_ready: bool, + /// If attached and there are enough eachable nodes in the routing table to perform all the actions of the LocalNetwork RoutingDomain. pub local_network_ready: bool, } +/// Describe a recently accessed peer #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, JsonSchema)] #[cfg_attr(target_arch = "wasm32", derive(Tsify))] pub struct PeerTableData { + /// The node ids used by this peer #[schemars(with = "Vec")] #[cfg_attr(target_arch = "wasm32", tsify(type = "string[]"))] pub node_ids: Vec, + /// The peer's human readable address. pub peer_address: String, + /// Statistics we have collected on this peer. 
pub peer_stats: PeerStats, } +/// Describe the current network state of the Veilid node #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, JsonSchema)] #[cfg_attr(target_arch = "wasm32", derive(Tsify))] pub struct VeilidStateNetwork { + /// If the network has been started or not. pub started: bool, + /// The total number of bytes per second used by Veilid currently in the download direction. pub bps_down: ByteCount, + /// The total number of bytes per second used by Veilid currently in the upload direction. pub bps_up: ByteCount, + /// The list of most recently accessed peers. + /// This is not an active connection table, nor is representative of the entire routing table. pub peers: Vec, } +/// Describe a private route change that has happened #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, JsonSchema)] #[cfg_attr(target_arch = "wasm32", derive(Tsify))] pub struct VeilidRouteChange { + /// If a private route that was allocated has died, it is listed here. #[schemars(with = "Vec")] pub dead_routes: Vec, + /// If a private route that was imported has died, it is listed here. #[schemars(with = "Vec")] pub dead_remote_routes: Vec, } +/// Describe changes to the Veilid node configuration +/// Currently this is only ever emitted once, however we reserve the right to +/// add the ability to change the configuration or have it changed by the Veilid node +/// itself during runtime. #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, JsonSchema)] #[cfg_attr(target_arch = "wasm32", derive(Tsify))] pub struct VeilidStateConfig { + /// If the Veilid node configuration has changed the full new config will be here. 
pub config: VeilidConfigInner, } +/// Describe when DHT records have subkey values changed #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, JsonSchema)] #[cfg_attr(target_arch = "wasm32", derive(Tsify))] pub struct VeilidValueChange { + /// The DHT Record key that changed #[schemars(with = "String")] pub key: TypedKey, + /// The portion of the DHT Record's subkeys that have changed + /// If the subkey range is empty, any watch present on the value has died. pub subkeys: ValueSubkeyRangeSet, + /// The count remaining on the watch that triggered this value change + /// If there is no watch and this is received, it will be set to u32::MAX + /// If this value is zero, any watch present on the value has died. pub count: u32, + /// The (optional) value data for the first subkey in the subkeys range + /// If 'subkeys' is not a single value, other values than the first value + /// must be retrieved with RoutingContext::get_dht_value(). pub value: Option, } +/// An update from the veilid-core to the host application describing a change +/// to the internal state of the Veilid node. #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, JsonSchema)] #[cfg_attr(target_arch = "wasm32", derive(Tsify), tsify(into_wasm_abi))] #[serde(tag = "kind")] @@ -120,6 +155,7 @@ pub enum VeilidUpdate { } from_impl_to_jsvalue!(VeilidUpdate); +/// A queriable state of the internals of veilid-core. 
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, JsonSchema)] #[cfg_attr(target_arch = "wasm32", derive(Tsify), tsify(into_wasm_abi))] pub struct VeilidState { diff --git a/veilid-tools/src/deferred_stream_processor.rs b/veilid-tools/src/deferred_stream_processor.rs new file mode 100644 index 00000000..de9577f7 --- /dev/null +++ b/veilid-tools/src/deferred_stream_processor.rs @@ -0,0 +1,125 @@ +use futures_util::{ + future::{select, Either}, + stream::FuturesUnordered, + StreamExt, +}; +use stop_token::future::FutureExt as _; + +use super::*; + +/// Background processor for streams +/// Handles streams to completion, passing each item from the stream to a callback +pub struct DeferredStreamProcessor { + pub opt_deferred_stream_channel: Option>>, + pub opt_stopper: Option, + pub opt_join_handle: Option>, +} + +impl DeferredStreamProcessor { + /// Create a new DeferredStreamProcessor + pub fn new() -> Self { + Self { + opt_deferred_stream_channel: None, + opt_stopper: None, + opt_join_handle: None, + } + } + + /// Initialize the processor before use + pub async fn init(&mut self) { + let stopper = StopSource::new(); + let stop_token = stopper.token(); + self.opt_stopper = Some(stopper); + let (dsc_tx, dsc_rx) = flume::unbounded::>(); + self.opt_deferred_stream_channel = Some(dsc_tx); + self.opt_join_handle = Some(spawn(Self::processor(stop_token, dsc_rx))); + } + + /// Terminate the processor and ensure all streams are closed + pub async fn terminate(&mut self) { + drop(self.opt_deferred_stream_channel.take()); + drop(self.opt_stopper.take()); + if let Some(jh) = self.opt_join_handle.take() { + jh.await; + } + } + + async fn processor(stop_token: StopToken, dsc_rx: flume::Receiver>) { + let mut unord = FuturesUnordered::>::new(); + + // Ensure the unord never finishes + unord.push(Box::pin(std::future::pending())); + + // Processor loop + let mut unord_fut = unord.next(); + let mut dsc_fut = dsc_rx.recv_async(); + while let Ok(res) = select(unord_fut, 
dsc_fut) + .timeout_at(stop_token.clone()) + .await + { + match res { + Either::Left((x, old_dsc_fut)) => { + // Unord future processor should never get empty + assert!(x.is_some()); + + // Make another unord future to process + unord_fut = unord.next(); + // put back the other future and keep going + dsc_fut = old_dsc_fut; + } + Either::Right((new_proc, old_unord_fut)) => { + // Immediately drop the old unord future + // because we never care about it completing + drop(old_unord_fut); + let Ok(new_proc) = new_proc else { + break; + }; + + // Add a new stream to process + unord.push(new_proc); + + // Make a new unord future because we don't care about the + // completion of the last unord future, they never return + // anything. + unord_fut = unord.next(); + // Make a new receiver future + dsc_fut = dsc_rx.recv_async(); + } + } + } + } + + /// Queue a stream to process in the background + /// * 'receiver' is the stream to process + /// * 'handler' is the callback to handle each item from the stream + /// Returns 'true' if the stream was added for processing, and 'false' if the stream could not be added, possibly due to not being initialized + pub fn add( + &mut self, + receiver: flume::Receiver, + mut handler: impl FnMut(T) -> SendPinBoxFuture + Send + 'static, + ) -> bool { + let Some(st) = self.opt_stopper.as_ref().map(|s| s.token()) else { + return false; + }; + let Some(dsc_tx) = self.opt_deferred_stream_channel.clone() else { + return false; + }; + let drp = Box::pin(async move { + while let Ok(Ok(res)) = receiver.recv_async().timeout_at(st.clone()).await { + if !handler(res).await { + break; + } + } + }); + if dsc_tx.send(drp).is_err() { + return false; + } + true + } +} + +impl Default for DeferredStreamProcessor { + fn default() -> Self { + Self::new() + } +} diff --git a/veilid-tools/src/lib.rs b/veilid-tools/src/lib.rs index 57845fae..27c3969b 100644 --- a/veilid-tools/src/lib.rs +++ b/veilid-tools/src/lib.rs @@ -29,6 +29,7 @@ pub mod assembly_buffer; pub 
mod async_peek_stream; pub mod async_tag_lock; pub mod clone_stream; +pub mod deferred_stream_processor; pub mod eventual; pub mod eventual_base; pub mod eventual_value; @@ -162,6 +163,8 @@ pub use async_tag_lock::*; #[doc(inline)] pub use clone_stream::*; #[doc(inline)] +pub use deferred_stream_processor::*; +#[doc(inline)] pub use eventual::*; #[doc(inline)] pub use eventual_base::{EventualCommon, EventualResolvedFuture}; From 8e90a831426b5f6e7a95650512e43a7a718ef776 Mon Sep 17 00:00:00 2001 From: Christien Rioux Date: Fri, 17 May 2024 21:23:42 -0400 Subject: [PATCH 02/14] valueset bugfix --- Cargo.lock | 541 +++++++++---------- veilid-core/src/storage_manager/set_value.rs | 114 ++-- 2 files changed, 326 insertions(+), 329 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index aaa4ca48..285d2f27 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -24,7 +24,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d122413f284cf2d62fb1b7db97e02edb8cda96d769b16e443a4f6195e35662b0" dependencies = [ "crypto-common", - "generic-array 0.14.7", + "generic-array", ] [[package]] @@ -140,8 +140,8 @@ dependencies = [ [[package]] name = "ansi-parser" -version = "0.9.0" -source = "git+https://gitlab.com/davidbittner/ansi-parser.git#80b28ea6c42fc6ee7c9974aaa8059ae244032365" +version = "0.9.1" +source = "git+https://gitlab.com/davidbittner/ansi-parser.git#a431fb31f8b7f5680525987c1d67d4863ac02660" dependencies = [ "heapless", "nom", @@ -158,47 +158,48 @@ dependencies = [ [[package]] name = "anstream" -version = "0.6.13" +version = "0.6.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d96bd03f33fe50a863e394ee9718a706f988b9079b20c3784fb726e7678b62fb" +checksum = "418c75fa768af9c03be99d17643f93f79bbba589895012a80e3452a19ddda15b" dependencies = [ "anstyle", "anstyle-parse", "anstyle-query", "anstyle-wincon", "colorchoice", + "is_terminal_polyfill", "utf8parse", ] [[package]] name = "anstyle" -version = "1.0.6" +version = "1.0.7" 
source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8901269c6307e8d93993578286ac0edf7f195079ffff5ebdeea6a59ffb7e36bc" +checksum = "038dfcf04a5feb68e9c60b21c9625a54c2c0616e79b72b0fd87075a056ae1d1b" [[package]] name = "anstyle-parse" -version = "0.2.3" +version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c75ac65da39e5fe5ab759307499ddad880d724eed2f6ce5b5e8a26f4f387928c" +checksum = "c03a11a9034d92058ceb6ee011ce58af4a9bf61491aa7e1e59ecd24bd40d22d4" dependencies = [ "utf8parse", ] [[package]] name = "anstyle-query" -version = "1.0.2" +version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e28923312444cdd728e4738b3f9c9cac739500909bb3d3c94b43551b16517648" +checksum = "a64c907d4e79225ac72e2a354c9ce84d50ebb4586dee56c82b3ee73004f537f5" dependencies = [ "windows-sys 0.52.0", ] [[package]] name = "anstyle-wincon" -version = "3.0.2" +version = "3.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1cd54b81ec8d6180e24654d0b371ad22fc3dd083b6ff8ba325b72e00c87660a7" +checksum = "61a38449feb7068f52bb06c12759005cf459ee52bb4adc1d5a7c4322d716fb19" dependencies = [ "anstyle", "windows-sys 0.52.0", @@ -206,25 +207,24 @@ dependencies = [ [[package]] name = "anyhow" -version = "1.0.82" +version = "1.0.85" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f538837af36e6f6a9be0faa67f9a314f8119e4e4b5867c6ab40ed60360142519" +checksum = "27a4bd113ab6da4cd0f521068a6e2ee1065eab54107266a11835d02c8ec86a37" [[package]] name = "arboard" -version = "3.3.2" +version = "3.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a2041f1943049c7978768d84e6d0fd95de98b76d6c4727b09e78ec253d29fa58" +checksum = "9fb4009533e8ff8f1450a5bcbc30f4242a1d34442221f72314bea1f5dc9c7f89" dependencies = [ "clipboard-win", "core-graphics", "image", "log", - "objc", - "objc-foundation", - "objc_id", + "objc2", + "objc2-app-kit", + 
"objc2-foundation", "parking_lot 0.12.2", - "thiserror", "windows-sys 0.48.0", "x11rb", ] @@ -253,18 +253,6 @@ version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "96d30a06541fbafbc7f82ed10c06164cfbd2c401138f6addd8404629c4b16711" -[[package]] -name = "as-slice" -version = "0.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "45403b49e3954a4b8428a0ac21a4b7afadccf92bfd96273f1a58cd4812496ae0" -dependencies = [ - "generic-array 0.12.4", - "generic-array 0.13.3", - "generic-array 0.14.7", - "stable_deref_trait", -] - [[package]] name = "async-attributes" version = "1.1.2" @@ -288,12 +276,11 @@ dependencies = [ [[package]] name = "async-channel" -version = "2.2.1" +version = "2.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "136d4d23bcc79e27423727b36823d86233aad06dfea531837b038394d11e9928" +checksum = "89b47800b0be77592da0afd425cc03468052844aff33b84e33cc696f64e77b6a" dependencies = [ "concurrent-queue", - "event-listener 5.3.0", "event-listener-strategy 0.5.2", "futures-core", "pin-project-lite", @@ -318,7 +305,7 @@ version = "2.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "05b1b633a2115cd122d73b955eadd9916c18c8f510ec9cd1686404c60ad1c29c" dependencies = [ - "async-channel 2.2.1", + "async-channel 2.3.1", "async-executor", "async-io 2.3.2", "async-lock 3.3.0", @@ -461,7 +448,7 @@ dependencies = [ "futures-util", "hickory-resolver", "pin-utils", - "socket2 0.5.6", + "socket2 0.5.7", ] [[package]] @@ -483,7 +470,7 @@ checksum = "16e62a023e7c117e27523144c5d2459f4397fcc3cab0085af8e2224f643a0193" dependencies = [ "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.64", ] [[package]] @@ -500,7 +487,7 @@ checksum = "c6fa2087f2753a7da8cc1c0dbfcf89579dd57458e36769de5ac750b4671737ca" dependencies = [ "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.64", ] [[package]] @@ -581,9 +568,9 @@ dependencies = [ [[package]] name = "autocfg" 
-version = "1.2.0" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f1fdabc7756949593fe60f30ec81974b613357de856987752631dea1e3394c80" +checksum = "0c4b4d0bd25bd0b74681c0ad21497610ce1b7c91b1022cd21c80c6fbdd9476b0" [[package]] name = "axum" @@ -731,22 +718,16 @@ checksum = "e0b121a9fe0df916e362fb3271088d071159cdf11db0e4182d02152850756eff" dependencies = [ "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.64", ] -[[package]] -name = "block" -version = "0.1.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0d8c1fef690941d3e7788d328517591fecc684c084084702d6ff1641e993699a" - [[package]] name = "block-buffer" version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4152116fd6e9dadb291ae18fc1ec3575ed6d84c29642d97890f4b4a3417297e4" dependencies = [ - "generic-array 0.14.7", + "generic-array", ] [[package]] @@ -755,7 +736,7 @@ version = "0.10.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3078c7629b62d3f0439517fa394996acacc5cbc91c5a20d8c658e77abd503a71" dependencies = [ - "generic-array 0.14.7", + "generic-array", ] [[package]] @@ -774,13 +755,22 @@ version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8d696c370c750c948ada61c69a0ee2cbbb9c50b1019ddb86d9317157a99c2cae" +[[package]] +name = "block2" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "43ff7d91d3c1d568065b06c899777d1e48dcf76103a672a0adbc238a7f247f1e" +dependencies = [ + "objc2", +] + [[package]] name = "blocking" version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "495f7104e962b7356f0aeb34247aca1fe7d2e783b346582db7f2904cb5717e88" dependencies = [ - "async-channel 2.2.1", + "async-channel 2.3.1", "async-lock 3.3.0", "async-task", "futures-io", @@ -805,9 +795,9 @@ checksum = 
"79296716171880943b8470b5f8d03aa55eb2e645a4874bdbb28adb49162e012c" [[package]] name = "bytemuck" -version = "1.15.0" +version = "1.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5d6d68c57235a3a081186990eca2867354726650f42f7516ca50c28d6281fd15" +checksum = "78834c15cb5d5efe3452d58b1e8ba890dd62d21907f867f383358198e56ebca5" [[package]] name = "byteorder" @@ -841,9 +831,9 @@ dependencies = [ [[package]] name = "cc" -version = "1.0.95" +version = "1.0.97" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d32a725bc159af97c3e629873bb9f88fb8cf8a4867175f76dc987815ea07c83b" +checksum = "099a5357d84c4c61eb35fc8eafa9a79a902c2f76911e5747ced4e032edd8d9b4" [[package]] name = "cesu8" @@ -922,7 +912,7 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7ee52072ec15386f770805afd189a01c8841be8696bed250fa2f13c4c0d6dfb7" dependencies = [ - "generic-array 0.14.7", + "generic-array", ] [[package]] @@ -994,7 +984,7 @@ dependencies = [ "heck 0.5.0", "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.64", ] [[package]] @@ -1034,17 +1024,11 @@ dependencies = [ "owo-colors", ] -[[package]] -name = "color_quant" -version = "1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3d7b894f5411737b7867f4827955924d7c254fc9f4d91a6aad6b097804b1018b" - [[package]] name = "colorchoice" -version = "1.0.0" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "acbf1af155f9b9ef647e42cdc158db4b64a1b61f743629225fde6f3e0be2a7c7" +checksum = "0b6a852b24ab71dffc585bcb46eaf7959d175cb865a7152e35b348d1b2960422" [[package]] name = "combine" @@ -1100,7 +1084,7 @@ dependencies = [ "rust-ini 0.19.0", "serde", "serde_json", - "toml 0.8.12", + "toml 0.8.13", "yaml-rust", ] @@ -1124,7 +1108,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fd326812b3fd01da5bb1af7d340d0d555fd3d4b641e7f1dfcf5962a902952787" 
dependencies = [ "futures-core", - "prost 0.12.4", + "prost 0.12.6", "prost-types", "tonic 0.10.2", "tracing-core", @@ -1335,7 +1319,7 @@ version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1bfb12502f3fc46cca1bb51ac28df9d618d813cdc3d2f25b9fe775a34af26bb3" dependencies = [ - "generic-array 0.14.7", + "generic-array", "rand_core", "typenum", ] @@ -1346,7 +1330,7 @@ version = "0.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b1d1a86f49236c215f271d40892d5fc950490551400b02ef360692c29815c714" dependencies = [ - "generic-array 0.14.7", + "generic-array", "subtle", ] @@ -1357,7 +1341,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "edb49164822f3ee45b17acd4a208cfc1251410cf0cad9a833234c9890774dd9f" dependencies = [ "quote", - "syn 2.0.60", + "syn 2.0.64", ] [[package]] @@ -1432,7 +1416,7 @@ dependencies = [ "serde_yaml", "time", "tokio", - "toml 0.8.12", + "toml 0.8.13", "unicode-segmentation", "unicode-width", "xi-unicode", @@ -1471,7 +1455,7 @@ checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.64", ] [[package]] @@ -1495,12 +1479,12 @@ dependencies = [ [[package]] name = "darling" -version = "0.20.8" +version = "0.20.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "54e36fcd13ed84ffdfda6f5be89b31287cbb80c439841fe69e04841435464391" +checksum = "83b2eb4d90d12bdda5ed17de686c2acb4c57914f8f921b8da7e112b5a36f3fe1" dependencies = [ - "darling_core 0.20.8", - "darling_macro 0.20.8", + "darling_core 0.20.9", + "darling_macro 0.20.9", ] [[package]] @@ -1519,15 +1503,15 @@ dependencies = [ [[package]] name = "darling_core" -version = "0.20.8" +version = "0.20.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c2cf1c23a687a1feeb728783b993c4e1ad83d99f351801977dd809b48d0a70f" +checksum = 
"622687fe0bac72a04e5599029151f5796111b90f1baaa9b544d807a5e31cd120" dependencies = [ "fnv", "ident_case", "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.64", ] [[package]] @@ -1543,13 +1527,13 @@ dependencies = [ [[package]] name = "darling_macro" -version = "0.20.8" +version = "0.20.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a668eda54683121533a393014d8692171709ff57a7d61f187b6e782719f8933f" +checksum = "733cabb43482b1a1b53eee8583c2b9e8684d592215ea83efd305dd31bc2f0178" dependencies = [ - "darling_core 0.20.8", + "darling_core 0.20.9", "quote", - "syn 2.0.60", + "syn 2.0.64", ] [[package]] @@ -1607,7 +1591,7 @@ version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d3dd60d1080a57a05ab032377049e0591415d2b31afd7028356dbf3cc6dcb066" dependencies = [ - "generic-array 0.14.7", + "generic-array", ] [[package]] @@ -1691,9 +1675,9 @@ dependencies = [ [[package]] name = "either" -version = "1.11.0" +version = "1.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a47c1c47d2f5964e29c61246e81db715514cd532db6b5116a25ea3c03d6780a2" +checksum = "3dca9240753cf90908d7e4aac30f630662b02aebaa1b58a3cadabdb23385b58b" [[package]] name = "embedded-io" @@ -1716,7 +1700,7 @@ dependencies = [ "heck 0.4.1", "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.64", ] [[package]] @@ -1736,7 +1720,7 @@ checksum = "f282cfdfe92516eb26c2af8589c274c7c17681f5ecc03c18255fe741c6aa64eb" dependencies = [ "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.64", ] [[package]] @@ -1776,10 +1760,10 @@ version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e08b6c6ab82d70f08844964ba10c7babb716de2ecaeab9be5717918a5177d3af" dependencies = [ - "darling 0.20.8", + "darling 0.20.9", "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.64", ] [[package]] @@ -1813,9 +1797,9 @@ checksum = "5443807d6dff69373d433ab9ef5378ad8df50ca6298caf15de6e52e24aaf54d5" [[package]] name = 
"errno" -version = "0.3.8" +version = "0.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a258e46cdc063eb8519c00b9fc845fc47bcfca4130e2f08e88665ceda8474245" +checksum = "534c5cf6194dfab3db3242765c03bbe257cf92f22b38f6bc0c58d59108a820ba" dependencies = [ "libc", "windows-sys 0.52.0", @@ -1946,9 +1930,9 @@ dependencies = [ [[package]] name = "fiat-crypto" -version = "0.2.8" +version = "0.2.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38793c55593b33412e3ae40c2c9781ffaa6f438f6f8c10f24e71846fbd7ae01e" +checksum = "28dea519a9695b9977216879a3ebfddf92f1c08c05d984f8996aecd6ecdc811d" [[package]] name = "filetime" @@ -1964,9 +1948,9 @@ dependencies = [ [[package]] name = "flate2" -version = "1.0.29" +version = "1.0.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4556222738635b7a3417ae6130d8f52201e45a0c4d1a907f0826383adb5f85e7" +checksum = "5f54427cfd1c7829e2a139fcefea601bf088ebca651d2bf53ebc600eac295dae" dependencies = [ "crc32fast", "miniz_oxide", @@ -2032,7 +2016,7 @@ checksum = "1a5c6c585bc94aaf2c7b51dd4c2ba22680844aba4c687be581871a6f518c5742" dependencies = [ "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.64", ] [[package]] @@ -2144,7 +2128,7 @@ checksum = "87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac" dependencies = [ "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.64", ] [[package]] @@ -2193,24 +2177,6 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "304de19db7028420975a296ab0fcbbc8e69438c4ed254a1e41e2a7f37d5f0e0a" -[[package]] -name = "generic-array" -version = "0.12.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ffdf9f34f1447443d37393cc6c2b8313aebddcd96906caf34e54c68d8e57d7bd" -dependencies = [ - "typenum", -] - -[[package]] -name = "generic-array" -version = "0.13.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"f797e67af32588215eaaab8327027ee8e71b9dd0b2b26996aedf20c030fce309" -dependencies = [ - "typenum", -] - [[package]] name = "generic-array" version = "0.14.7" @@ -2352,9 +2318,9 @@ checksum = "1b43ede17f21864e81be2fa654110bf1e793774238d86ef8555c37e6519c0403" [[package]] name = "hash32" -version = "0.1.1" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d4041af86e63ac4298ce40e5cca669066e75b6f1aa3390fe2561ffa5e1d9f4cc" +checksum = "47d60b12902ba28e2730cd37e95b8c9223af2808df9e902d4df49588d1470606" dependencies = [ "byteorder", ] @@ -2411,12 +2377,10 @@ dependencies = [ [[package]] name = "heapless" -version = "0.6.1" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "634bd4d29cbf24424d0a4bfcbf80c6960129dc24424752a7d1d1390607023422" +checksum = "0bfb9eb618601c89945a70e254898da93b13be0388091d42117462b265bb3fad" dependencies = [ - "as-slice", - "generic-array 0.14.7", "hash32", "stable_deref_trait", ] @@ -2607,7 +2571,7 @@ dependencies = [ "httpdate", "itoa", "pin-project-lite", - "socket2 0.5.6", + "socket2 0.5.7", "tokio", "tower-service", "tracing", @@ -2687,13 +2651,12 @@ dependencies = [ [[package]] name = "image" -version = "0.24.9" +version = "0.25.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5690139d2f55868e080017335e4b94cb7414274c74f1669c84fb5feba2c9f69d" +checksum = "fd54d660e773627692c524beaad361aca785a4f9f5730ce91f42aabe5bce3d11" dependencies = [ "bytemuck", "byteorder", - "color_quant", "num-traits", "png", "tiff", @@ -2737,14 +2700,14 @@ version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a0c10553d664a4d0bcff9f4215d0aac67a639cc68ef660840afe309b807bc9f5" dependencies = [ - "generic-array 0.14.7", + "generic-array", ] [[package]] name = "instant" -version = "0.1.12" +version = "0.1.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"7a5bbe824c507c5da5956355e86a746d82e0e1464f65d862cc5e71da70e94b2c" +checksum = "e0242819d153cba4b4b05a5a8f2a7e9bbf97b6055b2a002b395c96b5ff3c0222" dependencies = [ "cfg-if 1.0.0", ] @@ -2766,7 +2729,7 @@ version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b58db92f96b720de98181bbbe63c831e87005ab460c1bf306eb2622b4707997f" dependencies = [ - "socket2 0.5.6", + "socket2 0.5.7", "widestring", "windows-sys 0.48.0", "winreg", @@ -2778,6 +2741,12 @@ version = "2.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8f518f335dce6725a761382244631d86cf0ccb2863413590b31338feb467f9c3" +[[package]] +name = "is_terminal_polyfill" +version = "1.70.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f8478577c03552c21db0e2724ffb8986a5ce7af88107e6be5d2ee6e158c12800" + [[package]] name = "itertools" version = "0.10.5" @@ -3026,9 +2995,9 @@ checksum = "ef53942eb7bf7ff43a617b3e2c1c4a5ecf5944a7c1bc12d7ee39bbb15e5c1519" [[package]] name = "linux-raw-sys" -version = "0.4.13" +version = "0.4.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "01cda141df6706de531b6c46c3a33ecca755538219bd484262fa09410c13539c" +checksum = "78b3ae25bc7c8c38cec158d1f2757ee79e9b3740fbc7ccf0e59e4b08d793fa89" [[package]] name = "lock_api" @@ -3073,15 +3042,6 @@ version = "0.11.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "75761162ae2b0e580d7e7c390558127e5f01b4194debd6221fd8c207fc80e3f5" -[[package]] -name = "malloc_buf" -version = "0.0.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62bb907fe88d54d8d9ce32a3cceab4218ed2f6b7d35617cafe9adf84e43919cb" -dependencies = [ - "libc", -] - [[package]] name = "match_cfg" version = "0.1.0" @@ -3138,9 +3098,9 @@ checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" [[package]] name = "miniz_oxide" -version = "0.7.2" +version = "0.7.3" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d811f3e15f28568be3407c8e7fdb6514c1cda3cb30683f15b6a1a1dc4ea14a7" +checksum = "87dfd01fe195c66b572b37921ad8803d010623c0aca821bea2302239d155cdae" dependencies = [ "adler", "simd-adler32", @@ -3389,9 +3349,9 @@ dependencies = [ [[package]] name = "num" -version = "0.4.2" +version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3135b08af27d103b0a51f2ae0f8632117b7b185ccf931445affa8df530576a41" +checksum = "35bd024e8b2ff75562e5f34e7f4905839deb4b22955ef5e73d2fea1b9813cb23" dependencies = [ "num-bigint", "num-complex", @@ -3403,20 +3363,19 @@ dependencies = [ [[package]] name = "num-bigint" -version = "0.4.4" +version = "0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "608e7659b5c3d7cba262d894801b9ec9d00de989e8a82bd4bef91d08da45cdc0" +checksum = "c165a9ab64cf766f73521c0dd2cfdff64f488b8f0b3e621face3462d3db536d7" dependencies = [ - "autocfg", "num-integer", "num-traits", ] [[package]] name = "num-complex" -version = "0.4.5" +version = "0.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "23c6602fda94a57c990fe0df199a035d83576b496aa29f4e634a8ac6004e68a6" +checksum = "73f88a1307638156682bada9d7604135552957b7818057dcef22705b4d509495" dependencies = [ "num-traits", ] @@ -3438,9 +3397,9 @@ dependencies = [ [[package]] name = "num-iter" -version = "0.1.44" +version = "0.1.45" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d869c01cc0c455284163fd0092f1f93835385ccab5a98a0dcc497b2f8bf055a9" +checksum = "1429034a0490724d0075ebb2bc9e875d6503c3cf69e235a8941aa757d83ef5bf" dependencies = [ "autocfg", "num-integer", @@ -3449,11 +3408,10 @@ dependencies = [ [[package]] name = "num-rational" -version = "0.4.1" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0638a1c9d0a3c0914158145bc76cff373a75a627e6ecbfb71cbe6f453a5a19b0" +checksum = 
"f83d14da390562dca69fc84082e73e548e1ad308d24accdedd2720017cb37824" dependencies = [ - "autocfg", "num-bigint", "num-integer", "num-traits", @@ -3461,9 +3419,9 @@ dependencies = [ [[package]] name = "num-traits" -version = "0.2.18" +version = "0.2.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da0df0e5185db44f69b44f26786fe401b6c293d1907744beaa7fa62b2e5a517a" +checksum = "071dfc062690e90b734c0b2273ce72ad0ffa95f0c74596bc250dcfd960262841" dependencies = [ "autocfg", ] @@ -3509,32 +3467,58 @@ dependencies = [ ] [[package]] -name = "objc" -version = "0.2.7" +name = "objc-sys" +version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "915b1b472bc21c53464d6c8461c9d3af805ba1ef837e1cac254428f4a77177b1" +checksum = "da284c198fb9b7b0603f8635185e85fbd5b64ee154b1ed406d489077de2d6d60" + +[[package]] +name = "objc2" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b4b25e1034d0e636cd84707ccdaa9f81243d399196b8a773946dcffec0401659" dependencies = [ - "malloc_buf", + "objc-sys", + "objc2-encode", ] [[package]] -name = "objc-foundation" -version = "0.1.1" +name = "objc2-app-kit" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1add1b659e36c9607c7aab864a76c7a4c2760cd0cd2e120f3fb8b952c7e22bf9" +checksum = "fb79768a710a9a1798848179edb186d1af7e8a8679f369e4b8d201dd2a034047" dependencies = [ - "block", - "objc", - "objc_id", + "block2", + "objc2", + "objc2-core-data", + "objc2-foundation", ] [[package]] -name = "objc_id" -version = "0.1.1" +name = "objc2-core-data" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c92d4ddb4bd7b50d730c215ff871754d0da6b2178849f8a2a2ab69712d0c073b" +checksum = "6e092bc42eaf30a08844e6a076938c60751225ec81431ab89f5d1ccd9f958d6c" dependencies = [ - "objc", + "block2", + "objc2", + "objc2-foundation", +] + +[[package]] +name = "objc2-encode" +version = 
"4.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "88658da63e4cc2c8adb1262902cd6af51094df0488b760d6fd27194269c0950a" + +[[package]] +name = "objc2-foundation" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cfaefe14254871ea16c7d88968c0ff14ba554712a20d76421eec52f0a7fb8904" +dependencies = [ + "block2", + "objc2", ] [[package]] @@ -3801,9 +3785,9 @@ dependencies = [ [[package]] name = "paste" -version = "1.0.14" +version = "1.0.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "de3145af08024dea9fa9914f381a17b8fc6034dfb00f3a84013f7ff43f29ed4c" +checksum = "57c0d7b74b563b49d38dae00a0c37d4d6de9b432382b2892f0574ddcae73fd0a" [[package]] name = "pathdiff" @@ -3825,9 +3809,9 @@ checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e" [[package]] name = "pest" -version = "2.7.9" +version = "2.7.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "311fb059dee1a7b802f036316d790138c613a4e8b180c822e3925a662e9f0c95" +checksum = "560131c633294438da9f7c4b08189194b20946c8274c6b9e38881a7874dc8ee8" dependencies = [ "memchr", "thiserror", @@ -3836,9 +3820,9 @@ dependencies = [ [[package]] name = "pest_derive" -version = "2.7.9" +version = "2.7.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f73541b156d32197eecda1a4014d7f868fd2bcb3c550d5386087cfba442bf69c" +checksum = "26293c9193fbca7b1a3bf9b79dc1e388e927e6cacaa78b4a3ab705a1d3d41459" dependencies = [ "pest", "pest_generator", @@ -3846,22 +3830,22 @@ dependencies = [ [[package]] name = "pest_generator" -version = "2.7.9" +version = "2.7.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c35eeed0a3fab112f75165fdc026b3913f4183133f19b49be773ac9ea966e8bd" +checksum = "3ec22af7d3fb470a85dd2ca96b7c577a1eb4ef6f1683a9fe9a8c16e136c04687" dependencies = [ "pest", "pest_meta", "proc-macro2", "quote", - "syn 2.0.60", + 
"syn 2.0.64", ] [[package]] name = "pest_meta" -version = "2.7.9" +version = "2.7.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2adbf29bb9776f28caece835398781ab24435585fe0d4dc1374a61db5accedca" +checksum = "d7a240022f37c361ec1878d646fc5b7d7c4d28d5946e1a80ad5a7a4f4ca0bdcd" dependencies = [ "once_cell", "pest", @@ -3895,7 +3879,7 @@ checksum = "2f38a4412a78282e09a2cf38d195ea5420d15ba0602cb375210efbc877243965" dependencies = [ "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.64", ] [[package]] @@ -3912,9 +3896,9 @@ checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" [[package]] name = "piper" -version = "0.2.1" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "668d31b1c4eba19242f2088b2bf3316b82ca31082a8335764db4e083db7485d4" +checksum = "464db0c665917b13ebb5d453ccdec4add5658ee1adc7affc7677615356a8afaf" dependencies = [ "atomic-waker", "fastrand 2.1.0", @@ -4046,9 +4030,9 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.81" +version = "1.0.82" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3d1597b0c024618f09a9c3b8655b7e430397a36d23fdafec26d6965e9eec3eba" +checksum = "8ad3d49ab951a01fbaafe34f2ec74122942fe18a3f9814c3268f1bb72042131b" dependencies = [ "unicode-ident", ] @@ -4065,12 +4049,12 @@ dependencies = [ [[package]] name = "prost" -version = "0.12.4" +version = "0.12.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d0f5d036824e4761737860779c906171497f6d55681139d8312388f8fe398922" +checksum = "deb1435c188b76130da55f17a466d252ff7b1418b2ad3e037d127b94e3411f29" dependencies = [ "bytes", - "prost-derive 0.12.4", + "prost-derive 0.12.6", ] [[package]] @@ -4088,24 +4072,24 @@ dependencies = [ [[package]] name = "prost-derive" -version = "0.12.4" +version = "0.12.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"19de2de2a00075bf566bee3bd4db014b11587e84184d3f7a791bc17f1a8e9e48" +checksum = "81bddcdb20abf9501610992b6759a4c888aef7d1a7247ef75e2404275ac24af1" dependencies = [ "anyhow", "itertools 0.12.1", "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.64", ] [[package]] name = "prost-types" -version = "0.12.4" +version = "0.12.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3235c33eb02c1f1e212abdbe34c78b264b038fb58ca612664343271e36e55ffe" +checksum = "9091c90b0a32608e984ff2fa4091273cbdd755d54935c51d520887f4a1dbd5b0" dependencies = [ - "prost 0.12.4", + "prost 0.12.6", ] [[package]] @@ -4403,9 +4387,9 @@ dependencies = [ [[package]] name = "rustc-demangle" -version = "0.1.23" +version = "0.1.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d626bb9dae77e28219937af045c257c28bfd3f69333c512553507f5f9798cb76" +checksum = "719b953e2095829ee67db738b3bfa9fa368c94900df327b3f07fe6e794d2fe1f" [[package]] name = "rustc-hash" @@ -4445,7 +4429,7 @@ dependencies = [ "bitflags 2.5.0", "errno", "libc", - "linux-raw-sys 0.4.13", + "linux-raw-sys 0.4.14", "windows-sys 0.52.0", ] @@ -4482,9 +4466,9 @@ dependencies = [ [[package]] name = "rustversion" -version = "1.0.15" +version = "1.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "80af6f9131f277a45a3fba6ce8e2258037bb0477a67e610d3c1fe046ab31de47" +checksum = "955d28af4278de8121b7ebeb796b6a45735dc01436d898801014aced2773a3d6" [[package]] name = "rustyline-async" @@ -4504,9 +4488,9 @@ dependencies = [ [[package]] name = "ryu" -version = "1.0.17" +version = "1.0.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e86697c916019a8588c99b5fac3cead74ec0b4b819707a682fd4d23fa0ce1ba1" +checksum = "f3cb5ba0dc43242ce17de99c180e96db90b235b8a9fdc9543c96d2209116bd9f" [[package]] name = "same-file" @@ -4519,9 +4503,9 @@ dependencies = [ [[package]] name = "schemars" -version = "0.8.17" +version = "0.8.19" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f55c82c700538496bdc329bb4918a81f87cc8888811bd123cf325a0f2f8d309" +checksum = "fc6e7ed6919cb46507fb01ff1654309219f62b4d603822501b0b80d42f6f21ef" dependencies = [ "dyn-clone", "schemars_derive", @@ -4531,14 +4515,14 @@ dependencies = [ [[package]] name = "schemars_derive" -version = "0.8.17" +version = "0.8.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "83263746fe5e32097f06356968a077f96089739c927a61450efa069905eec108" +checksum = "185f2b7aa7e02d418e453790dde16890256bbd2bcd04b7dc5348811052b53f49" dependencies = [ "proc-macro2", "quote", - "serde_derive_internals 0.29.0", - "syn 2.0.60", + "serde_derive_internals 0.29.1", + "syn 2.0.64", ] [[package]] @@ -4585,11 +4569,11 @@ dependencies = [ [[package]] name = "security-framework" -version = "2.10.0" +version = "2.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "770452e37cad93e0a50d5abc3990d2bc351c36d0328f86cefec2f2fb206eaef6" +checksum = "c627723fd09706bacdb5cf41499e95098555af3c3c29d014dc3c458ef6be11c0" dependencies = [ - "bitflags 1.3.2", + "bitflags 2.5.0", "core-foundation", "core-foundation-sys", "libc", @@ -4598,9 +4582,9 @@ dependencies = [ [[package]] name = "security-framework-sys" -version = "2.10.0" +version = "2.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "41f3cc463c0ef97e11c3461a9d3787412d30e8e7eb907c79180c4a57bf7c04ef" +checksum = "317936bbbd05227752583946b9e66d7ce3b489f84e11a94a510b4437fef407d7" dependencies = [ "core-foundation-sys", "libc", @@ -4608,9 +4592,9 @@ dependencies = [ [[package]] name = "semver" -version = "1.0.22" +version = "1.0.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "92d43fe69e652f3df9bdc2b85b2854a0825b86e4fb76bc44d945137d053639ca" +checksum = "61697e0a1c7e512e84a621326239844a24d8207b4669b41bc18b32ea5cbf988b" [[package]] name = "send_wrapper" @@ -4629,9 +4613,9 @@ 
dependencies = [ [[package]] name = "serde" -version = "1.0.199" +version = "1.0.202" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c9f6e76df036c77cd94996771fb40db98187f096dd0b9af39c6c6e452ba966a" +checksum = "226b61a0d411b2ba5ff6d7f73a476ac4f8bb900373459cd00fab8512828ba395" dependencies = [ "serde_derive", ] @@ -4688,13 +4672,13 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.199" +version = "1.0.202" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "11bd257a6541e141e42ca6d24ae26f7714887b47e89aa739099104c7e4d3b7fc" +checksum = "6048858004bcff69094cd972ed40a32500f153bd3be9f716b2eed2e8217c4838" dependencies = [ "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.64", ] [[package]] @@ -4705,25 +4689,25 @@ checksum = "e578a843d40b4189a4d66bba51d7684f57da5bd7c304c64e14bd63efbef49509" dependencies = [ "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.64", ] [[package]] name = "serde_derive_internals" -version = "0.29.0" +version = "0.29.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "330f01ce65a3a5fe59a60c82f3c9a024b573b8a6e875bd233fe5f934e71d54e3" +checksum = "18d26a20a969b9e3fdf2fc2d9f21eda6c40e2de84c9408bb5d3b05d499aae711" dependencies = [ "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.64", ] [[package]] name = "serde_json" -version = "1.0.116" +version = "1.0.117" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3e17db7126d17feb94eb3fad46bf1a96b034e8aacbc2e775fe81505f8b0b2813" +checksum = "455182ea6142b14f93f4bc5320a2b31c1f266b66a4a5c858b013302a5d8cbfc3" dependencies = [ "itoa", "ryu", @@ -4738,14 +4722,14 @@ checksum = "6c64451ba24fc7a6a2d60fc75dd9c83c90903b19028d4eff35e88fc1e86564e9" dependencies = [ "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.64", ] [[package]] name = "serde_spanned" -version = "0.6.5" +version = "0.6.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"eb3622f419d1296904700073ea6cc23ad690adbd66f13ea683df73298736f0c1" +checksum = "79e674e01f999af37c49f70a6ede167a8a60b2503e56c5599532a65baa5969a0" dependencies = [ "serde", ] @@ -4785,7 +4769,7 @@ checksum = "91d129178576168c589c9ec973feedf7d3126c01ac2bf08795109aa35b69fb8f" dependencies = [ "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.64", ] [[package]] @@ -4949,9 +4933,9 @@ dependencies = [ [[package]] name = "socket2" -version = "0.5.6" +version = "0.5.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05ffd9c0a93b7543e062e759284fcf5f5e3b098501104bfbdde4d404db792871" +checksum = "ce305eb0b4296696835b71df73eb912e0f1ffd2556a501fcede6e0c50349191c" dependencies = [ "libc", "windows-sys 0.52.0", @@ -5037,9 +5021,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.60" +version = "2.0.64" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "909518bc7b1c9b779f1bbf07f2929d35af9f0f37e47c6e9ef7f9dddc1e1821f3" +checksum = "7ad3dee41f36859875573074334c200d1add8e4a87bb37113ebd31d926b7b11f" dependencies = [ "proc-macro2", "quote", @@ -5054,9 +5038,9 @@ checksum = "2047c6ded9c721764247e62cd3b03c09ffc529b2ba5b10ec482ae507a4a70160" [[package]] name = "sysinfo" -version = "0.30.11" +version = "0.30.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87341a165d73787554941cd5ef55ad728011566fe714e987d1b976c15dbc3a83" +checksum = "732ffa00f53e6b2af46208fba5718d9662a421049204e156328b66791ffa15ae" dependencies = [ "cfg-if 1.0.0", "core-foundation-sys", @@ -5107,22 +5091,22 @@ dependencies = [ [[package]] name = "thiserror" -version = "1.0.59" +version = "1.0.61" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f0126ad08bff79f29fc3ae6a55cc72352056dfff61e3ff8bb7129476d44b23aa" +checksum = "c546c80d6be4bc6a00c0f01730c08df82eaa7a7a61f11d656526506112cc1709" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.59" +version = 
"1.0.61" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d1cd413b5d558b4c5bf3680e324a6fa5014e7b7c067a51e69dbdf47eb7148b66" +checksum = "46c3384250002a6d5af4d114f2845d37b57521033f30d5c3f46c4d70e1197533" dependencies = [ "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.64", ] [[package]] @@ -5217,7 +5201,7 @@ dependencies = [ "parking_lot 0.12.2", "pin-project-lite", "signal-hook-registry", - "socket2 0.5.6", + "socket2 0.5.7", "tokio-macros", "tracing", "windows-sys 0.48.0", @@ -5241,7 +5225,7 @@ checksum = "5b8a1e28f2deaa14e508979454cb3a223b10b938b45af148bc0986de36f1923b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.64", ] [[package]] @@ -5257,9 +5241,9 @@ dependencies = [ [[package]] name = "tokio-util" -version = "0.7.10" +version = "0.7.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5419f34732d9eb6ee4c3578b7989078579b7f039cbbb9ca2c4da015749371e15" +checksum = "9cf6b47b3771c49ac75ad09a6162f53ad4b8088b76ac60e8ec1455b31a189fe1" dependencies = [ "bytes", "futures-core", @@ -5267,7 +5251,6 @@ dependencies = [ "futures-sink", "pin-project-lite", "tokio", - "tracing", ] [[package]] @@ -5281,21 +5264,21 @@ dependencies = [ [[package]] name = "toml" -version = "0.8.12" +version = "0.8.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e9dd1545e8208b4a5af1aa9bbd0b4cf7e9ea08fabc5d0a5c67fcaafa17433aa3" +checksum = "a4e43f8cc456c9704c851ae29c67e17ef65d2c30017c17a9765b89c382dc8bba" dependencies = [ "serde", "serde_spanned", "toml_datetime", - "toml_edit 0.22.12", + "toml_edit 0.22.13", ] [[package]] name = "toml_datetime" -version = "0.6.5" +version = "0.6.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3550f4e9685620ac18a50ed434eb3aec30db8ba93b0287467bca5826ea25baf1" +checksum = "4badfd56924ae69bcc9039335b2e017639ce3f9b001c393c1b2d1ef846ce2cbf" dependencies = [ "serde", ] @@ -5313,15 +5296,15 @@ dependencies = [ [[package]] 
name = "toml_edit" -version = "0.22.12" +version = "0.22.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d3328d4f68a705b2a4498da1d580585d39a6510f98318a2cec3018a7ec61ddef" +checksum = "c127785850e8c20836d49732ae6abfa47616e60bf9d9f57c43c250361a9db96c" dependencies = [ "indexmap 2.2.6", "serde", "serde_spanned", "toml_datetime", - "winnow 0.6.7", + "winnow 0.6.8", ] [[package]] @@ -5370,7 +5353,7 @@ dependencies = [ "hyper-timeout", "percent-encoding", "pin-project", - "prost 0.12.4", + "prost 0.12.6", "tokio", "tokio-stream", "tower", @@ -5443,7 +5426,7 @@ checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.64", ] [[package]] @@ -5599,7 +5582,7 @@ dependencies = [ "proc-macro2", "quote", "serde_derive_internals 0.28.0", - "syn 2.0.60", + "syn 2.0.64", ] [[package]] @@ -5731,9 +5714,9 @@ checksum = "830b7e5d4d90034032940e4ace0d9a9a057e7a45cd94e6c007832e39edb82f6d" [[package]] name = "value-bag" -version = "1.8.1" +version = "1.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "74797339c3b98616c009c7c3eb53a0ce41e85c8ec66bd3db96ed132d20cfdee8" +checksum = "5a84c137d37ab0142f0f2ddfe332651fdbf252e7b7dbb4e67b6c1f1b2e925101" [[package]] name = "vcpkg" @@ -5891,7 +5874,7 @@ dependencies = [ "sha2 0.10.8", "shell-words", "simplelog", - "socket2 0.5.6", + "socket2 0.5.7", "static_assertions", "stop-token", "sysinfo", @@ -6128,9 +6111,9 @@ checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f" [[package]] name = "waker-fn" -version = "1.1.1" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f3c4517f54858c779bbcbf228f4fca63d121bf85fbecb2dc578cdf4a39395690" +checksum = "317211a0dc0ceedd78fb2ca9a44aed3d7b9b26f81870d485c07122b4350673b7" [[package]] name = "walkdir" @@ -6180,7 +6163,7 @@ dependencies = [ "once_cell", "proc-macro2", "quote", - "syn 2.0.60", 
+ "syn 2.0.64", "wasm-bindgen-shared", ] @@ -6214,7 +6197,7 @@ checksum = "e94f17b526d0a461a191c78ea52bbce64071ed5c04c9ffe424dcb38f74171bb7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.64", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -6247,7 +6230,7 @@ checksum = "b7f89739351a2e03cb94beb799d47fb2cac01759b40ec441f7de39b00cbf7ef0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.64", ] [[package]] @@ -6657,9 +6640,9 @@ dependencies = [ [[package]] name = "winnow" -version = "0.6.7" +version = "0.6.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "14b9415ee827af173ebb3f15f9083df5a122eb93572ec28741fb153356ea2578" +checksum = "c3c52e9c97a68071b23e836c9380edae937f17b9c4667bd021973efc689f618d" dependencies = [ "memchr", ] @@ -6695,9 +6678,9 @@ dependencies = [ [[package]] name = "x11rb" -version = "0.13.0" +version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f8f25ead8c7e4cba123243a6367da5d3990e0d3affa708ea19dce96356bd9f1a" +checksum = "5d91ffca73ee7f68ce055750bf9f6eca0780b8c85eff9bc046a3b0da41755e12" dependencies = [ "gethostname", "rustix 0.38.34", @@ -6706,9 +6689,9 @@ dependencies = [ [[package]] name = "x11rb-protocol" -version = "0.13.0" +version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e63e71c4b8bd9ffec2c963173a4dc4cbde9ee96961d4fcb4429db9929b606c34" +checksum = "ec107c4503ea0b4a98ef47356329af139c0a4f7750e621cf2973cd3385ebcb3d" [[package]] name = "x25519-dalek" @@ -6789,22 +6772,22 @@ dependencies = [ [[package]] name = "zerocopy" -version = "0.7.32" +version = "0.7.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "74d4d3961e53fa4c9a25a8637fc2bfaf2595b3d3ae34875568a5cf64787716be" +checksum = "ae87e3fcd617500e5d106f0380cf7b77f3c6092aae37191433159dda23cfb087" dependencies = [ "zerocopy-derive", ] [[package]] name = "zerocopy-derive" -version = "0.7.32" +version = 
"0.7.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ce1b18ccd8e73a9321186f97e46f9f04b778851177567b1975109d26a08d2a6" +checksum = "15e934569e47891f7d9411f1a451d947a60e000ab3bd24fbb970f000387d1b3b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.64", ] [[package]] @@ -6824,7 +6807,7 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.64", ] [[package]] diff --git a/veilid-core/src/storage_manager/set_value.rs b/veilid-core/src/storage_manager/set_value.rs index 5980e780..a063436e 100644 --- a/veilid-core/src/storage_manager/set_value.rs +++ b/veilid-core/src/storage_manager/set_value.rs @@ -89,58 +89,63 @@ impl StorageManager { ); // If the node was close enough to possibly set the value - if sva.answer.set { - let mut ctx = context.lock(); - - // Keep the value if we got one and it is newer and it passes schema validation - if let Some(value) = sva.answer.value { - log_dht!(debug "Got value back: len={} seq={}", value.value_data().data().len(), value.value_data().seq()); - - // Validate with schema - if !ctx.schema.check_subkey_value_data( - descriptor.owner(), - subkey, - value.value_data(), - ) { - // Validation failed, ignore this value and pretend we never saw this node - return Ok(NetworkResult::invalid_message("Schema validation failed")); - } - - // If we got a value back it should be different than the one we are setting - if ctx.value.value_data() == value.value_data() { - // Move to the next node - return Ok(NetworkResult::invalid_message("same value returned")); - } - - // We have a prior value, ensure this is a newer sequence number - let prior_seq = ctx.value.value_data().seq(); - let new_seq = value.value_data().seq(); - if new_seq >= prior_seq { - // If the sequence number is greater or equal, keep it - ctx.value = Arc::new(value); - // One node has shown us this value so far - ctx.value_nodes = 
vec![next_node]; - ctx.missed_since_last_set = 0; - } else { - // If the sequence number is older, or an equal sequence number, - // node should have not returned a value here. - // Skip this node and its closer list because it is misbehaving - return Ok(NetworkResult::invalid_message("Sequence number is older")); - } - } else { - // It was set on this node and no newer value was found and returned, - // so increase our consensus count - ctx.value_nodes.push(next_node); - ctx.missed_since_last_set = 0; - } - } else { - let mut ctx = context.lock(); + let mut ctx = context.lock(); + if !sva.answer.set { ctx.missed_since_last_set += 1; + + // Return peers if we have some + log_network_result!(debug "SetValue missed: {}, fanout call returned peers {}", ctx.missed_since_last_set, sva.answer.peers.len()); + return Ok(NetworkResult::value(sva.answer.peers)); } - // Return peers if we have some - log_network_result!(debug "SetValue fanout call returned peers {}", sva.answer.peers.len()); + // See if we got a value back + let Some(value) = sva.answer.value else { + // No newer value was found and returned, so increase our consensus count + ctx.value_nodes.push(next_node); + ctx.missed_since_last_set = 0; + // Return peers if we have some + log_network_result!(debug "SetValue returned no value, fanout call returned peers {}", sva.answer.peers.len()); + return Ok(NetworkResult::value(sva.answer.peers)); + }; + + // Keep the value if we got one and it is newer and it passes schema validation + log_dht!(debug "SetValue got value back: len={} seq={}", value.value_data().data().len(), value.value_data().seq()); + + // Validate with schema + if !ctx.schema.check_subkey_value_data( + descriptor.owner(), + subkey, + value.value_data(), + ) { + // Validation failed, ignore this value and pretend we never saw this node + return Ok(NetworkResult::invalid_message("Schema validation failed")); + } + + // If we got a value back it should be different than the one we are setting + // But 
in the case of a benign bug, we can just move to the next node + if ctx.value.value_data() == value.value_data() { + ctx.value_nodes.push(next_node); + ctx.missed_since_last_set = 0; + return Ok(NetworkResult::value(sva.answer.peers)); + } + + // We have a prior value, ensure this is a newer sequence number + let prior_seq = ctx.value.value_data().seq(); + let new_seq = value.value_data().seq(); + if new_seq < prior_seq { + // If the sequence number is older node should have not returned a value here. + // Skip this node and its closer list because it is misbehaving + // Ignore this value and pretend we never saw this node + return Ok(NetworkResult::invalid_message("Sequence number is older")); + } + + // If the sequence number is greater or equal, keep it + // even if the sequence number is the same, accept all conflicts in an attempt to resolve them + ctx.value = Arc::new(value); + // One node has shown us this value so far + ctx.value_nodes = vec![next_node]; + ctx.missed_since_last_set = 0; Ok(NetworkResult::value(sva.answer.peers)) } }; @@ -232,8 +237,17 @@ impl StorageManager { // Make sure this value would actually be newer if let Some(last_value) = &last_get_result.opt_value { - if value.value_data().seq() <= last_value.value_data().seq() { - // inbound value is older than or equal to the sequence number that we have, just return the one we have + if value.value_data().seq() < last_value.value_data().seq() { + // inbound value is older than the sequence number that we have, just return the one we have + return Ok(NetworkResult::value(Some(last_value.clone()))); + } else if value.value_data().seq() == last_value.value_data().seq() { + // inbound value is equal to the sequence number that we have + // if the value is the same including the writer, return nothing, + // otherwise return the existing value because it was here first + if value.value_data() == last_value.value_data() { + return Ok(NetworkResult::value(None)); + } + // sequence number is the same 
but there's a value conflict, return what we have return Ok(NetworkResult::value(Some(last_value.clone()))); } } From 2bb43cebafb49f74c60654f8c7dec73ab1506692 Mon Sep 17 00:00:00 2001 From: Christien Rioux Date: Sat, 18 May 2024 00:21:03 -0400 Subject: [PATCH 03/14] fix node dead check, fix punish clear, fix debug command crashes --- .../src/network_manager/address_filter.rs | 1 + veilid-core/src/network_manager/mod.rs | 8 ++++++++ veilid-core/src/routing_table/bucket_entry.rs | 14 +++++++++++--- veilid-core/src/routing_table/mod.rs | 10 ++++++++++ .../src/routing_table/routing_table_inner.rs | 2 +- veilid-core/src/veilid_api/debug.rs | 19 +++++++++++++++---- .../src/veilid_api/types/veilid_state.rs | 15 +++++++++++++++ 7 files changed, 61 insertions(+), 8 deletions(-) diff --git a/veilid-core/src/network_manager/address_filter.rs b/veilid-core/src/network_manager/address_filter.rs index 2eb104a6..b39dde90 100644 --- a/veilid-core/src/network_manager/address_filter.rs +++ b/veilid-core/src/network_manager/address_filter.rs @@ -272,6 +272,7 @@ impl AddressFilter { let mut inner = self.inner.lock(); inner.punishments_by_ip4.clear(); inner.punishments_by_ip6_prefix.clear(); + self.unlocked_inner.routing_table.clear_punishments(); inner.punishments_by_node_id.clear(); } diff --git a/veilid-core/src/network_manager/mod.rs b/veilid-core/src/network_manager/mod.rs index 3ee0996a..04d95278 100644 --- a/veilid-core/src/network_manager/mod.rs +++ b/veilid-core/src/network_manager/mod.rs @@ -323,6 +323,14 @@ impl NetworkManager { .rpc_processor .clone() } + pub fn opt_rpc_processor(&self) -> Option { + self.unlocked_inner + .components + .read() + .as_ref() + .map(|x| x.rpc_processor.clone()) + } + pub fn connection_manager(&self) -> ConnectionManager { self.unlocked_inner .components diff --git a/veilid-core/src/routing_table/bucket_entry.rs b/veilid-core/src/routing_table/bucket_entry.rs index b85d858e..c5dca3a4 100644 --- a/veilid-core/src/routing_table/bucket_entry.rs +++ 
b/veilid-core/src/routing_table/bucket_entry.rs @@ -275,9 +275,9 @@ impl BucketEntryInner { && signed_node_info.timestamp() == current_sni.timestamp() { // No need to update the signednodeinfo though since the timestamp is the same - // Touch the node and let it try to live again + // Let the node try to live again but don't mark it as seen yet self.updated_since_last_network_change = true; - self.touch_last_seen(get_aligned_timestamp()); + self.make_not_dead(get_aligned_timestamp()); } return; } @@ -293,10 +293,11 @@ impl BucketEntryInner { let envelope_support = signed_node_info.node_info().envelope_support().to_vec(); // Update the signed node info + // Let the node try to live again but don't mark it as seen yet *opt_current_sni = Some(Box::new(signed_node_info)); self.set_envelope_support(envelope_support); self.updated_since_last_network_change = true; - self.touch_last_seen(get_aligned_timestamp()); + self.make_not_dead(get_aligned_timestamp()); // If we're updating an entry's node info, purge all // but the last connection in our last connections list @@ -760,6 +761,13 @@ impl BucketEntryInner { self.peer_stats.rpc_stats.last_seen_ts = Some(ts); } + pub(super) fn make_not_dead(&mut self, cur_ts: Timestamp) { + self.peer_stats.rpc_stats.last_seen_ts = None; + self.peer_stats.rpc_stats.failed_to_send = 0; + self.peer_stats.rpc_stats.recent_lost_answers = 0; + assert!(!self.check_dead(cur_ts)); + } + pub(super) fn _state_debug_info(&self, cur_ts: Timestamp) -> String { let first_consecutive_seen_ts = if let Some(first_consecutive_seen_ts) = self.peer_stats.rpc_stats.first_consecutive_seen_ts diff --git a/veilid-core/src/routing_table/mod.rs b/veilid-core/src/routing_table/mod.rs index 301fc3df..faf54f4a 100644 --- a/veilid-core/src/routing_table/mod.rs +++ b/veilid-core/src/routing_table/mod.rs @@ -743,6 +743,16 @@ impl RoutingTable { out } + pub fn clear_punishments(&self) { + let cur_ts = get_aligned_timestamp(); + self.inner + .write() + 
.with_entries_mut(cur_ts, BucketEntryState::Dead, |rti, e| { + e.with_mut(rti, |_rti, ei| ei.set_punished(false)); + Option::<()>::None + }); + } + ////////////////////////////////////////////////////////////////////// // Find Nodes diff --git a/veilid-core/src/routing_table/routing_table_inner.rs b/veilid-core/src/routing_table/routing_table_inner.rs index 25ea2659..b8f8c829 100644 --- a/veilid-core/src/routing_table/routing_table_inner.rs +++ b/veilid-core/src/routing_table/routing_table_inner.rs @@ -860,7 +860,7 @@ impl RoutingTableInner { timestamp: Timestamp, ) -> EyreResult { let nr = self.create_node_ref(outer_self, &TypedKeyGroup::from(node_id), |_rti, e| { - // this node is live because it literally just connected to us + //e.make_not_dead(timestamp); e.touch_last_seen(timestamp); })?; // set the most recent node address for connection finding and udp replies diff --git a/veilid-core/src/veilid_api/debug.rs b/veilid-core/src/veilid_api/debug.rs index 1909f615..8c7674c5 100644 --- a/veilid-core/src/veilid_api/debug.rs +++ b/veilid-core/src/veilid_api/debug.rs @@ -940,6 +940,9 @@ impl VeilidAPI { async fn debug_resolve(&self, args: String) -> VeilidAPIResult { let netman = self.network_manager()?; let routing_table = netman.routing_table(); + let Some(_rpc) = netman.opt_rpc_processor() else { + apibail_internal!("Must be attached first"); + }; let args: Vec = args.split_whitespace().map(|s| s.to_owned()).collect(); @@ -981,7 +984,9 @@ impl VeilidAPI { async fn debug_ping(&self, args: String) -> VeilidAPIResult { let netman = self.network_manager()?; let routing_table = netman.routing_table(); - let rpc = netman.rpc_processor(); + let Some(rpc) = netman.opt_rpc_processor() else { + apibail_internal!("Must be attached first"); + }; let args: Vec = args.split_whitespace().map(|s| s.to_owned()).collect(); @@ -1012,7 +1017,9 @@ impl VeilidAPI { async fn debug_app_message(&self, args: String) -> VeilidAPIResult { let netman = self.network_manager()?; let 
routing_table = netman.routing_table(); - let rpc = netman.rpc_processor(); + let Some(rpc) = netman.opt_rpc_processor() else { + apibail_internal!("Must be attached first"); + }; let (arg, rest) = args.split_once(' ').unwrap_or((&args, "")); let rest = rest.trim_start().to_owned(); @@ -1046,7 +1053,9 @@ impl VeilidAPI { async fn debug_app_call(&self, args: String) -> VeilidAPIResult { let netman = self.network_manager()?; let routing_table = netman.routing_table(); - let rpc = netman.rpc_processor(); + let Some(rpc) = netman.opt_rpc_processor() else { + apibail_internal!("Must be attached first"); + }; let (arg, rest) = args.split_once(' ').unwrap_or((&args, "")); let rest = rest.trim_start().to_owned(); @@ -1083,7 +1092,9 @@ impl VeilidAPI { async fn debug_app_reply(&self, args: String) -> VeilidAPIResult { let netman = self.network_manager()?; - let rpc = netman.rpc_processor(); + let Some(rpc) = netman.opt_rpc_processor() else { + apibail_internal!("Must be attached first"); + }; let (call_id, data) = if let Some(stripped_args) = args.strip_prefix('#') { let (arg, rest) = stripped_args.split_once(' ').unwrap_or((&args, "")); diff --git a/veilid-core/src/veilid_api/types/veilid_state.rs b/veilid-core/src/veilid_api/types/veilid_state.rs index c751d096..6fb5c7bd 100644 --- a/veilid-core/src/veilid_api/types/veilid_state.rs +++ b/veilid-core/src/veilid_api/types/veilid_state.rs @@ -17,6 +17,21 @@ pub enum AttachmentState { OverAttached = 6, Detaching = 7, } +impl AttachmentState { + pub fn is_detached(&self) -> bool { + matches!(self, Self::Detached) + } + pub fn is_attached(&self) -> bool { + matches!( + self, + Self::AttachedWeak + | Self::AttachedGood + | Self::AttachedStrong + | Self::FullyAttached + | Self::OverAttached + ) + } +} impl fmt::Display for AttachmentState { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> { From 25d804f11a70ea2458d14f89efd0c94cb848ae0f Mon Sep 17 00:00:00 2001 From: Christien Rioux Date: Sat, 18 May 2024 
01:02:50 -0400 Subject: [PATCH 04/14] debug command crash fix --- veilid-core/src/network_manager/mod.rs | 8 ++++++++ veilid-core/src/veilid_api/debug.rs | 11 ++++++++--- 2 files changed, 16 insertions(+), 3 deletions(-) diff --git a/veilid-core/src/network_manager/mod.rs b/veilid-core/src/network_manager/mod.rs index 04d95278..66d200af 100644 --- a/veilid-core/src/network_manager/mod.rs +++ b/veilid-core/src/network_manager/mod.rs @@ -340,6 +340,14 @@ impl NetworkManager { .connection_manager .clone() } + pub fn opt_connection_manager(&self) -> Option { + self.unlocked_inner + .components + .read() + .as_ref() + .map(|x| x.connection_manager.clone()) + } + pub fn update_callback(&self) -> UpdateCallback { self.unlocked_inner .update_callback diff --git a/veilid-core/src/veilid_api/debug.rs b/veilid-core/src/veilid_api/debug.rs index 8c7674c5..287af751 100644 --- a/veilid-core/src/veilid_api/debug.rs +++ b/veilid-core/src/veilid_api/debug.rs @@ -853,15 +853,20 @@ impl VeilidAPI { Ok("Buckets purged".to_owned()) } else if args[0] == "connections" { // Purge connection table - let connection_manager = self.network_manager()?.connection_manager(); - connection_manager.shutdown().await; + let opt_connection_manager = self.network_manager()?.opt_connection_manager(); + + if let Some(connection_manager) = &opt_connection_manager { + connection_manager.shutdown().await; + } // Eliminate last_connections from routing table entries self.network_manager()? 
.routing_table() .purge_last_connections(); - connection_manager.startup().await; + if let Some(connection_manager) = &opt_connection_manager { + connection_manager.startup().await; + } Ok("Connections purged".to_owned()) } else if args[0] == "routes" { From 6a57ee50dc971219176c0bc5858b98a656167e86 Mon Sep 17 00:00:00 2001 From: John Smith Date: Sat, 18 May 2024 18:27:05 -0400 Subject: [PATCH 05/14] unify routing domain handling code, fix edge case generating sender peer info --- veilid-core/src/network_manager/mod.rs | 5 +- .../src/routing_table/route_spec_store/mod.rs | 9 ++ veilid-core/src/rpc_processor/destination.rs | 83 ++++++++++++++ veilid-core/src/rpc_processor/mod.rs | 50 ++++----- veilid-core/src/rpc_processor/rpc_status.rs | 101 +++++------------- veilid-core/src/veilid_api/routing_context.rs | 2 +- 6 files changed, 145 insertions(+), 105 deletions(-) diff --git a/veilid-core/src/network_manager/mod.rs b/veilid-core/src/network_manager/mod.rs index 66d200af..fc352b24 100644 --- a/veilid-core/src/network_manager/mod.rs +++ b/veilid-core/src/network_manager/mod.rs @@ -1090,7 +1090,7 @@ impl NetworkManager { }; // Cache the envelope information in the routing table - let source_noderef = match routing_table.register_node_with_existing_connection( + let mut source_noderef = match routing_table.register_node_with_existing_connection( envelope.get_sender_typed_id(), flow, ts, @@ -1104,6 +1104,9 @@ impl NetworkManager { }; source_noderef.add_envelope_version(envelope.get_version()); + // Enforce routing domain + source_noderef.merge_filter(NodeRefFilter::new().with_routing_domain(routing_domain)); + // Pass message to RPC system rpc.enqueue_direct_message(envelope, source_noderef, flow, routing_domain, body)?; diff --git a/veilid-core/src/routing_table/route_spec_store/mod.rs b/veilid-core/src/routing_table/route_spec_store/mod.rs index 248db49f..82270aef 100644 --- a/veilid-core/src/routing_table/route_spec_store/mod.rs +++ 
b/veilid-core/src/routing_table/route_spec_store/mod.rs @@ -1056,6 +1056,11 @@ impl RouteSpecStore { // Set sequencing requirement first_hop.set_sequencing(sequencing); + // Enforce the routing domain + first_hop.merge_filter( + NodeRefFilter::new().with_routing_domain(RoutingDomain::PublicInternet), + ); + // Return the compiled safety route //info!("compile_safety_route profile (stub): {} us", (get_timestamp() - profile_start_ts)); return Ok(CompiledRoute { @@ -1113,6 +1118,10 @@ impl RouteSpecStore { // Ensure sequencing requirement is set on first hop first_hop.set_sequencing(safety_spec.sequencing); + // Enforce the routing domain + first_hop + .merge_filter(NodeRefFilter::new().with_routing_domain(RoutingDomain::PublicInternet)); + // Get the safety route secret key let secret = safety_rsd.secret_key; diff --git a/veilid-core/src/rpc_processor/destination.rs b/veilid-core/src/rpc_processor/destination.rs index 62fc5e69..c6c18baa 100644 --- a/veilid-core/src/rpc_processor/destination.rs +++ b/veilid-core/src/rpc_processor/destination.rs @@ -28,6 +28,14 @@ pub(crate) enum Destination { }, } +/// Routing configuration for destination +#[derive(Debug, Clone)] +pub struct UnsafeRoutingInfo { + pub opt_node: Option, + pub opt_relay: Option, + pub opt_routing_domain: Option, +} + impl Destination { pub fn node(&self) -> Option { match self { @@ -138,6 +146,81 @@ impl Destination { } } } + + pub fn get_unsafe_routing_info( + &self, + routing_table: RoutingTable, + ) -> Option { + // If there's a safety route in use, the safety route will be responsible for the routing + match self.get_safety_selection() { + SafetySelection::Unsafe(_) => {} + SafetySelection::Safe(_) => { + return None; + } + } + + // Get: + // * The target node (possibly relayed) + // * The routing domain we are sending to if we can determine it + let (opt_node, opt_relay, opt_routing_domain) = match self { + Destination::Direct { + node, + safety_selection: _, + } => { + let opt_routing_domain = 
node.best_routing_domain(); + if opt_routing_domain.is_none() { + // No routing domain for target, no node info + // Only a stale connection or no connection exists + log_rpc!(debug "No routing domain for node: node={}", node); + }; + (Some(node.clone()), None, opt_routing_domain) + } + Destination::Relay { + relay, + node, + safety_selection: _, + } => { + // Outbound relays are defined as routing to and from PublicInternet only right now + + // Resolve the relay for this target's routing domain and see if it matches this relay + let mut opt_routing_domain = None; + for target_rd in node.routing_domain_set() { + // Check out inbound/outbound relay to match routing domain + if let Some(relay_node) = routing_table.relay_node(target_rd) { + if relay.same_entry(&relay_node) { + // Relay for this destination is one of our routing domain relays (our inbound or outbound) + opt_routing_domain = Some(target_rd); + break; + } + } + // Check remote node's published relay to see if that who is relaying + if let Some(target_relay) = node.relay(target_rd).ok().flatten() { + if relay.same_entry(&target_relay) { + // Relay for this destination is one of its published relays + opt_routing_domain = Some(target_rd); + break; + } + } + } + if opt_routing_domain.is_none() { + // In the case of an unexpected relay, log it and don't pass any sender peer info into an unexpected relay + log_rpc!(debug "Unexpected relay used for node: relay={}, node={}", relay, node); + }; + + (Some(node.clone()), Some(relay.clone()), opt_routing_domain) + } + Destination::PrivateRoute { + private_route: _, + safety_selection: _, + } => (None, None, Some(RoutingDomain::PublicInternet)), + }; + + Some(UnsafeRoutingInfo { + opt_node, + opt_relay, + opt_routing_domain, + }) + } } impl fmt::Display for Destination { diff --git a/veilid-core/src/rpc_processor/mod.rs b/veilid-core/src/rpc_processor/mod.rs index 8049781f..44567650 100644 --- a/veilid-core/src/rpc_processor/mod.rs +++ 
b/veilid-core/src/rpc_processor/mod.rs @@ -53,11 +53,13 @@ use storage_manager::*; struct RPCMessageHeaderDetailDirect { /// The decoded header of the envelope envelope: Envelope, - /// The noderef of the peer that sent the message (not the original sender). Ensures node doesn't get evicted from routing table until we're done with it + /// The noderef of the peer that sent the message (not the original sender). + /// Ensures node doesn't get evicted from routing table until we're done with it + /// Should be filted to the routing domain of the peer that we received from peer_noderef: NodeRef, /// The flow from the peer sent the message (not the original sender) flow: Flow, - /// The routing domain the message was sent through + /// The routing domain of the peer that we received from routing_domain: RoutingDomain, } @@ -869,51 +871,36 @@ impl RPCProcessor { // Don't do this if the sender is to remain private // Otherwise we would be attaching the original sender's identity to the final destination, // thus defeating the purpose of the safety route entirely :P - match dest.get_safety_selection() { - SafetySelection::Unsafe(_) => {} - SafetySelection::Safe(_) => { - return SenderPeerInfo::default(); - } - } - - // Get the target we're sending to - let routing_table = self.routing_table(); - let target = match dest { - Destination::Direct { - node: target, - safety_selection: _, - } => target.clone(), - Destination::Relay { - relay: _, - node: target, - safety_selection: _, - } => target.clone(), - Destination::PrivateRoute { - private_route: _, - safety_selection: _, - } => { - return SenderPeerInfo::default(); - } + let Some(UnsafeRoutingInfo { + opt_node, opt_relay: _, opt_routing_domain + }) = dest.get_unsafe_routing_info(self.routing_table.clone()) else { + return SenderPeerInfo::default(); }; - - let Some(routing_domain) = target.best_routing_domain() else { + let Some(node) = opt_node else { + // If this is going over a private route, don't bother sending any 
sender peer info + // The other side won't accept it because peer info sent over a private route + // could be used to deanonymize the private route's endpoint + return SenderPeerInfo::default(); + }; + let Some(routing_domain) = opt_routing_domain else { // No routing domain for target, no node info // Only a stale connection or no connection exists return SenderPeerInfo::default(); }; // Get the target's node info timestamp - let target_node_info_ts = target.node_info_ts(routing_domain); + let target_node_info_ts = node.node_info_ts(routing_domain); // Return whatever peer info we have even if the network class is not yet valid // That away we overwrite any prior existing valid-network-class nodeinfo in the remote routing table + let routing_table = self.routing_table(); let own_peer_info = routing_table.get_own_peer_info(routing_domain); // Get our node info timestamp let our_node_info_ts = own_peer_info.signed_node_info().timestamp(); // If the target has seen our node info already don't send it again - if target.has_seen_our_node_info_ts(routing_domain, our_node_info_ts) { + if node.has_seen_our_node_info_ts(routing_domain, our_node_info_ts) { return SenderPeerInfo::new_no_peer_info(target_node_info_ts); } @@ -1358,6 +1345,7 @@ impl RPCProcessor { request: RPCMessage, answer: RPCAnswer, ) ->RPCNetworkResult<()> { + // Extract destination from respond_to let dest = network_result_try!(self.get_respond_to_destination(&request)); diff --git a/veilid-core/src/rpc_processor/rpc_status.rs b/veilid-core/src/rpc_processor/rpc_status.rs index d08d50eb..364f01fc 100644 --- a/veilid-core/src/rpc_processor/rpc_status.rs +++ b/veilid-core/src/rpc_processor/rpc_status.rs @@ -23,81 +23,38 @@ impl RPCProcessor { self, dest: Destination, ) -> RPCNetworkResult>> { - let (opt_target_nr, routing_domain, node_status) = match dest.get_safety_selection() { - SafetySelection::Unsafe(_) => { - let (opt_target_nr, routing_domain) = match &dest { - Destination::Direct { - node: target, 
- safety_selection: _, - } => { - let routing_domain = match target.best_routing_domain() { - Some(rd) => rd, - None => { - // Because this exits before calling 'question()', - // a failure to find a routing domain constitutes a send failure - let send_ts = get_aligned_timestamp(); - self.record_send_failure( - RPCKind::Question, - send_ts, - target.clone(), - None, - None, - ); - return Ok(NetworkResult::no_connection_other( - "no routing domain for target", - )); - } - }; - (Some(target.clone()), routing_domain) - } - Destination::Relay { - relay, - node: target, - safety_selection: _, - } => { - let routing_domain = match relay.best_routing_domain() { - Some(rd) => rd, - None => { - // Because this exits before calling 'question()', - // a failure to find a routing domain constitutes a send failure for both the target and its relay - let send_ts = get_aligned_timestamp(); - self.record_send_failure( - RPCKind::Question, - send_ts, - relay.clone(), - None, - None, - ); - self.record_send_failure( - RPCKind::Question, - send_ts, - target.clone(), - None, - None, - ); - return Ok(NetworkResult::no_connection_other( - "no routing domain for peer", - )); - } - }; - (Some(target.clone()), routing_domain) - } - Destination::PrivateRoute { - private_route: _, - safety_selection: _, - } => (None, RoutingDomain::PublicInternet), - }; + // Determine routing domain and node status to send + let (opt_target_nr, routing_domain, node_status) = if let Some(UnsafeRoutingInfo { + opt_node, + opt_relay, + opt_routing_domain, + }) = + dest.get_unsafe_routing_info(self.routing_table.clone()) + { + let Some(routing_domain) = opt_routing_domain else { + // Because this exits before calling 'question()', + // a failure to find a routing domain constitutes a send failure + // Record the send failure on both the node and its relay + let send_ts = get_aligned_timestamp(); + if let Some(node) = &opt_node { + self.record_send_failure(RPCKind::Question, send_ts, node.clone(), None, None); + 
} + if let Some(relay) = &opt_relay { + self.record_send_failure(RPCKind::Question, send_ts, relay.clone(), None, None); + } + return Ok(NetworkResult::no_connection_other( + "no routing domain for target", + )); + }; - let node_status = Some(self.network_manager().generate_node_status(routing_domain)); - (opt_target_nr, routing_domain, node_status) - } - SafetySelection::Safe(_) => { - let routing_domain = RoutingDomain::PublicInternet; - let node_status = None; - (None, routing_domain, node_status) - } + let node_status = Some(self.network_manager().generate_node_status(routing_domain)); + (opt_node, routing_domain, node_status) + } else { + // Safety route means we don't exchange node status and things are all PublicInternet RoutingDomain + (None, RoutingDomain::PublicInternet, None) }; + // Create status rpc question let status_q = RPCOperationStatusQ::new(node_status); let question = RPCQuestion::new( network_result_try!(self.get_destination_respond_to(&dest)?), diff --git a/veilid-core/src/veilid_api/routing_context.rs b/veilid-core/src/veilid_api/routing_context.rs index ac120be3..93432415 100644 --- a/veilid-core/src/veilid_api/routing_context.rs +++ b/veilid-core/src/veilid_api/routing_context.rs @@ -416,7 +416,7 @@ impl RoutingContext { /// This is useful for checking if you should push new subkeys to the network, or retrieve the current state of a record from the network /// to see what needs updating locally. /// - /// * `key` is the record key to watch. it must first be opened for reading or writing. + /// * `key` is the record key to inspect. it must first be opened for reading or writing. /// * `subkeys` is the the range of subkeys to inspect. The range must not exceed 512 discrete non-overlapping or adjacent subranges. /// If no range is specified, this is equivalent to inspecting the entire range of subkeys. In total, the list of subkeys returned will be truncated at 512 elements. 
/// * `scope` is what kind of range the inspection has: From 046c15c66816768641fd16e53fc96389601885e4 Mon Sep 17 00:00:00 2001 From: Christien Rioux Date: Sun, 19 May 2024 10:49:37 -0400 Subject: [PATCH 06/14] dont run routing table ticks that require the network until it has started up --- veilid-core/src/attachment_manager.rs | 2 +- veilid-core/src/network_manager/mod.rs | 23 +- veilid-core/src/network_manager/native/mod.rs | 355 +++++++++--------- veilid-core/src/network_manager/send_data.rs | 1 + veilid-core/src/network_manager/stats.rs | 10 +- veilid-core/src/network_manager/wasm/mod.rs | 127 ++++--- .../src/routing_table/tasks/bootstrap.rs | 3 + veilid-core/src/routing_table/tasks/mod.rs | 5 + 8 files changed, 283 insertions(+), 243 deletions(-) diff --git a/veilid-core/src/attachment_manager.rs b/veilid-core/src/attachment_manager.rs index 85260950..74f507de 100644 --- a/veilid-core/src/attachment_manager.rs +++ b/veilid-core/src/attachment_manager.rs @@ -237,7 +237,7 @@ impl AttachmentManager { } // see if we need to restart the network - if netman.needs_restart() { + if netman.network_needs_restart() { info!("Restarting network"); restart = true; break; diff --git a/veilid-core/src/network_manager/mod.rs b/veilid-core/src/network_manager/mod.rs index fc352b24..bb0d3c7e 100644 --- a/veilid-core/src/network_manager/mod.rs +++ b/veilid-core/src/network_manager/mod.rs @@ -117,8 +117,9 @@ pub(crate) enum NodeContactMethod { /// Must use outbound relay to reach the node OutboundRelay(NodeRef), } -#[derive(Copy, Clone, Debug, PartialEq, Eq, Ord, PartialOrd, Hash)] +#[derive(Clone, Debug, PartialEq, Eq, Ord, PartialOrd, Hash)] struct NodeContactMethodCacheKey { + node_ids: TypedKeyGroup, own_node_info_ts: Timestamp, target_node_info_ts: Timestamp, target_node_ref_filter: Option, @@ -305,6 +306,13 @@ impl NetworkManager { .net .clone() } + fn opt_net(&self) -> Option { + self.unlocked_inner + .components + .read() + .as_ref() + .map(|x| x.net.clone()) + } fn 
receipt_manager(&self) -> ReceiptManager { self.unlocked_inner .components @@ -512,9 +520,16 @@ impl NetworkManager { } } - pub fn needs_restart(&self) -> bool { - let net = self.net(); - net.needs_restart() + pub fn network_needs_restart(&self) -> bool { + self.opt_net() + .map(|net| net.needs_restart()) + .unwrap_or(false) + } + + pub fn network_is_started(&self) -> bool { + self.opt_net() + .and_then(|net| net.is_started()) + .unwrap_or(false) } pub fn generate_node_status(&self, _routing_domain: RoutingDomain) -> NodeStatus { diff --git a/veilid-core/src/network_manager/native/mod.rs b/veilid-core/src/network_manager/native/mod.rs index 2d33401f..728ce936 100644 --- a/veilid-core/src/network_manager/native/mod.rs +++ b/veilid-core/src/network_manager/native/mod.rs @@ -72,8 +72,8 @@ pub const MAX_CAPABILITIES: usize = 64; ///////////////////////////////////////////////////////////////// struct NetworkInner { - /// true if the low-level network is running - network_started: bool, + /// Some(true) if the low-level network is running, Some(false) if it is not, None if it is in transit + network_started: Option, /// set if the network needs to be restarted due to a low level configuration change /// such as dhcp release or change of address or interfaces being added or removed network_needs_restart: bool, @@ -137,7 +137,7 @@ pub(in crate::network_manager) struct Network { impl Network { fn new_inner() -> NetworkInner { NetworkInner { - network_started: false, + network_started: Some(false), network_needs_restart: false, needs_public_dial_info_check: false, network_already_cleared: false, @@ -675,196 +675,209 @@ impl Network { #[instrument(level = "debug", err, skip_all)] pub async fn startup(&self) -> EyreResult<()> { - // initialize interfaces - self.unlocked_inner.interfaces.refresh().await?; + self.inner.lock().network_started = None; + let startup_func = async { + // initialize interfaces + self.unlocked_inner.interfaces.refresh().await?; - // build the set of 
networks we should consider for the 'LocalNetwork' routing domain - let mut local_networks: HashSet<(IpAddr, IpAddr)> = HashSet::new(); - self.unlocked_inner - .interfaces - .with_interfaces(|interfaces| { - log_net!(debug "interfaces: {:#?}", interfaces); + // build the set of networks we should consider for the 'LocalNetwork' routing domain + let mut local_networks: HashSet<(IpAddr, IpAddr)> = HashSet::new(); + self.unlocked_inner + .interfaces + .with_interfaces(|interfaces| { + log_net!(debug "interfaces: {:#?}", interfaces); - for intf in interfaces.values() { - // Skip networks that we should never encounter - if intf.is_loopback() || !intf.is_running() { - continue; + for intf in interfaces.values() { + // Skip networks that we should never encounter + if intf.is_loopback() || !intf.is_running() { + continue; + } + // Add network to local networks table + for addr in &intf.addrs { + let netmask = addr.if_addr().netmask(); + let network_ip = ipaddr_apply_netmask(addr.if_addr().ip(), netmask); + local_networks.insert((network_ip, netmask)); + } } - // Add network to local networks table - for addr in &intf.addrs { - let netmask = addr.if_addr().netmask(); - let network_ip = ipaddr_apply_netmask(addr.if_addr().ip(), netmask); - local_networks.insert((network_ip, netmask)); - } - } - }); - let local_networks: Vec<(IpAddr, IpAddr)> = local_networks.into_iter().collect(); - self.unlocked_inner - .routing_table - .configure_local_network_routing_domain(local_networks); + }); + let local_networks: Vec<(IpAddr, IpAddr)> = local_networks.into_iter().collect(); + self.unlocked_inner + .routing_table + .configure_local_network_routing_domain(local_networks); - // determine if we have ipv4/ipv6 addresses - { - let mut inner = self.inner.lock(); - inner.enable_ipv4 = false; - for addr in self.get_stable_interface_addresses() { - if addr.is_ipv4() { - log_net!(debug "enable address {:?} as ipv4", addr); - inner.enable_ipv4 = true; - } else if addr.is_ipv6() { - let address 
= Address::from_ip_addr(addr); - if address.is_global() { - log_net!(debug "enable address {:?} as ipv6 global", address); - inner.enable_ipv6_global = true; - } else if address.is_local() { - log_net!(debug "enable address {:?} as ipv6 local", address); - inner.enable_ipv6_local = true; + // determine if we have ipv4/ipv6 addresses + { + let mut inner = self.inner.lock(); + inner.enable_ipv4 = false; + for addr in self.get_stable_interface_addresses() { + if addr.is_ipv4() { + log_net!(debug "enable address {:?} as ipv4", addr); + inner.enable_ipv4 = true; + } else if addr.is_ipv6() { + let address = Address::from_ip_addr(addr); + if address.is_global() { + log_net!(debug "enable address {:?} as ipv6 global", address); + inner.enable_ipv6_global = true; + } else if address.is_local() { + log_net!(debug "enable address {:?} as ipv6 local", address); + inner.enable_ipv6_local = true; + } } } } - } - // Build our protocol config to share it with other nodes - let protocol_config = { - let mut inner = self.inner.lock(); - - // Create stop source - inner.stop_source = Some(StopSource::new()); - - // get protocol config + // Build our protocol config to share it with other nodes let protocol_config = { - let c = self.config.get(); - let mut inbound = ProtocolTypeSet::new(); + let mut inner = self.inner.lock(); - if c.network.protocol.udp.enabled { - inbound.insert(ProtocolType::UDP); - } - if c.network.protocol.tcp.listen { - inbound.insert(ProtocolType::TCP); - } - if c.network.protocol.ws.listen { - inbound.insert(ProtocolType::WS); - } - if c.network.protocol.wss.listen { - inbound.insert(ProtocolType::WSS); - } + // Create stop source + inner.stop_source = Some(StopSource::new()); - let mut outbound = ProtocolTypeSet::new(); - if c.network.protocol.udp.enabled { - outbound.insert(ProtocolType::UDP); - } - if c.network.protocol.tcp.connect { - outbound.insert(ProtocolType::TCP); - } - if c.network.protocol.ws.connect { - outbound.insert(ProtocolType::WS); - } - if 
c.network.protocol.wss.connect { - outbound.insert(ProtocolType::WSS); - } + // get protocol config + let protocol_config = { + let c = self.config.get(); + let mut inbound = ProtocolTypeSet::new(); - let mut family_global = AddressTypeSet::new(); - let mut family_local = AddressTypeSet::new(); - if inner.enable_ipv4 { - family_global.insert(AddressType::IPV4); - family_local.insert(AddressType::IPV4); - } - if inner.enable_ipv6_global { - family_global.insert(AddressType::IPV6); - } - if inner.enable_ipv6_local { - family_local.insert(AddressType::IPV6); - } + if c.network.protocol.udp.enabled { + inbound.insert(ProtocolType::UDP); + } + if c.network.protocol.tcp.listen { + inbound.insert(ProtocolType::TCP); + } + if c.network.protocol.ws.listen { + inbound.insert(ProtocolType::WS); + } + if c.network.protocol.wss.listen { + inbound.insert(ProtocolType::WSS); + } - // set up the routing table's network config - // if we have static public dialinfo, upgrade our network class - let public_internet_capabilities = { - PUBLIC_INTERNET_CAPABILITIES - .iter() - .copied() - .filter(|cap| !c.capabilities.disable.contains(cap)) - .collect::>() - }; - let local_network_capabilities = { - LOCAL_NETWORK_CAPABILITIES - .iter() - .copied() - .filter(|cap| !c.capabilities.disable.contains(cap)) - .collect::>() + let mut outbound = ProtocolTypeSet::new(); + if c.network.protocol.udp.enabled { + outbound.insert(ProtocolType::UDP); + } + if c.network.protocol.tcp.connect { + outbound.insert(ProtocolType::TCP); + } + if c.network.protocol.ws.connect { + outbound.insert(ProtocolType::WS); + } + if c.network.protocol.wss.connect { + outbound.insert(ProtocolType::WSS); + } + + let mut family_global = AddressTypeSet::new(); + let mut family_local = AddressTypeSet::new(); + if inner.enable_ipv4 { + family_global.insert(AddressType::IPV4); + family_local.insert(AddressType::IPV4); + } + if inner.enable_ipv6_global { + family_global.insert(AddressType::IPV6); + } + if 
inner.enable_ipv6_local { + family_local.insert(AddressType::IPV6); + } + + // set up the routing table's network config + // if we have static public dialinfo, upgrade our network class + let public_internet_capabilities = { + PUBLIC_INTERNET_CAPABILITIES + .iter() + .copied() + .filter(|cap| !c.capabilities.disable.contains(cap)) + .collect::>() + }; + let local_network_capabilities = { + LOCAL_NETWORK_CAPABILITIES + .iter() + .copied() + .filter(|cap| !c.capabilities.disable.contains(cap)) + .collect::>() + }; + + ProtocolConfig { + outbound, + inbound, + family_global, + family_local, + public_internet_capabilities, + local_network_capabilities, + } }; + inner.protocol_config = protocol_config.clone(); - ProtocolConfig { - outbound, - inbound, - family_global, - family_local, - public_internet_capabilities, - local_network_capabilities, - } + protocol_config }; - inner.protocol_config = protocol_config.clone(); - protocol_config - }; + // Start editing routing table + let mut editor_public_internet = self + .unlocked_inner + .routing_table + .edit_routing_domain(RoutingDomain::PublicInternet); + let mut editor_local_network = self + .unlocked_inner + .routing_table + .edit_routing_domain(RoutingDomain::LocalNetwork); - // Start editing routing table - let mut editor_public_internet = self - .unlocked_inner - .routing_table - .edit_routing_domain(RoutingDomain::PublicInternet); - let mut editor_local_network = self - .unlocked_inner - .routing_table - .edit_routing_domain(RoutingDomain::LocalNetwork); - - // start listeners - if protocol_config.inbound.contains(ProtocolType::UDP) { - self.bind_udp_protocol_handlers(&mut editor_public_internet, &mut editor_local_network) + // start listeners + if protocol_config.inbound.contains(ProtocolType::UDP) { + self.bind_udp_protocol_handlers( + &mut editor_public_internet, + &mut editor_local_network, + ) .await?; - } - if protocol_config.inbound.contains(ProtocolType::WS) { - self.start_ws_listeners(&mut 
editor_public_internet, &mut editor_local_network) - .await?; - } - if protocol_config.inbound.contains(ProtocolType::WSS) { - self.start_wss_listeners(&mut editor_public_internet, &mut editor_local_network) - .await?; - } - if protocol_config.inbound.contains(ProtocolType::TCP) { - self.start_tcp_listeners(&mut editor_public_internet, &mut editor_local_network) - .await?; - } - - editor_public_internet.setup_network( - protocol_config.outbound, - protocol_config.inbound, - protocol_config.family_global, - protocol_config.public_internet_capabilities, - ); - editor_local_network.setup_network( - protocol_config.outbound, - protocol_config.inbound, - protocol_config.family_local, - protocol_config.local_network_capabilities, - ); - let detect_address_changes = { - let c = self.config.get(); - c.network.detect_address_changes - }; - if !detect_address_changes { - let inner = self.inner.lock(); - if !inner.static_public_dialinfo.is_empty() { - editor_public_internet.set_network_class(Some(NetworkClass::InboundCapable)); } - } + if protocol_config.inbound.contains(ProtocolType::WS) { + self.start_ws_listeners(&mut editor_public_internet, &mut editor_local_network) + .await?; + } + if protocol_config.inbound.contains(ProtocolType::WSS) { + self.start_wss_listeners(&mut editor_public_internet, &mut editor_local_network) + .await?; + } + if protocol_config.inbound.contains(ProtocolType::TCP) { + self.start_tcp_listeners(&mut editor_public_internet, &mut editor_local_network) + .await?; + } - // commit routing table edits - editor_public_internet.commit(true).await; - editor_local_network.commit(true).await; + editor_public_internet.setup_network( + protocol_config.outbound, + protocol_config.inbound, + protocol_config.family_global, + protocol_config.public_internet_capabilities, + ); + editor_local_network.setup_network( + protocol_config.outbound, + protocol_config.inbound, + protocol_config.family_local, + protocol_config.local_network_capabilities, + ); + let 
detect_address_changes = { + let c = self.config.get(); + c.network.detect_address_changes + }; + if !detect_address_changes { + let inner = self.inner.lock(); + if !inner.static_public_dialinfo.is_empty() { + editor_public_internet.set_network_class(Some(NetworkClass::InboundCapable)); + } + } + + // commit routing table edits + editor_public_internet.commit(true).await; + editor_local_network.commit(true).await; + + Ok(()) + }; + let res = startup_func.await; + if res.is_err() { + info!("network failed to start"); + self.inner.lock().network_started = Some(false); + return res; + } info!("network started"); - self.inner.lock().network_started = true; - + self.inner.lock().network_started = Some(true); Ok(()) } @@ -872,7 +885,7 @@ impl Network { self.inner.lock().network_needs_restart } - pub fn is_started(&self) -> bool { + pub fn is_started(&self) -> Option { self.inner.lock().network_started } diff --git a/veilid-core/src/network_manager/send_data.rs b/veilid-core/src/network_manager/send_data.rs index fab8f3d1..82e5f308 100644 --- a/veilid-core/src/network_manager/send_data.rs +++ b/veilid-core/src/network_manager/send_data.rs @@ -394,6 +394,7 @@ impl NetworkManager { // Get cache key let ncm_key = NodeContactMethodCacheKey { + node_ids: target_node_ref.node_ids(), own_node_info_ts: routing_table.get_own_node_info_ts(routing_domain), target_node_info_ts: target_node_ref.node_info_ts(routing_domain), target_node_ref_filter: target_node_ref.filter_ref().cloned(), diff --git a/veilid-core/src/network_manager/stats.rs b/veilid-core/src/network_manager/stats.rs index 608cf1cf..2c5f0b97 100644 --- a/veilid-core/src/network_manager/stats.rs +++ b/veilid-core/src/network_manager/stats.rs @@ -76,15 +76,7 @@ impl NetworkManager { } pub fn get_veilid_state(&self) -> Box { - let has_state = self - .unlocked_inner - .components - .read() - .as_ref() - .map(|c| c.net.is_started()) - .unwrap_or(false); - - if !has_state { + if !self.network_is_started() { return 
Box::new(VeilidStateNetwork { started: false, bps_down: 0.into(), diff --git a/veilid-core/src/network_manager/wasm/mod.rs b/veilid-core/src/network_manager/wasm/mod.rs index 98ad2090..b3576b8a 100644 --- a/veilid-core/src/network_manager/wasm/mod.rs +++ b/veilid-core/src/network_manager/wasm/mod.rs @@ -52,7 +52,7 @@ pub const MAX_CAPABILITIES: usize = 64; ///////////////////////////////////////////////////////////////// struct NetworkInner { - network_started: bool, + network_started: Option, network_needs_restart: bool, protocol_config: ProtocolConfig, } @@ -74,7 +74,7 @@ pub(in crate::network_manager) struct Network { impl Network { fn new_inner() -> NetworkInner { NetworkInner { - network_started: false, + network_started: Some(false), network_needs_restart: false, protocol_config: Default::default(), } @@ -334,70 +334,81 @@ impl Network { ///////////////////////////////////////////////////////////////// pub async fn startup(&self) -> EyreResult<()> { - log_net!(debug "starting network"); - // get protocol config - let protocol_config = { - let c = self.config.get(); - let inbound = ProtocolTypeSet::new(); - let mut outbound = ProtocolTypeSet::new(); + self.inner.lock().network_started = None; + let startup_func = async { + log_net!(debug "starting network"); + // get protocol config + let protocol_config = { + let c = self.config.get(); + let inbound = ProtocolTypeSet::new(); + let mut outbound = ProtocolTypeSet::new(); - if c.network.protocol.ws.connect { - outbound.insert(ProtocolType::WS); - } - if c.network.protocol.wss.connect { - outbound.insert(ProtocolType::WSS); - } + if c.network.protocol.ws.connect { + outbound.insert(ProtocolType::WS); + } + if c.network.protocol.wss.connect { + outbound.insert(ProtocolType::WSS); + } - let supported_address_types: AddressTypeSet = if is_ipv6_supported() { - AddressType::IPV4 | AddressType::IPV6 - } else { - AddressType::IPV4.into() + let supported_address_types: AddressTypeSet = if is_ipv6_supported() { + 
AddressType::IPV4 | AddressType::IPV6 + } else { + AddressType::IPV4.into() + }; + + let family_global = supported_address_types; + let family_local = supported_address_types; + + let public_internet_capabilities = { + PUBLIC_INTERNET_CAPABILITIES + .iter() + .copied() + .filter(|cap| !c.capabilities.disable.contains(cap)) + .collect::>() + }; + + ProtocolConfig { + outbound, + inbound, + family_global, + family_local, + local_network_capabilities: vec![], + public_internet_capabilities, + } }; + self.inner.lock().protocol_config = protocol_config.clone(); - let family_global = supported_address_types; - let family_local = supported_address_types; + // Start editing routing table + let mut editor_public_internet = self + .unlocked_inner + .routing_table + .edit_routing_domain(RoutingDomain::PublicInternet); - let public_internet_capabilities = { - PUBLIC_INTERNET_CAPABILITIES - .iter() - .copied() - .filter(|cap| !c.capabilities.disable.contains(cap)) - .collect::>() - }; + // set up the routing table's network config + // if we have static public dialinfo, upgrade our network class - ProtocolConfig { - outbound, - inbound, - family_global, - family_local, - local_network_capabilities: vec![], - public_internet_capabilities, - } + editor_public_internet.setup_network( + protocol_config.outbound, + protocol_config.inbound, + protocol_config.family_global, + protocol_config.public_internet_capabilities.clone(), + ); + editor_public_internet.set_network_class(Some(NetworkClass::WebApp)); + + // commit routing table edits + editor_public_internet.commit(true).await; + Ok(()) }; - self.inner.lock().protocol_config = protocol_config.clone(); - // Start editing routing table - let mut editor_public_internet = self - .unlocked_inner - .routing_table - .edit_routing_domain(RoutingDomain::PublicInternet); + let res = startup_func.await; + if res.is_err() { + info!("network failed to start"); + self.inner.lock().network_started = Some(false); + return res; + } - // set up the 
routing table's network config - // if we have static public dialinfo, upgrade our network class - - editor_public_internet.setup_network( - protocol_config.outbound, - protocol_config.inbound, - protocol_config.family_global, - protocol_config.public_internet_capabilities.clone(), - ); - editor_public_internet.set_network_class(Some(NetworkClass::WebApp)); - - // commit routing table edits - editor_public_internet.commit(true).await; - - self.inner.lock().network_started = true; - log_net!(debug "network started"); + info!("network started"); + self.inner.lock().network_started = Some(true); Ok(()) } @@ -405,7 +416,7 @@ impl Network { self.inner.lock().network_needs_restart } - pub fn is_started(&self) -> bool { + pub fn is_started(&self) -> Option { self.inner.lock().network_started } diff --git a/veilid-core/src/routing_table/tasks/bootstrap.rs b/veilid-core/src/routing_table/tasks/bootstrap.rs index 5d1f750b..3f8b9215 100644 --- a/veilid-core/src/routing_table/tasks/bootstrap.rs +++ b/veilid-core/src/routing_table/tasks/bootstrap.rs @@ -287,6 +287,9 @@ impl RoutingTable { Ok(NodeContactMethod::Direct(v)) => v, Ok(v) => { log_rtab!(warn "invalid contact method for bootstrap, ignoring peer: {:?}", v); + let _ = routing_table + .network_manager() + .get_node_contact_method(nr.clone()); return; } Err(e) => { diff --git a/veilid-core/src/routing_table/tasks/mod.rs b/veilid-core/src/routing_table/tasks/mod.rs index 3ca6ce22..47369b62 100644 --- a/veilid-core/src/routing_table/tasks/mod.rs +++ b/veilid-core/src/routing_table/tasks/mod.rs @@ -149,6 +149,11 @@ impl RoutingTable { inner.refresh_cached_entry_counts() }; + // Only do the rest if the network has started + if !self.network_manager().network_is_started() { + return Ok(()); + } + let min_peer_count = self.with_config(|c| c.network.dht.min_peer_count as usize); // Figure out which tables need bootstrap or peer minimum refresh From 6590b76263d6fa7b99b57b8b1d1b07c6cd7919c5 Mon Sep 17 00:00:00 2001 From: Christien 
Rioux Date: Sun, 19 May 2024 12:28:06 -0400 Subject: [PATCH 07/14] correctly handle local interface address changes closes #350 --- veilid-core/src/network_manager/native/mod.rs | 20 +++++++++++++++---- .../src/routing_table/tasks/bootstrap.rs | 6 +++--- 2 files changed, 19 insertions(+), 7 deletions(-) diff --git a/veilid-core/src/network_manager/native/mod.rs b/veilid-core/src/network_manager/native/mod.rs index 728ce936..5b168292 100644 --- a/veilid-core/src/network_manager/native/mod.rs +++ b/veilid-core/src/network_manager/native/mod.rs @@ -109,6 +109,8 @@ struct NetworkInner { listener_states: BTreeMap>>, /// Preferred local addresses for protocols/address combinations for outgoing connections preferred_local_addresses: BTreeMap<(ProtocolType, AddressType), SocketAddr>, + /// The list of stable interface addresses we have last seen + stable_interface_addresses_at_startup: Vec, } struct NetworkUnlockedInner { @@ -155,6 +157,7 @@ impl Network { tls_acceptor: None, listener_states: BTreeMap::new(), preferred_local_addresses: BTreeMap::new(), + stable_interface_addresses_at_startup: Vec::new(), } } @@ -170,7 +173,7 @@ impl Network { connection_manager, interfaces: NetworkInterfaces::new(), update_network_class_task: TickTask::new(1), - network_interfaces_task: TickTask::new(5), + network_interfaces_task: TickTask::new(1), upnp_task: TickTask::new(1), igd_manager: igd_manager::IGDManager::new(config.clone()), } @@ -339,13 +342,14 @@ impl Network { pub fn get_stable_interface_addresses(&self) -> Vec { let addrs = self.unlocked_inner.interfaces.stable_addresses(); - let addrs: Vec = addrs + let mut addrs: Vec = addrs .into_iter() .filter(|addr| { let address = Address::from_ip_addr(*addr); address.is_local() || address.is_global() }) .collect(); + addrs.sort(); addrs } @@ -361,7 +365,11 @@ impl Network { return Ok(false); } - self.inner.lock().needs_public_dial_info_check = true; + let mut inner = self.inner.lock(); + let new_stable_interface_addresses = 
self.get_stable_interface_addresses(); + if new_stable_interface_addresses != inner.stable_interface_addresses_at_startup { + inner.network_needs_restart = true; + } Ok(true) } @@ -708,8 +716,11 @@ impl Network { // determine if we have ipv4/ipv6 addresses { let mut inner = self.inner.lock(); + + let stable_interface_addresses = self.get_stable_interface_addresses(); + inner.enable_ipv4 = false; - for addr in self.get_stable_interface_addresses() { + for addr in stable_interface_addresses.iter().copied() { if addr.is_ipv4() { log_net!(debug "enable address {:?} as ipv4", addr); inner.enable_ipv4 = true; @@ -724,6 +735,7 @@ impl Network { } } } + inner.stable_interface_addresses_at_startup = stable_interface_addresses; } // Build our protocol config to share it with other nodes diff --git a/veilid-core/src/routing_table/tasks/bootstrap.rs b/veilid-core/src/routing_table/tasks/bootstrap.rs index 3f8b9215..4b1ce1cd 100644 --- a/veilid-core/src/routing_table/tasks/bootstrap.rs +++ b/veilid-core/src/routing_table/tasks/bootstrap.rs @@ -287,9 +287,9 @@ impl RoutingTable { Ok(NodeContactMethod::Direct(v)) => v, Ok(v) => { log_rtab!(warn "invalid contact method for bootstrap, ignoring peer: {:?}", v); - let _ = routing_table - .network_manager() - .get_node_contact_method(nr.clone()); + // let _ = routing_table + // .network_manager() + // .get_node_contact_method(nr.clone()); return; } Err(e) => { From 908bb48f8c3b4c6ba082f11de31e00f95d6b299a Mon Sep 17 00:00:00 2001 From: Christien Rioux Date: Mon, 20 May 2024 20:25:46 -0400 Subject: [PATCH 08/14] setvalue pre-empt --- veilid-core/src/storage_manager/get_value.rs | 111 ++--- veilid-core/src/storage_manager/mod.rs | 59 +-- veilid-core/src/storage_manager/set_value.rs | 440 ++++++++++++------ .../tasks/offline_subkey_writes.rs | 60 ++- veilid-flutter/example/pubspec.lock | 54 +-- veilid-flutter/example/pubspec.yaml | 4 +- veilid-flutter/pubspec.yaml | 10 +- 7 files changed, 476 insertions(+), 262 deletions(-) diff --git 
a/veilid-core/src/storage_manager/get_value.rs b/veilid-core/src/storage_manager/get_value.rs index 9e0d6fee..ad29d5a9 100644 --- a/veilid-core/src/storage_manager/get_value.rs +++ b/veilid-core/src/storage_manager/get_value.rs @@ -10,11 +10,12 @@ struct OutboundGetValueContext { pub descriptor: Option>, /// The parsed schema from the descriptor if we have one pub schema: Option, - /// If we should send a partial update with the current contetx + /// If we should send a partial update with the current context pub send_partial_update: bool, } /// The result of the outbound_get_value operation +#[derive(Clone, Debug)] pub(super) struct OutboundGetValueResult { /// Fanout result pub fanout_result: FanoutResult, @@ -91,11 +92,11 @@ impl StorageManager { ) .await? ); + let mut ctx = context.lock(); // Keep the descriptor if we got one. If we had a last_descriptor it will // already be validated by rpc_call_get_value if let Some(descriptor) = gva.answer.descriptor { - let mut ctx = context.lock(); if ctx.descriptor.is_none() && ctx.schema.is_none() { let schema = match descriptor.schema() { Ok(v) => v, @@ -109,69 +110,73 @@ impl StorageManager { } // Keep the value if we got one and it is newer and it passes schema validation - if let Some(value) = gva.answer.value { - log_dht!(debug "Got value back: len={} seq={}", value.value_data().data().len(), value.value_data().seq()); - let mut ctx = context.lock(); + let Some(value) = gva.answer.value else { + // Return peers if we have some + log_network_result!(debug "GetValue returned no value, fanout call returned peers {}", gva.answer.peers.len()); - // Ensure we have a schema and descriptor - let (Some(descriptor), Some(schema)) = (&ctx.descriptor, &ctx.schema) - else { - // Got a value but no descriptor for it - // Move to the next node - return Ok(NetworkResult::invalid_message( - "Got value with no descriptor", - )); - }; + return Ok(NetworkResult::value(gva.answer.peers)) + }; - // Validate with schema - if 
!schema.check_subkey_value_data( - descriptor.owner(), - subkey, - value.value_data(), - ) { - // Validation failed, ignore this value - // Move to the next node - return Ok(NetworkResult::invalid_message(format!( - "Schema validation failed on subkey {}", - subkey - ))); - } + log_dht!(debug "GetValue got value back: len={} seq={}", value.value_data().data().len(), value.value_data().seq()); - // If we have a prior value, see if this is a newer sequence number - if let Some(prior_value) = &ctx.value { - let prior_seq = prior_value.value_data().seq(); - let new_seq = value.value_data().seq(); + // Ensure we have a schema and descriptor + let (Some(descriptor), Some(schema)) = (&ctx.descriptor, &ctx.schema) + else { + // Got a value but no descriptor for it + // Move to the next node + return Ok(NetworkResult::invalid_message( + "Got value with no descriptor", + )); + }; - if new_seq == prior_seq { - // If sequence number is the same, the data should be the same - if prior_value.value_data() != value.value_data() { - // Move to the next node - return Ok(NetworkResult::invalid_message( - "value data mismatch", - )); - } - // Increase the consensus count for the existing value - ctx.value_nodes.push(next_node); - } else if new_seq > prior_seq { - // If the sequence number is greater, start over with the new value - ctx.value = Some(Arc::new(value)); - // One node has shown us this value so far - ctx.value_nodes = vec![next_node]; - // Send an update since the value changed - ctx.send_partial_update = true; - } else { - // If the sequence number is older, ignore it + // Validate with schema + if !schema.check_subkey_value_data( + descriptor.owner(), + subkey, + value.value_data(), + ) { + // Validation failed, ignore this value + // Move to the next node + return Ok(NetworkResult::invalid_message(format!( + "Schema validation failed on subkey {}", + subkey + ))); + } + + // If we have a prior value, see if this is a newer sequence number + if let Some(prior_value) = 
&ctx.value { + let prior_seq = prior_value.value_data().seq(); + let new_seq = value.value_data().seq(); + + if new_seq == prior_seq { + // If sequence number is the same, the data should be the same + if prior_value.value_data() != value.value_data() { + // Move to the next node + return Ok(NetworkResult::invalid_message( + "value data mismatch", + )); } - } else { - // If we have no prior value, keep it + // Increase the consensus count for the existing value + ctx.value_nodes.push(next_node); + } else if new_seq > prior_seq { + // If the sequence number is greater, start over with the new value ctx.value = Some(Arc::new(value)); // One node has shown us this value so far ctx.value_nodes = vec![next_node]; // Send an update since the value changed ctx.send_partial_update = true; + } else { + // If the sequence number is older, ignore it } + } else { + // If we have no prior value, keep it + ctx.value = Some(Arc::new(value)); + // One node has shown us this value so far + ctx.value_nodes = vec![next_node]; + // Send an update since the value changed + ctx.send_partial_update = true; } - + // Return peers if we have some log_network_result!(debug "GetValue fanout call returned peers {}", gva.answer.peers.len()); diff --git a/veilid-core/src/storage_manager/mod.rs b/veilid-core/src/storage_manager/mod.rs index b8088ba8..3a325b4a 100644 --- a/veilid-core/src/storage_manager/mod.rs +++ b/veilid-core/src/storage_manager/mod.rs @@ -557,7 +557,7 @@ impl StorageManager { log_stor!(debug "Writing subkey to the network: {}:{} len={}", key, subkey, signed_value_data.value_data().data().len() ); // Use the safety selection we opened the record with - let result = match self + let res_rx = match self .outbound_set_value( rpc_processor, key, @@ -577,36 +577,39 @@ impl StorageManager { } }; - // Regain the lock after network access - let mut inner = self.lock().await?; + // Wait for the first result + let Ok(result) = res_rx.recv_async().await else { + apibail_internal!("failed 
to receive results"); + }; + let result = result?; + let partial = result.fanout_result.kind.is_partial(); - // Report on fanout result offline - let was_offline = self.check_fanout_set_offline(key, subkey, &result.fanout_result); - if was_offline { - // Failed to write, try again later - inner.add_offline_subkey_write(key, subkey, safety_selection); + // Process the returned result + let out = self + .process_outbound_set_value_result( + key, + subkey, + signed_value_data.value_data().clone(), + safety_selection, + result, + ) + .await?; + + // If there's more to process, do it in the background + if partial { + let mut inner = self.lock().await?; + self.process_deferred_outbound_set_value_result_inner( + &mut inner, + res_rx, + key, + subkey, + out.clone() + .unwrap_or_else(|| signed_value_data.value_data().clone()), + safety_selection, + ); } - // Keep the list of nodes that returned a value for later reference - inner.process_fanout_results(key, core::iter::once((subkey, &result.fanout_result)), true); - - // Return the new value if it differs from what was asked to set - if result.signed_value_data.value_data() != signed_value_data.value_data() { - // Record the newer value and send and update since it is different than what we just set - inner - .handle_set_local_value( - key, - subkey, - result.signed_value_data.clone(), - WatchUpdateMode::UpdateAll, - ) - .await?; - - return Ok(Some(result.signed_value_data.value_data().clone())); - } - - // If the original value was set, return None - Ok(None) + Ok(out) } /// Create,update or cancel an outbound watch to a DHT value diff --git a/veilid-core/src/storage_manager/set_value.rs b/veilid-core/src/storage_manager/set_value.rs index a063436e..dc9609e9 100644 --- a/veilid-core/src/storage_manager/set_value.rs +++ b/veilid-core/src/storage_manager/set_value.rs @@ -10,9 +10,12 @@ struct OutboundSetValueContext { pub missed_since_last_set: usize, /// The parsed schema from the descriptor if we have one pub schema: 
DHTSchema, + /// If we should send a partial update with the current context + pub send_partial_update: bool, } /// The result of the outbound_set_value operation +#[derive(Clone, Debug)] pub(super) struct OutboundSetValueResult { /// Fanout result pub fanout_result: FanoutResult, @@ -30,7 +33,7 @@ impl StorageManager { safety_selection: SafetySelection, value: Arc, descriptor: Arc, - ) -> VeilidAPIResult { + ) -> VeilidAPIResult>> { let routing_table = rpc_processor.routing_table(); // Get the DHT parameters for 'SetValue' @@ -50,6 +53,9 @@ impl StorageManager { inner.get_value_nodes(key)?.unwrap_or_default() }; + // Make the return channel + let (out_tx, out_rx) = flume::unbounded::>(); + // Make do-set-value answer context let schema = descriptor.schema()?; let context = Arc::new(Mutex::new(OutboundSetValueContext { @@ -57,156 +63,330 @@ impl StorageManager { value_nodes: vec![], missed_since_last_set: 0, schema, + send_partial_update: false, })); // Routine to call to generate fanout - let call_routine = |next_node: NodeRef| { - let rpc_processor = rpc_processor.clone(); + let call_routine = { let context = context.clone(); - let descriptor = descriptor.clone(); - async move { - let send_descriptor = true; // xxx check if next_node needs the descriptor or not + let rpc_processor = rpc_processor.clone(); - // get most recent value to send - let value = { - let ctx = context.lock(); - ctx.value.clone() - }; + move |next_node: NodeRef| { + let rpc_processor = rpc_processor.clone(); + let context = context.clone(); + let descriptor = descriptor.clone(); + async move { + let send_descriptor = true; // xxx check if next_node needs the descriptor or not - // send across the wire - let sva = network_result_try!( - rpc_processor - .clone() - .rpc_call_set_value( - Destination::direct(next_node.clone()).with_safety(safety_selection), - key, - subkey, - (*value).clone(), - (*descriptor).clone(), - send_descriptor, - ) - .await? 
- ); + // get most recent value to send + let value = { + let ctx = context.lock(); + ctx.value.clone() + }; - // If the node was close enough to possibly set the value - let mut ctx = context.lock(); - if !sva.answer.set { - ctx.missed_since_last_set += 1; + // send across the wire + let sva = network_result_try!( + rpc_processor + .clone() + .rpc_call_set_value( + Destination::direct(next_node.clone()) + .with_safety(safety_selection), + key, + subkey, + (*value).clone(), + (*descriptor).clone(), + send_descriptor, + ) + .await? + ); - // Return peers if we have some - log_network_result!(debug "SetValue missed: {}, fanout call returned peers {}", ctx.missed_since_last_set, sva.answer.peers.len()); - return Ok(NetworkResult::value(sva.answer.peers)); - } + // If the node was close enough to possibly set the value + let mut ctx = context.lock(); + if !sva.answer.set { + ctx.missed_since_last_set += 1; - // See if we got a value back - let Some(value) = sva.answer.value else { - // No newer value was found and returned, so increase our consensus count - ctx.value_nodes.push(next_node); + // Return peers if we have some + log_network_result!(debug "SetValue missed: {}, fanout call returned peers {}", ctx.missed_since_last_set, sva.answer.peers.len()); + return Ok(NetworkResult::value(sva.answer.peers)); + } + + // See if we got a value back + let Some(value) = sva.answer.value else { + // No newer value was found and returned, so increase our consensus count + ctx.value_nodes.push(next_node); + ctx.missed_since_last_set = 0; + // Send an update since it was set + if ctx.value_nodes.len() == 1 { + ctx.send_partial_update = true; + } + + // Return peers if we have some + log_network_result!(debug "SetValue returned no value, fanout call returned peers {}", sva.answer.peers.len()); + return Ok(NetworkResult::value(sva.answer.peers)); + }; + + // Keep the value if we got one and it is newer and it passes schema validation + log_dht!(debug "SetValue got value back: 
len={} seq={}", value.value_data().data().len(), value.value_data().seq()); + + // Validate with schema + if !ctx.schema.check_subkey_value_data( + descriptor.owner(), + subkey, + value.value_data(), + ) { + // Validation failed, ignore this value and pretend we never saw this node + return Ok(NetworkResult::invalid_message(format!( + "Schema validation failed on subkey {}", + subkey + ))); + } + + // If we got a value back it should be different than the one we are setting + // But in the case of a benign bug, we can just move to the next node + if ctx.value.value_data() == value.value_data() { + + ctx.value_nodes.push(next_node); + ctx.missed_since_last_set = 0; + + // Send an update since it was set + if ctx.value_nodes.len() == 1 { + ctx.send_partial_update = true; + } + + return Ok(NetworkResult::value(sva.answer.peers)); + } + + // We have a prior value, ensure this is a newer sequence number + let prior_seq = ctx.value.value_data().seq(); + let new_seq = value.value_data().seq(); + if new_seq < prior_seq { + // If the sequence number is older node should have not returned a value here. 
+ // Skip this node and its closer list because it is misbehaving + // Ignore this value and pretend we never saw this node + return Ok(NetworkResult::invalid_message("Sequence number is older")); + } + + // If the sequence number is greater or equal, keep it + // even if the sequence number is the same, accept all conflicts in an attempt to resolve them + ctx.value = Arc::new(value); + // One node has shown us this value so far + ctx.value_nodes = vec![next_node]; ctx.missed_since_last_set = 0; + // Send an update since the value changed + ctx.send_partial_update = true; - // Return peers if we have some - log_network_result!(debug "SetValue returned no value, fanout call returned peers {}", sva.answer.peers.len()); - return Ok(NetworkResult::value(sva.answer.peers)); - }; - - // Keep the value if we got one and it is newer and it passes schema validation - log_dht!(debug "SetValue got value back: len={} seq={}", value.value_data().data().len(), value.value_data().seq()); - - // Validate with schema - if !ctx.schema.check_subkey_value_data( - descriptor.owner(), - subkey, - value.value_data(), - ) { - // Validation failed, ignore this value and pretend we never saw this node - return Ok(NetworkResult::invalid_message("Schema validation failed")); + Ok(NetworkResult::value(sva.answer.peers)) } - - // If we got a value back it should be different than the one we are setting - // But in the case of a benign bug, we can just move to the next node - if ctx.value.value_data() == value.value_data() { - ctx.value_nodes.push(next_node); - ctx.missed_since_last_set = 0; - return Ok(NetworkResult::value(sva.answer.peers)); - } - - // We have a prior value, ensure this is a newer sequence number - let prior_seq = ctx.value.value_data().seq(); - let new_seq = value.value_data().seq(); - if new_seq < prior_seq { - // If the sequence number is older node should have not returned a value here. 
- // Skip this node and its closer list because it is misbehaving - // Ignore this value and pretend we never saw this node - return Ok(NetworkResult::invalid_message("Sequence number is older")); - } - - // If the sequence number is greater or equal, keep it - // even if the sequence number is the same, accept all conflicts in an attempt to resolve them - ctx.value = Arc::new(value); - // One node has shown us this value so far - ctx.value_nodes = vec![next_node]; - ctx.missed_since_last_set = 0; - Ok(NetworkResult::value(sva.answer.peers)) } }; // Routine to call to check if we're done at each step - let check_done = |_closest_nodes: &[NodeRef]| { + let check_done = { + let context = context.clone(); + let out_tx = out_tx.clone(); + move |_closest_nodes: &[NodeRef]| { + let mut ctx = context.lock(); + + // send partial update if desired + if ctx.send_partial_update { + ctx.send_partial_update = false; + + // return partial result + let fanout_result = FanoutResult { + kind: FanoutResultKind::Partial, + value_nodes: ctx.value_nodes.clone(), + }; + let out=OutboundSetValueResult { + fanout_result, + signed_value_data: ctx.value.clone()}; + log_dht!(debug "Sending partial SetValue result: {:?}", out); + + if let Err(e) = out_tx.send(Ok(out)) { + log_dht!(debug "Sending partial SetValue result failed: {}", e); + } + } + + // If we have reached sufficient consensus, return done + if ctx.value_nodes.len() >= consensus_count { + return Some(()); + } + // If we have missed more than our consensus count since our last set, return done + // This keeps the traversal from searching too many nodes when we aren't converging + // Only do this if we have gotten at least half our desired sets. 
+ if ctx.value_nodes.len() >= ((consensus_count + 1) / 2) + && ctx.missed_since_last_set >= consensus_count + { + return Some(()); + } + None + } + }; + + // Call the fanout in a spawned task + spawn(Box::pin(async move { + let fanout_call = FanoutCall::new( + routing_table.clone(), + key, + key_count, + fanout, + timeout_us, + capability_fanout_node_info_filter(vec![CAP_DHT]), + call_routine, + check_done, + ); + + let kind = match fanout_call.run(init_fanout_queue).await { + // If we don't finish in the timeout (too much time passed checking for consensus) + TimeoutOr::Timeout => FanoutResultKind::Timeout, + // If we finished with or without consensus (enough nodes returning the same value) + TimeoutOr::Value(Ok(Some(()))) => FanoutResultKind::Finished, + // If we ran out of nodes before getting consensus) + TimeoutOr::Value(Ok(None)) => FanoutResultKind::Exhausted, + // Failed + TimeoutOr::Value(Err(e)) => { + // If we finished with an error, return that + log_dht!(debug "SetValue fanout error: {}", e); + if let Err(e) = out_tx.send(Err(e.into())) { + log_dht!(debug "Sending SetValue fanout error failed: {}", e); + } + return; + } + }; + let ctx = context.lock(); + let fanout_result = FanoutResult { + kind, + value_nodes: ctx.value_nodes.clone(), + }; + log_network_result!(debug "SetValue Fanout: {:?}", fanout_result); - // If we have reached sufficient consensus, return done - if ctx.value_nodes.len() >= consensus_count { - return Some(()); + if let Err(e) = out_tx.send(Ok(OutboundSetValueResult { + fanout_result, + signed_value_data: ctx.value.clone(), + })) { + log_dht!(debug "Sending SetValue result failed: {}", e); } - // If we have missed more than our consensus count since our last set, return done - // This keeps the traversal from searching too many nodes when we aren't converging - // Only do this if we have gotten at least half our desired sets. 
- if ctx.value_nodes.len() >= ((consensus_count + 1) / 2) - && ctx.missed_since_last_set >= consensus_count - { - return Some(()); - } - None - }; + })) + .detach(); - // Call the fanout - let fanout_call = FanoutCall::new( - routing_table.clone(), - key, - key_count, - fanout, - timeout_us, - capability_fanout_node_info_filter(vec![CAP_DHT]), - call_routine, - check_done, - ); - - let kind = match fanout_call.run(init_fanout_queue).await { - // If we don't finish in the timeout (too much time passed checking for consensus) - TimeoutOr::Timeout => FanoutResultKind::Timeout, - // If we finished with or without consensus (enough nodes returning the same value) - TimeoutOr::Value(Ok(Some(()))) => FanoutResultKind::Finished, - // If we ran out of nodes before getting consensus) - TimeoutOr::Value(Ok(None)) => FanoutResultKind::Exhausted, - // Failed - TimeoutOr::Value(Err(e)) => { - // If we finished with an error, return that - log_dht!(debug "SetValue Fanout Error: {}", e); - return Err(e.into()); - } - }; - let ctx = context.lock(); - let fanout_result = FanoutResult { - kind, - value_nodes: ctx.value_nodes.clone(), - }; - log_network_result!(debug "SetValue Fanout: {:?}", fanout_result); - - Ok(OutboundSetValueResult { - fanout_result, - signed_value_data: ctx.value.clone(), - }) + Ok(out_rx) } + + pub(super) fn process_deferred_outbound_set_value_result_inner(&self, inner: &mut StorageManagerInner, + res_rx: flume::Receiver>, + key: TypedKey, subkey: ValueSubkey, last_value_data: ValueData, safety_selection: SafetySelection, ) { + let this = self.clone(); + let last_value_data = Arc::new(Mutex::new(last_value_data)); + inner.process_deferred_results( + res_rx, + Box::new( + move |result: VeilidAPIResult| -> SendPinBoxFuture { + let this = this.clone(); + let last_value_data = last_value_data.clone(); + Box::pin(async move { + let result = match result { + Ok(v) => v, + Err(e) => { + log_rtab!(debug "Deferred fanout error: {}", e); + return false; + } + }; + let 
is_partial = result.fanout_result.kind.is_partial(); + let lvd = last_value_data.lock().clone(); + let value_data = match this.process_outbound_set_value_result(key, subkey, lvd, safety_selection, result).await { + Ok(Some(v)) => v, + Ok(None) => { + return is_partial; + } + Err(e) => { + log_rtab!(debug "Deferred fanout error: {}", e); + return false; + } + }; + if is_partial { + // If more partial results show up, don't send an update until we're done + return true; + } + // If we processed the final result, possibly send an update + // if the sequence number changed since our first partial update + // Send with a max count as this is not attached to any watch + let changed = { + let mut lvd = last_value_data.lock(); + if lvd.seq() != value_data.seq() { + *lvd = value_data.clone(); + true + } else { + false + } + }; + if changed { + if let Err(e) = this.update_callback_value_change(key,ValueSubkeyRangeSet::single(subkey), u32::MAX, Some(value_data)).await { + log_rtab!(debug "Failed sending deferred fanout value change: {}", e); + } + } + + // Return done + false + }) + }, + ), + ); + } + + pub(super) async fn process_outbound_set_value_result(&self, key: TypedKey, subkey: ValueSubkey, last_value_data: ValueData, safety_selection: SafetySelection, result: set_value::OutboundSetValueResult) -> Result, VeilidAPIError> { + + // Regain the lock after network access + let mut inner = self.lock().await?; + + // Report on fanout result offline + let was_offline = self.check_fanout_set_offline(key, subkey, &result.fanout_result); + if was_offline { + // Failed to write, try again later + inner.add_offline_subkey_write(key, subkey, safety_selection); + } + + // Keep the list of nodes that returned a value for later reference + inner.process_fanout_results(key, core::iter::once((subkey, &result.fanout_result)), true); + + // Return the new value if it differs from what was asked to set + if result.signed_value_data.value_data() != &last_value_data { + // Record the newer 
value and send and update since it is different than what we just set + inner + .handle_set_local_value( + key, + subkey, + result.signed_value_data.clone(), + WatchUpdateMode::UpdateAll, + ) + .await?; + + return Ok(Some(result.signed_value_data.value_data().clone())); + } + + // If the original value was set, return None + Ok(None) + } + + + + + + + + + + + + + + + + + + + + + /// Handle a received 'Set Value' query /// Returns a None if the value passed in was set diff --git a/veilid-core/src/storage_manager/tasks/offline_subkey_writes.rs b/veilid-core/src/storage_manager/tasks/offline_subkey_writes.rs index ced02735..164cf7ae 100644 --- a/veilid-core/src/storage_manager/tasks/offline_subkey_writes.rs +++ b/veilid-core/src/storage_manager/tasks/offline_subkey_writes.rs @@ -68,24 +68,50 @@ impl StorageManager { ) .await; match osvres { - Ok(result) => { - let was_offline = - self.check_fanout_set_offline(*key, subkey, &result.fanout_result); - if !was_offline { - if let Some(update_callback) = opt_update_callback.clone() { - // Send valuechange with dead count and no subkeys - update_callback(VeilidUpdate::ValueChange(Box::new( - VeilidValueChange { - key: *key, - subkeys: ValueSubkeyRangeSet::single(subkey), - count: u32::MAX, - value: Some(result.signed_value_data.value_data().clone()), - }, - ))); + Ok(res_rx) => { + while let Ok(res) = res_rx.recv_async().await { + match res { + Ok(result) => { + let partial = result.fanout_result.kind.is_partial(); + // Skip partial results in offline subkey write mode + if partial { + continue; + } + + // Process non-partial setvalue result + let was_offline = self.check_fanout_set_offline( + *key, + subkey, + &result.fanout_result, + ); + if !was_offline { + if let Some(update_callback) = opt_update_callback.clone() { + // Send valuechange with dead count and no subkeys + update_callback(VeilidUpdate::ValueChange(Box::new( + VeilidValueChange { + key: *key, + subkeys: ValueSubkeyRangeSet::single(subkey), + count: u32::MAX, 
+ value: Some( + result + .signed_value_data + .value_data() + .clone(), + ), + }, + ))); + } + written_subkeys.insert(subkey); + }; + fanout_results.push((subkey, result.fanout_result)); + break; + } + Err(e) => { + log_stor!(debug "failed to get offline subkey write result: {}:{} {}", key, subkey, e); + break; + } } - written_subkeys.insert(subkey); - }; - fanout_results.push((subkey, result.fanout_result)); + } } Err(e) => { log_stor!(debug "failed to write offline subkey: {}:{} {}", key, subkey, e); diff --git a/veilid-flutter/example/pubspec.lock b/veilid-flutter/example/pubspec.lock index 4eebc57b..92ebaf92 100644 --- a/veilid-flutter/example/pubspec.lock +++ b/veilid-flutter/example/pubspec.lock @@ -21,10 +21,10 @@ packages: dependency: transitive description: name: async_tools - sha256: "972f68ab663724d86260a31e363c1355ff493308441b872bf4e7b8adc67c832c" + sha256: e783ac6ed5645c86da34240389bb3a000fc5e3ae6589c6a482eb24ece7217681 url: "https://pub.dev" source: hosted - version: "0.1.0" + version: "0.1.1" boolean_selector: dependency: transitive description: @@ -85,10 +85,10 @@ packages: dependency: "direct main" description: name: cupertino_icons - sha256: d57953e10f9f8327ce64a508a355f0b1ec902193f66288e8cb5070e7c47eeb2d + sha256: ba631d1c7f7bef6b729a622b7b752645a2d076dba9976925b8f25725a30e1ee6 url: "https://pub.dev" source: hosted - version: "1.0.6" + version: "1.0.8" equatable: dependency: transitive description: @@ -187,34 +187,34 @@ packages: dependency: transitive description: name: json_annotation - sha256: b10a7b2ff83d83c777edba3c6a0f97045ddadd56c944e1a23a3fdf43a1bf4467 + sha256: "1ce844379ca14835a50d2f019a3099f419082cfdd231cd86a142af94dd5c6bb1" url: "https://pub.dev" source: hosted - version: "4.8.1" + version: "4.9.0" leak_tracker: dependency: transitive description: name: leak_tracker - sha256: "78eb209deea09858f5269f5a5b02be4049535f568c07b275096836f01ea323fa" + sha256: "7f0df31977cb2c0b88585095d168e689669a2cc9b97c309665e3386f3e9d341a" url: 
"https://pub.dev" source: hosted - version: "10.0.0" + version: "10.0.4" leak_tracker_flutter_testing: dependency: transitive description: name: leak_tracker_flutter_testing - sha256: b46c5e37c19120a8a01918cfaf293547f47269f7cb4b0058f21531c2465d6ef0 + sha256: "06e98f569d004c1315b991ded39924b21af84cf14cc94791b8aea337d25b57f8" url: "https://pub.dev" source: hosted - version: "2.0.1" + version: "3.0.3" leak_tracker_testing: dependency: transitive description: name: leak_tracker_testing - sha256: a597f72a664dbd293f3bfc51f9ba69816f84dcd403cdac7066cb3f6003f3ab47 + sha256: "6ba465d5d76e67ddf503e1161d1f4a6bc42306f9d66ca1e8f079a47290fb06d3" url: "https://pub.dev" source: hosted - version: "2.0.1" + version: "3.0.1" lint_hard: dependency: "direct dev" description: @@ -259,10 +259,10 @@ packages: dependency: transitive description: name: meta - sha256: d584fa6707a52763a52446f02cc621b077888fb63b93bbcb1143a7be5a0c0c04 + sha256: "7687075e408b093f36e6bbf6c91878cc0d4cd10f409506f7bc996f68220b9136" url: "https://pub.dev" source: hosted - version: "1.11.0" + version: "1.12.0" path: dependency: "direct main" description: @@ -275,26 +275,26 @@ packages: dependency: "direct main" description: name: path_provider - sha256: b27217933eeeba8ff24845c34003b003b2b22151de3c908d0e679e8fe1aa078b + sha256: c9e7d3a4cd1410877472158bee69963a4579f78b68c65a2b7d40d1a7a88bb161 url: "https://pub.dev" source: hosted - version: "2.1.2" + version: "2.1.3" path_provider_android: dependency: transitive description: name: path_provider_android - sha256: "477184d672607c0a3bf68fbbf601805f92ef79c82b64b4d6eb318cbca4c48668" + sha256: a248d8146ee5983446bf03ed5ea8f6533129a12b11f12057ad1b4a67a2b3b41d url: "https://pub.dev" source: hosted - version: "2.2.2" + version: "2.2.4" path_provider_foundation: dependency: transitive description: name: path_provider_foundation - sha256: "5a7999be66e000916500be4f15a3633ebceb8302719b47b9cc49ce924125350f" + sha256: f234384a3fdd67f989b4d54a5d73ca2a6c422fa55ae694381ae0f4375cd1ea16 url: 
"https://pub.dev" source: hosted - version: "2.3.2" + version: "2.4.0" path_provider_linux: dependency: transitive description: @@ -424,10 +424,10 @@ packages: dependency: transitive description: name: test_api - sha256: "5c2f730018264d276c20e4f1503fd1308dfbbae39ec8ee63c5236311ac06954b" + sha256: "9955ae474176f7ac8ee4e989dadfb411a58c30415bcfb648fa04b2b8a03afa7f" url: "https://pub.dev" source: hosted - version: "0.6.1" + version: "0.7.0" typed_data: dependency: transitive description: @@ -462,10 +462,10 @@ packages: dependency: transitive description: name: vm_service - sha256: b3d56ff4341b8f182b96aceb2fa20e3dcb336b9f867bc0eafc0de10f1048e957 + sha256: "3923c89304b715fb1eb6423f017651664a03bf5f4b29983627c4da791f74a4ec" url: "https://pub.dev" source: hosted - version: "13.0.0" + version: "14.2.1" webdriver: dependency: transitive description: @@ -478,10 +478,10 @@ packages: dependency: transitive description: name: win32 - sha256: "8cb58b45c47dcb42ab3651533626161d6b67a2921917d8d429791f76972b3480" + sha256: a79dbe579cb51ecd6d30b17e0cae4e0ea15e2c0e66f69ad4198f22a6789e94f4 url: "https://pub.dev" source: hosted - version: "5.3.0" + version: "5.5.1" xdg_directories: dependency: transitive description: @@ -507,5 +507,5 @@ packages: source: hosted version: "0.0.6" sdks: - dart: ">=3.3.4 <4.0.0" + dart: ">=3.4.0 <4.0.0" flutter: ">=3.19.1" diff --git a/veilid-flutter/example/pubspec.yaml b/veilid-flutter/example/pubspec.yaml index 38a9b52d..e07c673b 100644 --- a/veilid-flutter/example/pubspec.yaml +++ b/veilid-flutter/example/pubspec.yaml @@ -31,9 +31,9 @@ dependencies: # The following adds the Cupertino Icons font to your application. # Use with the CupertinoIcons class for iOS style icons. 
- cupertino_icons: ^1.0.6 + cupertino_icons: ^1.0.8 loggy: ^2.0.3 - path_provider: ^2.1.2 + path_provider: ^2.1.3 path: ^1.9.0 xterm: ^4.0.0 flutter_acrylic: ^1.1.3 diff --git a/veilid-flutter/pubspec.yaml b/veilid-flutter/pubspec.yaml index e7162e3c..2e325d5e 100644 --- a/veilid-flutter/pubspec.yaml +++ b/veilid-flutter/pubspec.yaml @@ -21,18 +21,18 @@ dependencies: flutter_web_plugins: sdk: flutter freezed_annotation: ^2.4.1 - json_annotation: ^4.8.1 + json_annotation: ^4.9.0 path: ^1.9.0 - path_provider: ^2.1.2 + path_provider: ^2.1.3 system_info2: ^4.0.0 system_info_plus: ^0.0.5 dev_dependencies: - build_runner: ^2.4.8 + build_runner: ^2.4.10 flutter_test: sdk: flutter - freezed: ^2.4.7 - json_serializable: ^6.7.1 + freezed: ^2.5.2 + json_serializable: ^6.8.0 lint_hard: ^4.0.0 # The following section is specific to Flutter. From 23d160a52544a2297dfc8f61c3e1365ed70223bc Mon Sep 17 00:00:00 2001 From: Christien Rioux Date: Tue, 21 May 2024 14:09:08 -0400 Subject: [PATCH 09/14] more race condition cleanup --- veilid-core/src/network_manager/native/mod.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/veilid-core/src/network_manager/native/mod.rs b/veilid-core/src/network_manager/native/mod.rs index 5b168292..0b664f8e 100644 --- a/veilid-core/src/network_manager/native/mod.rs +++ b/veilid-core/src/network_manager/native/mod.rs @@ -910,6 +910,8 @@ impl Network { pub async fn shutdown(&self) { log_net!(debug "starting low level network shutdown"); + self.inner.lock().network_started = None; + let routing_table = self.routing_table(); // Stop all tasks From 95bccaff46cd9af06466d8218de03b38ca936a44 Mon Sep 17 00:00:00 2001 From: Christien Rioux Date: Tue, 21 May 2024 14:57:34 -0400 Subject: [PATCH 10/14] address filter cleanout during restart --- veilid-core/src/network_manager/address_filter.rs | 2 ++ veilid-core/src/routing_table/tasks/bootstrap.rs | 2 +- 2 files changed, 3 insertions(+), 1 deletion(-) diff --git 
a/veilid-core/src/network_manager/address_filter.rs b/veilid-core/src/network_manager/address_filter.rs index b39dde90..f2b3c3dd 100644 --- a/veilid-core/src/network_manager/address_filter.rs +++ b/veilid-core/src/network_manager/address_filter.rs @@ -105,6 +105,8 @@ impl AddressFilter { // When the network restarts, some of the address filter can be cleared pub fn restart(&self) { let mut inner = self.inner.lock(); + inner.conn_count_by_ip4.clear(); + inner.conn_count_by_ip6_prefix.clear(); inner.dial_info_failures.clear(); } diff --git a/veilid-core/src/routing_table/tasks/bootstrap.rs b/veilid-core/src/routing_table/tasks/bootstrap.rs index 4b1ce1cd..64f5a53c 100644 --- a/veilid-core/src/routing_table/tasks/bootstrap.rs +++ b/veilid-core/src/routing_table/tasks/bootstrap.rs @@ -286,7 +286,7 @@ impl RoutingTable { { Ok(NodeContactMethod::Direct(v)) => v, Ok(v) => { - log_rtab!(warn "invalid contact method for bootstrap, ignoring peer: {:?}", v); + log_rtab!(debug "invalid contact method for bootstrap, ignoring peer: {:?}", v); // let _ = routing_table // .network_manager() // .get_node_contact_method(nr.clone()); From f9b726806c799ed50c22979f2c7154b495312b82 Mon Sep 17 00:00:00 2001 From: Christien Rioux Date: Tue, 21 May 2024 15:23:09 -0400 Subject: [PATCH 11/14] clippy fixes --- veilid-core/src/routing_table/node_ref.rs | 64 +++++++++++------------ 1 file changed, 32 insertions(+), 32 deletions(-) diff --git a/veilid-core/src/routing_table/node_ref.rs b/veilid-core/src/routing_table/node_ref.rs index 49f56e95..5abc3dd0 100644 --- a/veilid-core/src/routing_table/node_ref.rs +++ b/veilid-core/src/routing_table/node_ref.rs @@ -65,13 +65,13 @@ pub(crate) trait NodeRefBase: Sized { } } - fn is_filter_dead(&self) -> bool { - if let Some(filter) = &self.common().filter { - filter.is_dead() - } else { - false - } - } + // fn is_filter_dead(&self) -> bool { + // if let Some(filter) = &self.common().filter { + // filter.is_dead() + // } else { + // false + // } + // } fn 
routing_domain_set(&self) -> RoutingDomainSet { self.common() @@ -117,15 +117,15 @@ pub(crate) trait NodeRefBase: Sized { e.update_node_status(routing_domain, node_status); }); } - fn envelope_support(&self) -> Vec { - self.operate(|_rti, e| e.envelope_support()) - } + // fn envelope_support(&self) -> Vec { + // self.operate(|_rti, e| e.envelope_support()) + // } fn add_envelope_version(&self, envelope_version: u8) { self.operate_mut(|_rti, e| e.add_envelope_version(envelope_version)) } - fn set_envelope_support(&self, envelope_support: Vec) { - self.operate_mut(|_rti, e| e.set_envelope_support(envelope_support)) - } + // fn set_envelope_support(&self, envelope_support: Vec) { + // self.operate_mut(|_rti, e| e.set_envelope_support(envelope_support)) + // } fn best_envelope_version(&self) -> Option { self.operate(|_rti, e| e.best_envelope_version()) } @@ -167,25 +167,25 @@ pub(crate) trait NodeRefBase: Sized { fn set_seen_our_node_info_ts(&self, routing_domain: RoutingDomain, seen_ts: Timestamp) { self.operate_mut(|_rti, e| e.set_seen_our_node_info_ts(routing_domain, seen_ts)); } - fn network_class(&self, routing_domain: RoutingDomain) -> Option { - self.operate(|_rt, e| e.node_info(routing_domain).map(|n| n.network_class())) - } - fn outbound_protocols(&self, routing_domain: RoutingDomain) -> Option { - self.operate(|_rt, e| e.node_info(routing_domain).map(|n| n.outbound_protocols())) - } - fn address_types(&self, routing_domain: RoutingDomain) -> Option { - self.operate(|_rt, e| e.node_info(routing_domain).map(|n| n.address_types())) - } - fn node_info_outbound_filter(&self, routing_domain: RoutingDomain) -> DialInfoFilter { - let mut dif = DialInfoFilter::all(); - if let Some(outbound_protocols) = self.outbound_protocols(routing_domain) { - dif = dif.with_protocol_type_set(outbound_protocols); - } - if let Some(address_types) = self.address_types(routing_domain) { - dif = dif.with_address_type_set(address_types); - } - dif - } + // fn network_class(&self, 
routing_domain: RoutingDomain) -> Option { + // self.operate(|_rt, e| e.node_info(routing_domain).map(|n| n.network_class())) + // } + // fn outbound_protocols(&self, routing_domain: RoutingDomain) -> Option { + // self.operate(|_rt, e| e.node_info(routing_domain).map(|n| n.outbound_protocols())) + // } + // fn address_types(&self, routing_domain: RoutingDomain) -> Option { + // self.operate(|_rt, e| e.node_info(routing_domain).map(|n| n.address_types())) + // } + // fn node_info_outbound_filter(&self, routing_domain: RoutingDomain) -> DialInfoFilter { + // let mut dif = DialInfoFilter::all(); + // if let Some(outbound_protocols) = self.outbound_protocols(routing_domain) { + // dif = dif.with_protocol_type_set(outbound_protocols); + // } + // if let Some(address_types) = self.address_types(routing_domain) { + // dif = dif.with_address_type_set(address_types); + // } + // dif + // } fn relay(&self, routing_domain: RoutingDomain) -> EyreResult> { self.operate_mut(|rti, e| { let Some(sni) = e.signed_node_info(routing_domain) else { From 33141776f3401224777beaa05473c6cfcb1c9dd9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=CE=94=20=C7=80=20=CE=9E=20=C8=BC?= Date: Wed, 22 May 2024 22:48:17 +0100 Subject: [PATCH 12/14] Add support for no container repository (i.e. 
a new fork) --- .gitlab-ci.yml | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 7029f752..49e8c04d 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -35,12 +35,17 @@ build_cache: stage: prepare script: - apk update && apk add jq && apk add curl - - 'CONT_REPO_ID=$(curl "https://gitlab.com/api/graphql" --header "Content-Type: application/json" --request POST --data "{\"query\": \"query { project(fullPath: \\\"$CI_PROJECT_PATH\\\" ) { containerRepositories( name: \\\"build-cache\\\" ) { nodes { id }}}} \"}" | jq -r ".data.project.containerRepositories.nodes[0].id")' - - 'CACHE_TS=$(curl "https://gitlab.com/api/graphql" --header "Content-Type: application/json" --request POST --data "{\"query\": \"query { containerRepository(id: \\\"$CONT_REPO_ID\\\") { tags( first: 1 name: \\\"latest\\\" ) { nodes { location publishedAt }}}} \"}" | jq -r ".data.containerRepository.tags.nodes[0].publishedAt" | cut -d "+" -f1 | sed "s/T/ /g" )' - - CACHE_EPOCH=$(date -d "$CACHE_TS" +%s) + - if ! 
docker manifest inspect $CI_REGISTRY_IMAGE/build-cache:latest > /dev/null; then + - CACHE_EPOCH=0 + - else + - 'CONT_REPO_ID=$(curl "https://gitlab.com/api/graphql" --header "Content-Type: application/json" --request POST --data "{\"query\": \"query { project(fullPath: \\\"$CI_PROJECT_PATH\\\" ) { containerRepositories( name: \\\"build-cache\\\" ) { nodes { id }}}} \"}" | jq -r ".data.project.containerRepositories.nodes[0].id")' + - echo "CONTAINER REPO ID = $CONT_REPO_ID" + - 'CACHE_TS=$(curl "https://gitlab.com/api/graphql" --header "Content-Type: application/json" --request POST --data "{\"query\": \"query { containerRepository(id: \\\"$CONT_REPO_ID\\\") { tags( first: 1 name: \\\"latest\\\" ) { nodes { location publishedAt }}}} \"}" | jq -r ".data.containerRepository.tags.nodes[0].publishedAt" | cut -d "+" -f1 | sed "s/T/ /g" )' + - CACHE_EPOCH=$(date -d "$CACHE_TS" +%s) + - fi - EARTHLY_EPOCH=$(git log -1 --format=%ct Earthfile) - echo "CACHE EPOCH = $CACHE_EPOCH, EARTHLY EPOCH = $EARTHLY_EPOCH" - - if ! docker manifest inspect $CI_REGISTRY_IMAGE/build-cache:latest > /dev/null || [[ $EARTHLY_EPOCH -gt $CACHE_EPOCH ]] || [[ "$CI_PIPELINE_SOURCE" == "schedule" ]] || [[ "$CI_PIPELINE_SOURCE" == "web" ]] ; then + - if [[ $EARTHLY_EPOCH -gt $CACHE_EPOCH ]] || [[ "$CI_PIPELINE_SOURCE" == "schedule" ]] || [[ "$CI_PIPELINE_SOURCE" == "web" ]] ; then - *earthly_setup - earthly --use-inline-cache --save-inline-cache --strict --push -P +build-linux-cache $project_args - else From 05180252e4bcceca28af633d28ba360e83c7e5c7 Mon Sep 17 00:00:00 2001 From: Christien Rioux Date: Fri, 31 May 2024 16:20:58 -0400 Subject: [PATCH 13/14] * API Breaking Change: CryptoSystem.verify() should return bool, and reserve errors for error cases, not validation failures. 
* API Breaking Change: VeilidAPI.verify_signatures() returns Option now Fixes #313 --- veilid-core/src/crypto/crypto_system.rs | 2 +- veilid-core/src/crypto/envelope.rs | 7 +- veilid-core/src/crypto/mod.rs | 19 +-- veilid-core/src/crypto/none/mod.rs | 26 ++- veilid-core/src/crypto/receipt.rs | 7 +- veilid-core/src/crypto/tests/test_types.rs | 54 ++++--- veilid-core/src/crypto/vld0/mod.rs | 28 ++-- .../src/routing_table/route_spec_store/mod.rs | 13 +- .../types/signed_direct_node_info.rs | 5 +- .../types/signed_relayed_node_info.rs | 5 +- .../coders/operations/operation_get_value.rs | 7 +- .../coders/operations/operation_set_value.rs | 7 +- .../operations/operation_watch_value.rs | 7 +- .../types/signed_value_data.rs | 2 +- .../types/signed_value_descriptor.rs | 7 +- .../src/veilid_api/json_api/crypto_system.rs | 9 +- veilid-core/src/veilid_api/json_api/mod.rs | 19 ++- .../src/veilid_api/json_api/process.rs | 20 ++- .../example/integration_test/app_test.dart | 2 + .../example/integration_test/test_crypto.dart | 35 +++++ veilid-flutter/example/macos/Podfile.lock | 2 +- veilid-flutter/lib/routing_context.dart | 7 + veilid-flutter/lib/veilid.dart | 8 +- veilid-flutter/lib/veilid_crypto.dart | 2 +- veilid-flutter/lib/veilid_ffi.dart | 8 +- veilid-flutter/lib/veilid_js.dart | 8 +- veilid-flutter/rust/src/dart_ffi.rs | 4 +- veilid-python/tests/test_crypto.py | 33 ++++ veilid-python/veilid/api.py | 10 +- veilid-python/veilid/json_api.py | 35 +++-- veilid-python/veilid/operations.py | 1 + veilid-python/veilid/schema/RecvMessage.json | 148 +++++++++++++----- veilid-python/veilid/schema/Request.json | 36 +++-- veilid-wasm/src/lib.rs | 6 +- veilid-wasm/src/veilid_crypto_js.rs | 28 ++-- veilid-wasm/tests/src/veilidCrypto.test.ts | 2 +- 36 files changed, 445 insertions(+), 174 deletions(-) diff --git a/veilid-core/src/crypto/crypto_system.rs b/veilid-core/src/crypto/crypto_system.rs index f0bae8de..61e140b6 100644 --- a/veilid-core/src/crypto/crypto_system.rs +++ 
b/veilid-core/src/crypto/crypto_system.rs @@ -46,7 +46,7 @@ pub trait CryptoSystem { // Authentication fn sign(&self, key: &PublicKey, secret: &SecretKey, data: &[u8]) -> VeilidAPIResult; - fn verify(&self, key: &PublicKey, data: &[u8], signature: &Signature) -> VeilidAPIResult<()>; + fn verify(&self, key: &PublicKey, data: &[u8], signature: &Signature) -> VeilidAPIResult; // AEAD Encrypt/Decrypt fn aead_overhead(&self) -> usize; diff --git a/veilid-core/src/crypto/envelope.rs b/veilid-core/src/crypto/envelope.rs index dc92e317..d371e182 100644 --- a/veilid-core/src/crypto/envelope.rs +++ b/veilid-core/src/crypto/envelope.rs @@ -172,9 +172,12 @@ impl Envelope { ); // Validate signature - vcrypto + if !vcrypto .verify(&sender_id, &data[0..(data.len() - 64)], &signature) - .map_err(VeilidAPIError::internal)?; + .map_err(VeilidAPIError::internal)? + { + apibail_parse_error!("signature verification of envelope failed", signature); + } // Return envelope Ok(Self { diff --git a/veilid-core/src/crypto/mod.rs b/veilid-core/src/crypto/mod.rs index 8be06b81..3ed636e4 100644 --- a/veilid-core/src/crypto/mod.rs +++ b/veilid-core/src/crypto/mod.rs @@ -238,27 +238,28 @@ impl Crypto { } /// Signature set verification - /// Returns the set of signature cryptokinds that validate and are supported - /// If any cryptokinds are supported and do not validate, the whole operation - /// returns an error + /// Returns Some() the set of signature cryptokinds that validate and are supported + /// Returns None if any cryptokinds are supported and do not validate pub fn verify_signatures( &self, - node_ids: &[TypedKey], + public_keys: &[TypedKey], data: &[u8], typed_signatures: &[TypedSignature], - ) -> VeilidAPIResult { - let mut out = TypedKeyGroup::with_capacity(node_ids.len()); + ) -> VeilidAPIResult> { + let mut out = TypedKeyGroup::with_capacity(public_keys.len()); for sig in typed_signatures { - for nid in node_ids { + for nid in public_keys { if nid.kind == sig.kind { if let 
Some(vcrypto) = self.get(sig.kind) { - vcrypto.verify(&nid.value, data, &sig.value)?; + if !vcrypto.verify(&nid.value, data, &sig.value)? { + return Ok(None); + } out.add(*nid); } } } } - Ok(out) + Ok(Some(out)) } /// Signature set generation diff --git a/veilid-core/src/crypto/none/mod.rs b/veilid-core/src/crypto/none/mod.rs index 48b80106..8f4e1642 100644 --- a/veilid-core/src/crypto/none/mod.rs +++ b/veilid-core/src/crypto/none/mod.rs @@ -143,13 +143,13 @@ impl CryptoSystem for CryptoSystemNONE { // Validation fn validate_keypair(&self, dht_key: &PublicKey, dht_key_secret: &SecretKey) -> bool { let data = vec![0u8; 512]; - let sig = match self.sign(dht_key, dht_key_secret, &data) { - Ok(s) => s, - Err(_) => { - return false; - } + let Ok(sig) = self.sign(dht_key, dht_key_secret, &data) else { + return false; }; - self.verify(dht_key, &data, &sig).is_ok() + let Ok(v) = self.verify(dht_key, &data, &sig) else { + return false; + }; + v } fn validate_hash(&self, data: &[u8], dht_key: &PublicKey) -> bool { let bytes = *blake3::hash(data).as_bytes(); @@ -205,7 +205,7 @@ impl CryptoSystem for CryptoSystemNONE { dht_key: &PublicKey, data: &[u8], signature: &Signature, - ) -> VeilidAPIResult<()> { + ) -> VeilidAPIResult { let mut dig = Blake3Digest512::new(); dig.update(data); let sig = dig.finalize(); @@ -217,19 +217,13 @@ impl CryptoSystem for CryptoSystemNONE { .copy_from_slice(&do_xor_32(&in_sig_bytes[32..64], &signature.bytes[32..64])); if !is_bytes_eq_32(&verify_bytes[0..32], 0u8) { - return Err(VeilidAPIError::parse_error( - "Verification failed", - "signature 0..32 is invalid", - )); + return Ok(false); } if !is_bytes_eq_32(&do_xor_32(&verify_bytes[32..64], &dht_key.bytes), 0xFFu8) { - return Err(VeilidAPIError::parse_error( - "Verification failed", - "signature 32..64 is invalid", - )); + return Ok(false); } - Ok(()) + return Ok(true); } // AEAD Encrypt/Decrypt diff --git a/veilid-core/src/crypto/receipt.rs b/veilid-core/src/crypto/receipt.rs index 
56bd4c90..a2922172 100644 --- a/veilid-core/src/crypto/receipt.rs +++ b/veilid-core/src/crypto/receipt.rs @@ -129,9 +129,12 @@ impl Receipt { ); // Validate signature - vcrypto + if !vcrypto .verify(&sender_id, &data[0..(data.len() - 64)], &signature) - .map_err(VeilidAPIError::generic)?; + .map_err(VeilidAPIError::generic)? + { + apibail_parse_error!("signature failure in receipt", signature); + } // Get nonce let nonce: Nonce = Nonce::new( diff --git a/veilid-core/src/crypto/tests/test_types.rs b/veilid-core/src/crypto/tests/test_types.rs index 6d08d34b..2636d916 100644 --- a/veilid-core/src/crypto/tests/test_types.rs +++ b/veilid-core/src/crypto/tests/test_types.rs @@ -64,49 +64,55 @@ pub async fn test_sign_and_verify(vcrypto: CryptoSystemVersion) { assert_eq!( vcrypto.verify(&dht_key, LOREM_IPSUM.as_bytes(), &a1), - Ok(()) + Ok(true) ); assert_eq!( vcrypto.verify(&dht_key2, LOREM_IPSUM.as_bytes(), &a2), - Ok(()) + Ok(true) + ); + assert_eq!( + vcrypto.verify(&dht_key, LOREM_IPSUM.as_bytes(), &a2), + Ok(false) + ); + assert_eq!( + vcrypto.verify(&dht_key2, LOREM_IPSUM.as_bytes(), &a1), + Ok(false) ); - assert!(vcrypto - .verify(&dht_key, LOREM_IPSUM.as_bytes(), &a2) - .is_err()); - assert!(vcrypto - .verify(&dht_key2, LOREM_IPSUM.as_bytes(), &a1) - .is_err()); // Try verifications that should work assert_eq!( vcrypto.verify(&dht_key, LOREM_IPSUM.as_bytes(), &dht_sig), - Ok(()) + Ok(true) ); assert_eq!( vcrypto.verify(&dht_key, LOREM_IPSUM.as_bytes(), &dht_sig_b), - Ok(()) + Ok(true) ); assert_eq!( vcrypto.verify(&dht_key2, LOREM_IPSUM.as_bytes(), &dht_sig2), - Ok(()) + Ok(true) ); assert_eq!( vcrypto.verify(&dht_key, CHEEZBURGER.as_bytes(), &dht_sig_c), - Ok(()) + Ok(true) ); // Try verifications that shouldn't work - assert!(vcrypto - .verify(&dht_key2, LOREM_IPSUM.as_bytes(), &dht_sig) - .is_err()); - assert!(vcrypto - .verify(&dht_key, LOREM_IPSUM.as_bytes(), &dht_sig2) - .is_err()); - assert!(vcrypto - .verify(&dht_key2, CHEEZBURGER.as_bytes(), &dht_sig_c) - 
.is_err()); - assert!(vcrypto - .verify(&dht_key, CHEEZBURGER.as_bytes(), &dht_sig) - .is_err()); + assert_eq!( + vcrypto.verify(&dht_key2, LOREM_IPSUM.as_bytes(), &dht_sig), + Ok(false) + ); + assert_eq!( + vcrypto.verify(&dht_key, LOREM_IPSUM.as_bytes(), &dht_sig2), + Ok(false) + ); + assert_eq!( + vcrypto.verify(&dht_key2, CHEEZBURGER.as_bytes(), &dht_sig_c), + Ok(false) + ); + assert_eq!( + vcrypto.verify(&dht_key, CHEEZBURGER.as_bytes(), &dht_sig), + Ok(false) + ); } pub async fn test_key_conversions(vcrypto: CryptoSystemVersion) { diff --git a/veilid-core/src/crypto/vld0/mod.rs b/veilid-core/src/crypto/vld0/mod.rs index b2b2ce1c..a8c6382d 100644 --- a/veilid-core/src/crypto/vld0/mod.rs +++ b/veilid-core/src/crypto/vld0/mod.rs @@ -161,13 +161,13 @@ impl CryptoSystem for CryptoSystemVLD0 { // Validation fn validate_keypair(&self, dht_key: &PublicKey, dht_key_secret: &SecretKey) -> bool { let data = vec![0u8; 512]; - let sig = match self.sign(dht_key, dht_key_secret, &data) { - Ok(s) => s, - Err(_) => { - return false; - } + let Ok(sig) = self.sign(dht_key, dht_key_secret, &data) else { + return false; }; - self.verify(dht_key, &data, &sig).is_ok() + let Ok(v) = self.verify(dht_key, &data, &sig) else { + return false; + }; + v } fn validate_hash(&self, data: &[u8], dht_key: &PublicKey) -> bool { let bytes = *blake3::hash(data).as_bytes(); @@ -219,7 +219,9 @@ impl CryptoSystem for CryptoSystemVLD0 { let sig = Signature::new(sig_bytes.to_bytes()); - self.verify(dht_key, data, &sig)?; + if !self.verify(dht_key, data, &sig)? 
{ + apibail_internal!("newly created signature does not verify"); + } Ok(sig) } @@ -228,7 +230,7 @@ impl CryptoSystem for CryptoSystemVLD0 { dht_key: &PublicKey, data: &[u8], signature: &Signature, - ) -> VeilidAPIResult<()> { + ) -> VeilidAPIResult { let pk = ed::VerifyingKey::from_bytes(&dht_key.bytes) .map_err(|e| VeilidAPIError::parse_error("Public key is invalid", e))?; let sig = ed::Signature::from_bytes(&signature.bytes); @@ -236,9 +238,13 @@ let mut dig: ed::Sha512 = ed::Sha512::default(); dig.update(data); - pk.verify_prehashed_strict(dig, Some(VEILID_DOMAIN_SIGN), &sig) - .map_err(|e| VeilidAPIError::parse_error("Verification failed", e))?; - Ok(()) + if pk + .verify_prehashed_strict(dig, Some(VEILID_DOMAIN_SIGN), &sig) + .is_err() + { + return Ok(false); + } + Ok(true) } // AEAD Encrypt/Decrypt diff --git a/veilid-core/src/routing_table/route_spec_store/mod.rs b/veilid-core/src/routing_table/route_spec_store/mod.rs index 82270aef..e51421ef 100644 --- a/veilid-core/src/routing_table/route_spec_store/mod.rs +++ b/veilid-core/src/routing_table/route_spec_store/mod.rs @@ -694,9 +694,16 @@ impl RouteSpecStore { } } else { // Verify a signature for a hop node along the route - if let Err(e) = vcrypto.verify(hop_public_key, data, &signatures[hop_n]) { - log_rpc!(debug "failed to verify signature for hop {} at {} on private route {}: {}", hop_n, hop_public_key, public_key, e); - return None; + match vcrypto.verify(hop_public_key, data, &signatures[hop_n]) { + Ok(true) => {} + Ok(false) => { + log_rpc!(debug "invalid signature for hop {} at {} on private route {}", hop_n, hop_public_key, public_key); + return None; + } + Err(e) => { + log_rpc!(debug "error verifying signature for hop {} at {} on private route {}: {}", hop_n, hop_public_key, public_key, e); + return None; + } } } } diff --git a/veilid-core/src/routing_table/types/signed_direct_node_info.rs b/veilid-core/src/routing_table/types/signed_direct_node_info.rs 
index ddc8198b..1cb69b1f 100644 --- a/veilid-core/src/routing_table/types/signed_direct_node_info.rs +++ b/veilid-core/src/routing_table/types/signed_direct_node_info.rs @@ -27,8 +27,11 @@ impl SignedDirectNodeInfo { let node_info_bytes = Self::make_signature_bytes(&self.node_info, self.timestamp)?; // Verify the signatures that we can - let validated_node_ids = + let opt_validated_node_ids = crypto.verify_signatures(node_ids, &node_info_bytes, &self.signatures)?; + let Some(validated_node_ids) = opt_validated_node_ids else { + apibail_generic!("verification error in direct node info"); + }; if validated_node_ids.is_empty() { apibail_generic!("no valid node ids in direct node info"); } diff --git a/veilid-core/src/routing_table/types/signed_relayed_node_info.rs b/veilid-core/src/routing_table/types/signed_relayed_node_info.rs index bed33ba0..13861bd4 100644 --- a/veilid-core/src/routing_table/types/signed_relayed_node_info.rs +++ b/veilid-core/src/routing_table/types/signed_relayed_node_info.rs @@ -53,8 +53,11 @@ impl SignedRelayedNodeInfo { &self.relay_info, self.timestamp, )?; - let validated_node_ids = + let opt_validated_node_ids = crypto.verify_signatures(node_ids, &node_info_bytes, &self.signatures)?; + let Some(validated_node_ids) = opt_validated_node_ids else { + apibail_generic!("verification error in relayed node info"); + }; if validated_node_ids.is_empty() { apibail_generic!("no valid node ids in relayed node info"); } diff --git a/veilid-core/src/rpc_processor/coders/operations/operation_get_value.rs b/veilid-core/src/rpc_processor/coders/operations/operation_get_value.rs index f56a3a42..1652b6fb 100644 --- a/veilid-core/src/rpc_processor/coders/operations/operation_get_value.rs +++ b/veilid-core/src/rpc_processor/coders/operations/operation_get_value.rs @@ -142,13 +142,16 @@ impl RPCOperationGetValueA { }; // And the signed value data - value + if !value .validate( descriptor.owner(), get_value_context.subkey, get_value_context.vcrypto.clone(), ) - 
.map_err(RPCError::protocol)?; + .map_err(RPCError::protocol)? + { + return Err(RPCError::protocol("signed value data did not validate")); + } } PeerInfo::validate_vec(&mut self.peers, validate_context.crypto.clone()); diff --git a/veilid-core/src/rpc_processor/coders/operations/operation_set_value.rs b/veilid-core/src/rpc_processor/coders/operations/operation_set_value.rs index 45d00643..8cd58636 100644 --- a/veilid-core/src/rpc_processor/coders/operations/operation_set_value.rs +++ b/veilid-core/src/rpc_processor/coders/operations/operation_set_value.rs @@ -149,13 +149,16 @@ impl RPCOperationSetValueA { if let Some(value) = &self.value { // And the signed value data - value + if !value .validate( set_value_context.descriptor.owner(), set_value_context.subkey, set_value_context.vcrypto.clone(), ) - .map_err(RPCError::protocol)?; + .map_err(RPCError::protocol)? + { + return Err(RPCError::protocol("signed value data did not validate")); + } } PeerInfo::validate_vec(&mut self.peers, validate_context.crypto.clone()); diff --git a/veilid-core/src/rpc_processor/coders/operations/operation_watch_value.rs b/veilid-core/src/rpc_processor/coders/operations/operation_watch_value.rs index 3eb12eed..261d8e36 100644 --- a/veilid-core/src/rpc_processor/coders/operations/operation_watch_value.rs +++ b/veilid-core/src/rpc_processor/coders/operations/operation_watch_value.rs @@ -88,9 +88,12 @@ impl RPCOperationWatchValueQ { self.count, self.watch_id, ); - vcrypto + if !vcrypto .verify(&self.watcher, &sig_data, &self.signature) - .map_err(RPCError::protocol)?; + .map_err(RPCError::protocol)? 
+ { + return Err(RPCError::protocol("failed to validate watcher signature")); + } // Count is zero means cancelling, so there should always be a watch id in this case if self.count == 0 && self.watch_id.is_none() { diff --git a/veilid-core/src/storage_manager/types/signed_value_data.rs b/veilid-core/src/storage_manager/types/signed_value_data.rs index ea1e8ec6..e89354f0 100644 --- a/veilid-core/src/storage_manager/types/signed_value_data.rs +++ b/veilid-core/src/storage_manager/types/signed_value_data.rs @@ -20,7 +20,7 @@ impl SignedValueData { owner: &PublicKey, subkey: ValueSubkey, vcrypto: CryptoSystemVersion, - ) -> VeilidAPIResult<()> { + ) -> VeilidAPIResult { let node_info_bytes = Self::make_signature_bytes(&self.value_data, owner, subkey)?; // validate signature vcrypto.verify(self.value_data.writer(), &node_info_bytes, &self.signature) diff --git a/veilid-core/src/storage_manager/types/signed_value_descriptor.rs b/veilid-core/src/storage_manager/types/signed_value_descriptor.rs index 0a10a383..10832c38 100644 --- a/veilid-core/src/storage_manager/types/signed_value_descriptor.rs +++ b/veilid-core/src/storage_manager/types/signed_value_descriptor.rs @@ -19,7 +19,12 @@ impl SignedValueDescriptor { pub fn validate(&self, vcrypto: CryptoSystemVersion) -> VeilidAPIResult<()> { // validate signature - vcrypto.verify(&self.owner, &self.schema_data, &self.signature)?; + if !vcrypto.verify(&self.owner, &self.schema_data, &self.signature)? 
{ + apibail_parse_error!( + "failed to validate signature of signed value descriptor", + self.signature + ); + } // validate schema DHTSchema::try_from(self.schema_data.as_slice())?; Ok(()) diff --git a/veilid-core/src/veilid_api/json_api/crypto_system.rs b/veilid-core/src/veilid_api/json_api/crypto_system.rs index 6bb960a9..a4261f4c 100644 --- a/veilid-core/src/veilid_api/json_api/crypto_system.rs +++ b/veilid-core/src/veilid_api/json_api/crypto_system.rs @@ -18,6 +18,7 @@ pub struct CryptoSystemResponse { #[serde(tag = "cs_op")] pub enum CryptoSystemRequestOp { Release, + Kind, CachedDh { #[schemars(with = "String")] key: PublicKey, @@ -108,7 +109,7 @@ pub enum CryptoSystemRequestOp { #[schemars(with = "String")] data: Vec, #[schemars(with = "String")] - secret: Signature, + signature: Signature, }, AeadOverhead, DecryptAead { @@ -150,6 +151,10 @@ pub enum CryptoSystemRequestOp { pub enum CryptoSystemResponseOp { InvalidId, Release, + Kind { + #[schemars(with = "String")] + value: CryptoKind, + }, CachedDh { #[serde(flatten)] #[schemars(with = "ApiResult")] @@ -219,7 +224,7 @@ pub enum CryptoSystemResponseOp { }, Verify { #[serde(flatten)] - result: ApiResult<()>, + result: ApiResult, }, AeadOverhead { value: u32, diff --git a/veilid-core/src/veilid_api/json_api/mod.rs b/veilid-core/src/veilid_api/json_api/mod.rs index 507fb218..7b7a2d38 100644 --- a/veilid-core/src/veilid_api/json_api/mod.rs +++ b/veilid-core/src/veilid_api/json_api/mod.rs @@ -201,8 +201,8 @@ pub enum ResponseOp { CryptoSystem(CryptoSystemResponse), VerifySignatures { #[serde(flatten)] - #[schemars(with = "ApiResult>")] - result: ApiResultWithVecString, + #[schemars(with = "ApiResult>>")] + result: ApiResultWithOptVecString>, }, GenerateSignatures { #[serde(flatten)] @@ -308,6 +308,21 @@ where }, } +#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)] +#[serde(untagged)] +pub enum ApiResultWithOptVecString +where + T: Clone + fmt::Debug, +{ + Ok { + #[schemars(with = "Option>")] + value: 
T, + }, + Err { + error: VeilidAPIError, + }, +} + pub fn emit_schemas(out: &mut HashMap) { let schema_request = schema_for!(Request); let schema_recv_message = schema_for!(RecvMessage); diff --git a/veilid-core/src/veilid_api/json_api/process.rs b/veilid-core/src/veilid_api/json_api/process.rs index d342f959..c4d6d338 100644 --- a/veilid-core/src/veilid_api/json_api/process.rs +++ b/veilid-core/src/veilid_api/json_api/process.rs @@ -28,6 +28,15 @@ pub fn to_json_api_result_with_vec_string( } } +pub fn to_json_api_result_with_opt_vec_string( + r: VeilidAPIResult, +) -> json_api::ApiResultWithOptVecString { + match r { + Err(e) => json_api::ApiResultWithOptVecString::Err { error: e }, + Ok(v) => json_api::ApiResultWithOptVecString::Ok { value: v }, + } +} + pub fn to_json_api_result_with_vec_u8(r: VeilidAPIResult>) -> json_api::ApiResultWithVecU8 { match r { Err(e) => json_api::ApiResultWithVecU8::Err { error: e }, @@ -462,6 +471,7 @@ impl JsonRequestProcessor { self.release_crypto_system(csr.cs_id); CryptoSystemResponseOp::Release {} } + CryptoSystemRequestOp::Kind => CryptoSystemResponseOp::Kind { value: csv.kind() }, CryptoSystemRequestOp::CachedDh { key, secret } => CryptoSystemResponseOp::CachedDh { result: to_json_api_result_with_string(csv.cached_dh(&key, &secret)), }, @@ -532,8 +542,12 @@ impl JsonRequestProcessor { CryptoSystemRequestOp::Sign { key, secret, data } => CryptoSystemResponseOp::Sign { result: to_json_api_result_with_string(csv.sign(&key, &secret, &data)), }, - CryptoSystemRequestOp::Verify { key, data, secret } => CryptoSystemResponseOp::Verify { - result: to_json_api_result(csv.verify(&key, &data, &secret)), + CryptoSystemRequestOp::Verify { + key, + data, + signature, + } => CryptoSystemResponseOp::Verify { + result: to_json_api_result(csv.verify(&key, &data, &signature)), }, CryptoSystemRequestOp::AeadOverhead => CryptoSystemResponseOp::AeadOverhead { value: csv.aead_overhead() as u32, @@ -766,7 +780,7 @@ impl JsonRequestProcessor { } }; 
ResponseOp::VerifySignatures { - result: to_json_api_result_with_vec_string(crypto.verify_signatures( + result: to_json_api_result_with_opt_vec_string(crypto.verify_signatures( &node_ids, &data, &signatures, diff --git a/veilid-flutter/example/integration_test/app_test.dart b/veilid-flutter/example/integration_test/app_test.dart index 22b23224..eb391933 100644 --- a/veilid-flutter/example/integration_test/app_test.dart +++ b/veilid-flutter/example/integration_test/app_test.dart @@ -30,6 +30,8 @@ void main() { test('get cryptosystem', testGetCryptoSystem); test('get cryptosystem invalid', testGetCryptoSystemInvalid); test('hash and verify password', testHashAndVerifyPassword); + test('sign and verify signature', testSignAndVerifySignature); + test('sign and verify signatures', testSignAndVerifySignatures); }); group('Table DB Tests', () { diff --git a/veilid-flutter/example/integration_test/test_crypto.dart b/veilid-flutter/example/integration_test/test_crypto.dart index fd159326..04bd1554 100644 --- a/veilid-flutter/example/integration_test/test_crypto.dart +++ b/veilid-flutter/example/integration_test/test_crypto.dart @@ -32,6 +32,41 @@ Future testHashAndVerifyPassword() async { expect(await cs.verifyPassword(utf8.encode('abc1235'), phash), isFalse); } +Future testSignAndVerifySignature() async { + final cs = await Veilid.instance.bestCryptoSystem(); + final kp1 = await cs.generateKeyPair(); + final kp2 = await cs.generateKeyPair(); + + // Signature match + final sig = await cs.sign(kp1.key, kp1.secret, utf8.encode('abc123')); + expect(await cs.verify(kp1.key, utf8.encode('abc123'), sig), isTrue); + + // Signature mismatch + final sig2 = await cs.sign(kp1.key, kp1.secret, utf8.encode('abc1234')); + expect(await cs.verify(kp1.key, utf8.encode('abc1234'), sig2), isTrue); + expect(await cs.verify(kp1.key, utf8.encode('abc12345'), sig2), isFalse); + expect(await cs.verify(kp2.key, utf8.encode('abc1234'), sig2), isFalse); +} + +Future testSignAndVerifySignatures() 
async { + final cs = await Veilid.instance.bestCryptoSystem(); + final kind = cs.kind(); + final kp1 = await cs.generateKeyPair(); + + // Signature match + final sigs = await Veilid.instance.generateSignatures( + utf8.encode('abc123'), [TypedKeyPair.fromKeyPair(kind, kp1)]); + expect( + await Veilid.instance.verifySignatures( + [TypedKey(kind: kind, value: kp1.key)], utf8.encode('abc123'), sigs), + equals([TypedKey(kind: kind, value: kp1.key)])); + // Signature mismatch + expect( + await Veilid.instance.verifySignatures( + [TypedKey(kind: kind, value: kp1.key)], utf8.encode('abc1234'), sigs), + isNull); +} + Future testGenerateSharedSecret() async { final cs = await Veilid.instance.bestCryptoSystem(); diff --git a/veilid-flutter/example/macos/Podfile.lock b/veilid-flutter/example/macos/Podfile.lock index 88db902f..cd7fcd0a 100644 --- a/veilid-flutter/example/macos/Podfile.lock +++ b/veilid-flutter/example/macos/Podfile.lock @@ -27,7 +27,7 @@ EXTERNAL SOURCES: SPEC CHECKSUMS: FlutterMacOS: 8f6f14fa908a6fb3fba0cd85dbd81ec4b251fb24 macos_window_utils: 933f91f64805e2eb91a5bd057cf97cd097276663 - path_provider_foundation: 3784922295ac71e43754bd15e0653ccfd36a147c + path_provider_foundation: 2b6b4c569c0fb62ec74538f866245ac84301af46 veilid: a54f57b7bcf0e4e072fe99272d76ca126b2026d0 PODFILE CHECKSUM: 73d2f470b1d889e27fcfda1d6e6efec66f98af3f diff --git a/veilid-flutter/lib/routing_context.dart b/veilid-flutter/lib/routing_context.dart index c097dbbe..3c789f26 100644 --- a/veilid-flutter/lib/routing_context.dart +++ b/veilid-flutter/lib/routing_context.dart @@ -115,6 +115,13 @@ extension DHTRecordDescriptorExt on DHTRecordDescriptor { return KeyPair(key: owner, secret: ownerSecret!); } + TypedKey? ownerTypedSecret() { + if (ownerSecret == null) { + return null; + } + return TypedKey(kind: key.kind, value: ownerSecret!); + } + TypedKeyPair? 
ownerTypedKeyPair() { if (ownerSecret == null) { return null; diff --git a/veilid-flutter/lib/veilid.dart b/veilid-flutter/lib/veilid.dart index fbcd6f54..058899a9 100644 --- a/veilid-flutter/lib/veilid.dart +++ b/veilid-flutter/lib/veilid.dart @@ -44,6 +44,10 @@ Object? veilidApiToEncodable(Object? value) { List Function(dynamic) jsonListConstructor( T Function(dynamic) jsonConstructor) => (dynamic j) => (j as List).map(jsonConstructor).toList(); +List? Function(dynamic) optJsonListConstructor( + T Function(dynamic) jsonConstructor) => + (dynamic j) => + j == null ? null : (j as List).map(jsonConstructor).toList(); ////////////////////////////////////// /// VeilidVersion @@ -152,8 +156,8 @@ abstract class Veilid { List validCryptoKinds(); Future getCryptoSystem(CryptoKind kind); Future bestCryptoSystem(); - Future> verifySignatures( - List nodeIds, Uint8List data, List signatures); + Future?> verifySignatures(List publicKeys, + Uint8List data, List signatures); Future> generateSignatures( Uint8List data, List keyPairs); Future generateKeyPair(CryptoKind kind); diff --git a/veilid-flutter/lib/veilid_crypto.dart b/veilid-flutter/lib/veilid_crypto.dart index 186d8e01..4dbe4e87 100644 --- a/veilid-flutter/lib/veilid_crypto.dart +++ b/veilid-flutter/lib/veilid_crypto.dart @@ -214,7 +214,7 @@ abstract class VeilidCryptoSystem { Future signWithKeyPair(KeyPair keyPair, Uint8List data) => sign(keyPair.key, keyPair.secret, data); - Future verify(PublicKey key, Uint8List data, Signature signature); + Future verify(PublicKey key, Uint8List data, Signature signature); Future aeadOverhead(); Future decryptAead(Uint8List body, Nonce nonce, SharedSecret sharedSecret, Uint8List? 
associatedData); diff --git a/veilid-flutter/lib/veilid_ffi.dart b/veilid-flutter/lib/veilid_ffi.dart index aa289b9d..b6b1effd 100644 --- a/veilid-flutter/lib/veilid_ffi.dart +++ b/veilid-flutter/lib/veilid_ffi.dart @@ -1154,7 +1154,7 @@ class VeilidCryptoSystemFFI extends VeilidCryptoSystem { } @override - Future verify( + Future verify( PublicKey key, Uint8List data, Signature signature) async { final nativeKey = jsonEncode(key).toNativeUtf8(); final nativeEncodedData = base64UrlNoPadEncode(data).toNativeUtf8(); @@ -1164,7 +1164,7 @@ class VeilidCryptoSystemFFI extends VeilidCryptoSystem { final sendPort = recvPort.sendPort; _ffi._cryptoVerify(sendPort.nativePort, _kind, nativeKey, nativeEncodedData, nativeSignature); - return processFutureVoid(recvPort.first); + return processFuturePlain(recvPort.first); } @override @@ -1742,7 +1742,7 @@ class VeilidFFI extends Veilid { VeilidCryptoSystemFFI._(this, _bestCryptoKind()); @override - Future> verifySignatures(List nodeIds, + Future?> verifySignatures(List nodeIds, Uint8List data, List signatures) async { final nativeNodeIds = jsonEncode(nodeIds).toNativeUtf8(); final nativeData = base64UrlNoPadEncode(data).toNativeUtf8(); @@ -1752,7 +1752,7 @@ class VeilidFFI extends Veilid { final sendPort = recvPort.sendPort; _verifySignatures( sendPort.nativePort, nativeNodeIds, nativeData, nativeSignatures); - return processFutureJson( + return processFutureOptJson( jsonListConstructor(TypedKey.fromJson), recvPort.first); } diff --git a/veilid-flutter/lib/veilid_js.dart b/veilid-flutter/lib/veilid_js.dart index 12660ae7..6a6c9a6b 100644 --- a/veilid-flutter/lib/veilid_js.dart +++ b/veilid-flutter/lib/veilid_js.dart @@ -359,7 +359,7 @@ class VeilidCryptoSystemJS extends VeilidCryptoSystem { ])))); @override - Future verify(PublicKey key, Uint8List data, Signature signature) => + Future verify(PublicKey key, Uint8List data, Signature signature) => _wrapApiPromise(js_util.callMethod(wasm, 'crypto_verify', [ _kind, jsonEncode(key), 
@@ -655,10 +655,10 @@ class VeilidJS extends Veilid { this, js_util.callMethod(wasm, 'best_crypto_kind', [])); @override - Future> verifySignatures(List nodeIds, + Future?> verifySignatures(List nodeIds, Uint8List data, List signatures) async => - jsonListConstructor(TypedKey.fromJson)(jsonDecode(await _wrapApiPromise( - js_util.callMethod(wasm, 'verify_signatures', [ + optJsonListConstructor(TypedKey.fromJson)(jsonDecode( + await _wrapApiPromise(js_util.callMethod(wasm, 'verify_signatures', [ jsonEncode(nodeIds), base64UrlNoPadEncode(data), jsonEncode(signatures) diff --git a/veilid-flutter/rust/src/dart_ffi.rs b/veilid-flutter/rust/src/dart_ffi.rs index d1a00caf..262ae3be 100644 --- a/veilid-flutter/rust/src/dart_ffi.rs +++ b/veilid-flutter/rust/src/dart_ffi.rs @@ -1525,8 +1525,8 @@ pub extern "C" fn crypto_verify( let csv = crypto.get(kind).ok_or_else(|| { veilid_core::VeilidAPIError::invalid_argument("crypto_verify", "kind", kind.to_string()) })?; - csv.verify(&key, &data, &signature)?; - APIRESULT_VOID + let out = csv.verify(&key, &data, &signature)?; + APIResult::Ok(out) }); } diff --git a/veilid-python/tests/test_crypto.py b/veilid-python/tests/test_crypto.py index 94754328..606a9e2c 100644 --- a/veilid-python/tests/test_crypto.py +++ b/veilid-python/tests/test_crypto.py @@ -45,6 +45,39 @@ async def test_hash_and_verify_password(api_connection: veilid.VeilidAPI): phash2 = await cs.hash_password(b"abc1234", salt) assert not await cs.verify_password(b"abc12345", phash) +@pytest.mark.asyncio +async def test_sign_and_verify_signature(api_connection: veilid.VeilidAPI): + cs = await api_connection.best_crypto_system() + async with cs: + kp1 = await cs.generate_key_pair() + kp2 = await cs.generate_key_pair() + + # Signature match + sig = await cs.sign(kp1.key(), kp1.secret(), b"abc123") + assert await cs.verify(kp1.key(), b"abc123", sig) + + # Signature mismatch + sig2 = await cs.sign(kp1.key(), kp1.secret(), b"abc1234") + assert await cs.verify(kp1.key(), 
b"abc1234", sig2) + assert not await cs.verify(kp1.key(), b"abc12345", sig2) + assert not await cs.verify(kp2.key(), b"abc1234", sig2) + + +@pytest.mark.asyncio +async def test_sign_and_verify_signatures(api_connection: veilid.VeilidAPI): + cs = await api_connection.best_crypto_system() + async with cs: + kind = await cs.kind() + kp1 = await cs.generate_key_pair() + + # Signature match + sigs = await api_connection.generate_signatures(b"abc123", [veilid.TypedKeyPair.from_value(kind, kp1)]) + keys = [veilid.TypedKey.from_value(kind,kp1.key())] + assert (await api_connection.verify_signatures(keys, b"abc123", sigs)) == keys + + # Signature mismatch + assert (await api_connection.verify_signatures([veilid.TypedKey.from_value(kind,kp1.key())], b"abc1234", sigs)) is None + @pytest.mark.asyncio async def test_generate_shared_secret(api_connection: veilid.VeilidAPI): diff --git a/veilid-python/veilid/api.py b/veilid-python/veilid/api.py index 3d2e29c0..d7c25be9 100644 --- a/veilid-python/veilid/api.py +++ b/veilid-python/veilid/api.py @@ -115,7 +115,7 @@ class TableDbTransaction(ABC): async def __aexit__(self, *excinfo): if not self.is_done(): await self.rollback() - + @abstractmethod def is_done(self) -> bool: pass @@ -186,6 +186,10 @@ class CryptoSystem(ABC): if not self.is_done(): await self.release() + @abstractmethod + def kind(self) -> types.CryptoKind: + pass + @abstractmethod def is_done(self) -> bool: pass @@ -267,7 +271,7 @@ class CryptoSystem(ABC): pass @abstractmethod - async def verify(self, key: types.PublicKey, data: bytes, signature: types.Signature): + async def verify(self, key: types.PublicKey, data: bytes, signature: types.Signature) -> bool: pass @abstractmethod @@ -384,7 +388,7 @@ class VeilidAPI(ABC): node_ids: list[types.TypedKey], data: bytes, signatures: list[types.TypedSignature], - ) -> list[types.TypedKey]: + ) -> Optional[list[types.TypedKey]]: pass @abstractmethod diff --git a/veilid-python/veilid/json_api.py 
b/veilid-python/veilid/json_api.py index 5745327e..f79d18e7 100644 --- a/veilid-python/veilid/json_api.py +++ b/veilid-python/veilid/json_api.py @@ -386,18 +386,21 @@ class _JsonVeilidAPI(VeilidAPI): async def verify_signatures( self, node_ids: list[TypedKey], data: bytes, signatures: list[TypedSignature] - ) -> list[TypedKey]: + ) -> Optional[list[TypedKey]]: + out = raise_api_result( + await self.send_ndjson_request( + Operation.VERIFY_SIGNATURES, + node_ids=node_ids, + data=data, + signatures=signatures, + ) + ) + if out is None: + return out return list( map( lambda x: TypedKey(x), - raise_api_result( - await self.send_ndjson_request( - Operation.VERIFY_SIGNATURES, - node_ids=node_ids, - data=data, - signatures=signatures, - ) - ), + out ) ) @@ -938,6 +941,18 @@ class _JsonCryptoSystem(CryptoSystem): # complain raise AssertionError("Should have released crypto system before dropping object") + + async def kind(self) -> CryptoKind: + return CryptoKind( + raise_api_result( + await self.api.send_ndjson_request( + Operation.CRYPTO_SYSTEM, + validate=validate_cs_op, + cs_id=self.cs_id, + cs_op=CryptoSystemOperation.KIND, + ) + ) + ) def is_done(self) -> bool: return self.done @@ -1160,7 +1175,7 @@ class _JsonCryptoSystem(CryptoSystem): ) async def verify(self, key: PublicKey, data: bytes, signature: Signature): - raise_api_result( + return raise_api_result( await self.api.send_ndjson_request( Operation.CRYPTO_SYSTEM, validate=validate_cs_op, diff --git a/veilid-python/veilid/operations.py b/veilid-python/veilid/operations.py index a41fb96b..24d26121 100644 --- a/veilid-python/veilid/operations.py +++ b/veilid-python/veilid/operations.py @@ -73,6 +73,7 @@ class TableDbTransactionOperation(StrEnum): class CryptoSystemOperation(StrEnum): INVALID_ID = "InvalidId" RELEASE = "Release" + KIND = "Kind" CACHED_DH = "CachedDh" COMPUTE_DH = "ComputeDh" GENERATE_SHARED_SECRET = "GenerateSharedSecret" diff --git a/veilid-python/veilid/schema/RecvMessage.json 
b/veilid-python/veilid/schema/RecvMessage.json index 21192210..ba59e0e7 100644 --- a/veilid-python/veilid/schema/RecvMessage.json +++ b/veilid-python/veilid/schema/RecvMessage.json @@ -1599,6 +1599,24 @@ } } }, + { + "type": "object", + "required": [ + "cs_op", + "value" + ], + "properties": { + "cs_op": { + "type": "string", + "enum": [ + "Kind" + ] + }, + "value": { + "type": "string" + } + } + }, { "type": "object", "anyOf": [ @@ -2039,7 +2057,7 @@ ], "properties": { "value": { - "type": "null" + "type": "boolean" } } }, @@ -2205,12 +2223,12 @@ "anyOf": [ { "type": "object", - "required": [ - "value" - ], "properties": { "value": { - "type": "array", + "type": [ + "array", + "null" + ], "items": { "type": "string" } @@ -2450,7 +2468,7 @@ ], "properties": { "id": { - "description": "Operation Id (pairs with Request, or empty if unidirectional)", + "description": "Operation Id (pairs with Request, or empty if unidirectional).", "default": 0, "type": "integer", "format": "uint32", @@ -2465,10 +2483,11 @@ } }, { + "description": "An update from the veilid-core to the host application describing a change to the internal state of the Veilid node.", "type": "object", "oneOf": [ { - "description": "A VeilidCore log message with optional backtrace", + "description": "A VeilidCore log message with optional backtrace.", "type": "object", "required": [ "kind", @@ -2497,7 +2516,7 @@ } }, { - "description": "Direct statement blob passed to hosting application for processing", + "description": "Direct statement blob passed to hosting application for processing.", "type": "object", "required": [ "kind", @@ -2528,7 +2547,7 @@ } }, { - "description": "Direct question blob passed to hosting application for processing to send an eventual AppReply", + "description": "Direct question blob passed to hosting application for processing to send an eventual AppReply.", "type": "object", "required": [ "call_id", @@ -2563,6 +2582,7 @@ } }, { + "description": "Describe the attachment state 
of the Veilid node", "type": "object", "required": [ "kind", @@ -2578,17 +2598,25 @@ ] }, "local_network_ready": { + "description": "If attached and there are enough eachable nodes in the routing table to perform all the actions of the LocalNetwork RoutingDomain.", "type": "boolean" }, "public_internet_ready": { + "description": "If attached and there are enough eachable nodes in the routing table to perform all the actions of the PublicInternet RoutingDomain, including things like private/safety route allocation and DHT operations.", "type": "boolean" }, "state": { - "$ref": "#/definitions/AttachmentState" + "description": "The overall quality of the routing table if attached, or the current state the attachment state machine.", + "allOf": [ + { + "$ref": "#/definitions/AttachmentState" + } + ] } } }, { + "description": "Describe the current network state of the Veilid node", "type": "object", "required": [ "bps_down", @@ -2599,9 +2627,11 @@ ], "properties": { "bps_down": { + "description": "The total number of bytes per second used by Veilid currently in the download direction.", "type": "string" }, "bps_up": { + "description": "The total number of bytes per second used by Veilid currently in the upload direction.", "type": "string" }, "kind": { @@ -2611,17 +2641,20 @@ ] }, "peers": { + "description": "The list of most recently accessed peers. 
This is not an active connection table, nor is representative of the entire routing table.", "type": "array", "items": { "$ref": "#/definitions/PeerTableData" } }, "started": { + "description": "If the network has been started or not.", "type": "boolean" } } }, { + "description": "Describe changes to the Veilid node configuration Currently this is only ever emitted once, however we reserve the right to add the ability to change the configuration or have it changed by the Veilid node itself during runtime.", "type": "object", "required": [ "config", @@ -2629,7 +2662,12 @@ ], "properties": { "config": { - "$ref": "#/definitions/VeilidConfigInner" + "description": "If the Veilid node configuration has changed the full new config will be here.", + "allOf": [ + { + "$ref": "#/definitions/VeilidConfigInner" + } + ] }, "kind": { "type": "string", @@ -2640,6 +2678,7 @@ } }, { + "description": "Describe a private route change that has happened", "type": "object", "required": [ "dead_remote_routes", @@ -2648,12 +2687,14 @@ ], "properties": { "dead_remote_routes": { + "description": "If a private route that was imported has died, it is listed here.", "type": "array", "items": { "type": "string" } }, "dead_routes": { + "description": "If a private route that was allocated has died, it is listed here.", "type": "array", "items": { "type": "string" @@ -2668,6 +2709,7 @@ } }, { + "description": "Describe when DHT records have subkey values changed", "type": "object", "required": [ "count", @@ -2677,11 +2719,13 @@ ], "properties": { "count": { + "description": "The count remaining on the watch that triggered this value change If there is no watch and this is received, it will be set to u32::MAX If this value is zero, any watch present on the value has died.", "type": "integer", "format": "uint32", "minimum": 0.0 }, "key": { + "description": "The DHT Record key that changed", "type": "string" }, "kind": { @@ -2691,6 +2735,7 @@ ] }, "subkeys": { + "description": "The portion of the 
DHT Record's subkeys that have changed If the subkey range is empty, any watch present on the value has died.", "type": "array", "items": { "type": "array", @@ -2711,6 +2756,7 @@ } }, "value": { + "description": "The (optional) value data for the first subkey in the subkeys range If 'subkeys' is not a single value, other values than the first value must be retrieved with RoutingContext::get_dht_value().", "anyOf": [ { "$ref": "#/definitions/ValueData" @@ -2752,7 +2798,7 @@ ], "definitions": { "AttachmentState": { - "description": "Attachment abstraction for network 'signal strength'", + "description": "Attachment abstraction for network 'signal strength'.", "type": "string", "enum": [ "Detached", @@ -2949,7 +2995,7 @@ } }, "FourCC": { - "description": "FOURCC code", + "description": "FOURCC code.", "type": "array", "items": { "type": "integer", @@ -3023,6 +3069,7 @@ } }, "PeerTableData": { + "description": "Describe a recently accessed peer", "type": "object", "required": [ "node_ids", @@ -3031,16 +3078,23 @@ ], "properties": { "node_ids": { + "description": "The node ids used by this peer", "type": "array", "items": { "type": "string" } }, "peer_address": { + "description": "The peer's human readable address.", "type": "string" }, "peer_stats": { - "$ref": "#/definitions/PeerStats" + "description": "Statistics we have collected on this peer.", + "allOf": [ + { + "$ref": "#/definitions/PeerStats" + } + ] } } }, @@ -3100,10 +3154,10 @@ } }, "SafetySelection": { - "description": "The choice of safety route to include in compiled routes", + "description": "The choice of safety route to include in compiled routes.", "oneOf": [ { - "description": "Don't use a safety route, only specify the sequencing preference", + "description": "Don't use a safety route, only specify the sequencing preference.", "type": "object", "required": [ "Unsafe" @@ -3116,7 +3170,7 @@ "additionalProperties": false }, { - "description": "Use a safety route and parameters specified by a 
SafetySpec", + "description": "Use a safety route and parameters specified by a SafetySpec.", "type": "object", "required": [ "Safe" @@ -3131,7 +3185,7 @@ ] }, "SafetySpec": { - "description": "Options for safety routes (sender privacy)", + "description": "Options for safety routes (sender privacy).", "type": "object", "required": [ "hop_count", @@ -3140,20 +3194,20 @@ ], "properties": { "hop_count": { - "description": "must be greater than 0", + "description": "Must be greater than 0.", "type": "integer", "format": "uint", "minimum": 0.0 }, "preferred_route": { - "description": "preferred safety route set id if it still exists", + "description": "Preferred safety route set id if it still exists.", "type": [ "string", "null" ] }, "sequencing": { - "description": "prefer connection-oriented sequenced protocols", + "description": "Prefer connection-oriented sequenced protocols.", "allOf": [ { "$ref": "#/definitions/Sequencing" @@ -3161,7 +3215,7 @@ ] }, "stability": { - "description": "prefer reliability over speed", + "description": "Prefer reliability over speed.", "allOf": [ { "$ref": "#/definitions/Stability" @@ -3504,7 +3558,7 @@ ] }, "VeilidConfigApplication": { - "description": "Application configuration\n\nConfigure web access to the Progressive Web App (PWA)\n\nTo be implemented...", + "description": "Application configuration.\n\nConfigure web access to the Progressive Web App (PWA).\n\nTo be implemented...", "type": "object", "required": [ "http", @@ -3549,7 +3603,7 @@ } }, "VeilidConfigDHT": { - "description": "Configure the Distributed Hash Table (DHT)", + "description": "Configure the Distributed Hash Table (DHT).", "type": "object", "required": [ "get_value_count", @@ -3689,7 +3743,7 @@ } }, "VeilidConfigHTTP": { - "description": "Enable and configure HTTP access to the Veilid node\n\n```yaml http: enabled: false listen_address: ':5150' path: 'app\" url: 'https://localhost:5150' ```", + "description": "Enable and configure HTTP access to the Veilid 
node.\n\n```yaml http: enabled: false listen_address: ':5150' path: 'app\" url: 'https://localhost:5150' ```", "type": "object", "required": [ "enabled", @@ -3715,7 +3769,7 @@ } }, "VeilidConfigHTTPS": { - "description": "Enable and configure HTTPS access to the Veilid node\n\n```yaml https: enabled: false listen_address: ':5150' path: 'app' url: 'https://localhost:5150' ```", + "description": "Enable and configure HTTPS access to the Veilid node.\n\n```yaml https: enabled: false listen_address: ':5150' path: 'app' url: 'https://localhost:5150' ```", "type": "object", "required": [ "enabled", @@ -3914,7 +3968,7 @@ } }, "VeilidConfigProtocol": { - "description": "Configure Network Protocols\n\nVeilid can communicate over UDP, TCP, and Web Sockets.\n\nAll protocols are available by default, and the Veilid node will sort out which protocol is used for each peer connection.", + "description": "Configure Network Protocols.\n\nVeilid can communicate over UDP, TCP, and Web Sockets.\n\nAll protocols are available by default, and the Veilid node will sort out which protocol is used for each peer connection.", "type": "object", "required": [ "tcp", @@ -3938,7 +3992,7 @@ } }, "VeilidConfigRPC": { - "description": "Configure RPC", + "description": "Configure RPC.", "type": "object", "required": [ "concurrency", @@ -3992,7 +4046,7 @@ } }, "VeilidConfigRoutingTable": { - "description": "Configure the network routing table", + "description": "Configure the network routing table.", "type": "object", "required": [ "bootstrap", @@ -4051,7 +4105,7 @@ } }, "VeilidConfigTCP": { - "description": "Enable and configure TCP\n\n```yaml tcp: connect: true listen: true max_connections: 32 listen_address: ':5150' public_address: ''", + "description": "Enable and configure TCP.\n\n```yaml tcp: connect: true listen: true max_connections: 32 listen_address: ':5150' public_address: ''", "type": "object", "required": [ "connect", @@ -4083,7 +4137,7 @@ } }, "VeilidConfigTLS": { - "description": 
"Configure TLS\n\n```yaml tls: certificate_path: /path/to/cert private_key_path: /path/to/private/key connection_initial_timeout_ms: 2000", + "description": "Configure TLS.\n\n```yaml tls: certificate_path: /path/to/cert private_key_path: /path/to/private/key connection_initial_timeout_ms: 2000", "type": "object", "required": [ "certificate_path", @@ -4120,7 +4174,7 @@ } }, "VeilidConfigUDP": { - "description": "Enable and configure UDP\n\n```yaml udp: enabled: true socket_pool_size: 0 listen_address: ':5150' public_address: '' ```", + "description": "Enable and configure UDP.\n\n```yaml udp: enabled: true socket_pool_size: 0 listen_address: ':5150' public_address: '' ```", "type": "object", "required": [ "enabled", @@ -4148,7 +4202,7 @@ } }, "VeilidConfigWS": { - "description": "Enable and configure Web Sockets\n\n```yaml ws: connect: true listen: true max_connections: 32 listen_address: ':5150' path: 'ws' url: 'ws://localhost:5150/ws'", + "description": "Enable and configure Web Sockets.\n\n```yaml ws: connect: true listen: true max_connections: 32 listen_address: ':5150' path: 'ws' url: 'ws://localhost:5150/ws'", "type": "object", "required": [ "connect", @@ -4184,7 +4238,7 @@ } }, "VeilidConfigWSS": { - "description": "Enable and configure Secure Web Sockets\n\n```yaml wss: connect: true listen: false max_connections: 32 listen_address: ':5150' path: 'ws' url: ''", + "description": "Enable and configure Secure Web Sockets.\n\n```yaml wss: connect: true listen: false max_connections: 32 listen_address: ':5150' path: 'ws' url: ''", "type": "object", "required": [ "connect", @@ -4220,7 +4274,7 @@ } }, "VeilidLogLevel": { - "description": "Log level for VeilidCore", + "description": "Log level for VeilidCore.", "type": "string", "enum": [ "Error", @@ -4231,6 +4285,7 @@ ] }, "VeilidState": { + "description": "A queriable state of the internals of veilid-core.", "type": "object", "required": [ "attachment", @@ -4250,6 +4305,7 @@ } }, "VeilidStateAttachment": { + 
"description": "Describe the attachment state of the Veilid node", "type": "object", "required": [ "local_network_ready", @@ -4258,28 +4314,42 @@ ], "properties": { "local_network_ready": { + "description": "If attached and there are enough eachable nodes in the routing table to perform all the actions of the LocalNetwork RoutingDomain.", "type": "boolean" }, "public_internet_ready": { + "description": "If attached and there are enough eachable nodes in the routing table to perform all the actions of the PublicInternet RoutingDomain, including things like private/safety route allocation and DHT operations.", "type": "boolean" }, "state": { - "$ref": "#/definitions/AttachmentState" + "description": "The overall quality of the routing table if attached, or the current state the attachment state machine.", + "allOf": [ + { + "$ref": "#/definitions/AttachmentState" + } + ] } } }, "VeilidStateConfig": { + "description": "Describe changes to the Veilid node configuration Currently this is only ever emitted once, however we reserve the right to add the ability to change the configuration or have it changed by the Veilid node itself during runtime.", "type": "object", "required": [ "config" ], "properties": { "config": { - "$ref": "#/definitions/VeilidConfigInner" + "description": "If the Veilid node configuration has changed the full new config will be here.", + "allOf": [ + { + "$ref": "#/definitions/VeilidConfigInner" + } + ] } } }, "VeilidStateNetwork": { + "description": "Describe the current network state of the Veilid node", "type": "object", "required": [ "bps_down", @@ -4289,18 +4359,22 @@ ], "properties": { "bps_down": { + "description": "The total number of bytes per second used by Veilid currently in the download direction.", "type": "string" }, "bps_up": { + "description": "The total number of bytes per second used by Veilid currently in the upload direction.", "type": "string" }, "peers": { + "description": "The list of most recently accessed peers. 
This is not an active connection table, nor is representative of the entire routing table.", "type": "array", "items": { "$ref": "#/definitions/PeerTableData" } }, "started": { + "description": "If the network has been started or not.", "type": "boolean" } } diff --git a/veilid-python/veilid/schema/Request.json b/veilid-python/veilid/schema/Request.json index 19a342cc..1e9a8441 100644 --- a/veilid-python/veilid/schema/Request.json +++ b/veilid-python/veilid/schema/Request.json @@ -963,6 +963,20 @@ } } }, + { + "type": "object", + "required": [ + "cs_op" + ], + "properties": { + "cs_op": { + "type": "string", + "enum": [ + "Kind" + ] + } + } + }, { "type": "object", "required": [ @@ -1291,7 +1305,7 @@ "cs_op", "data", "key", - "secret" + "signature" ], "properties": { "cs_op": { @@ -1306,7 +1320,7 @@ "key": { "type": "string" }, - "secret": { + "signature": { "type": "string" } } @@ -1586,7 +1600,7 @@ ], "properties": { "id": { - "description": "Operation Id (pairs with Response, or empty if unidirectional)", + "description": "Operation Id (pairs with Response, or empty if unidirectional).", "default": 0, "type": "integer", "format": "uint32", @@ -1712,10 +1726,10 @@ } }, "SafetySelection": { - "description": "The choice of safety route to include in compiled routes", + "description": "The choice of safety route to include in compiled routes.", "oneOf": [ { - "description": "Don't use a safety route, only specify the sequencing preference", + "description": "Don't use a safety route, only specify the sequencing preference.", "type": "object", "required": [ "Unsafe" @@ -1728,7 +1742,7 @@ "additionalProperties": false }, { - "description": "Use a safety route and parameters specified by a SafetySpec", + "description": "Use a safety route and parameters specified by a SafetySpec.", "type": "object", "required": [ "Safe" @@ -1743,7 +1757,7 @@ ] }, "SafetySpec": { - "description": "Options for safety routes (sender privacy)", + "description": "Options for safety routes 
(sender privacy).", "type": "object", "required": [ "hop_count", @@ -1752,20 +1766,20 @@ ], "properties": { "hop_count": { - "description": "must be greater than 0", + "description": "Must be greater than 0.", "type": "integer", "format": "uint", "minimum": 0.0 }, "preferred_route": { - "description": "preferred safety route set id if it still exists", + "description": "Preferred safety route set id if it still exists.", "type": [ "string", "null" ] }, "sequencing": { - "description": "prefer connection-oriented sequenced protocols", + "description": "Prefer connection-oriented sequenced protocols.", "allOf": [ { "$ref": "#/definitions/Sequencing" @@ -1773,7 +1787,7 @@ ] }, "stability": { - "description": "prefer reliability over speed", + "description": "Prefer reliability over speed.", "allOf": [ { "$ref": "#/definitions/Stability" diff --git a/veilid-wasm/src/lib.rs b/veilid-wasm/src/lib.rs index 76d95ab9..46ab6452 100644 --- a/veilid-wasm/src/lib.rs +++ b/veilid-wasm/src/lib.rs @@ -1353,14 +1353,14 @@ pub fn crypto_verify(kind: u32, key: String, data: String, signature: String) -> .unwrap(); let signature: veilid_core::Signature = veilid_core::deserialize_json(&signature).unwrap(); - wrap_api_future_void(async move { + wrap_api_future_plain(async move { let veilid_api = get_veilid_api()?; let crypto = veilid_api.crypto()?; let csv = crypto.get(kind).ok_or_else(|| { veilid_core::VeilidAPIError::invalid_argument("crypto_verify", "kind", kind.to_string()) })?; - csv.verify(&key, &data, &signature)?; - APIRESULT_UNDEFINED + let out = csv.verify(&key, &data, &signature)?; + APIResult::Ok(out) }) } diff --git a/veilid-wasm/src/veilid_crypto_js.rs b/veilid-wasm/src/veilid_crypto_js.rs index 5933f39b..55c4d82d 100644 --- a/veilid-wasm/src/veilid_crypto_js.rs +++ b/veilid-wasm/src/veilid_crypto_js.rs @@ -207,7 +207,7 @@ impl VeilidCrypto { node_ids: StringArray, data: Box<[u8]>, signatures: StringArray, - ) -> VeilidAPIResult { + ) -> VeilidAPIResult> { let node_ids = 
into_unchecked_string_vec(node_ids); let node_ids: Vec = node_ids .iter() @@ -238,12 +238,15 @@ impl VeilidCrypto { let veilid_api = get_veilid_api()?; let crypto = veilid_api.crypto()?; - let out = crypto.verify_signatures(&node_ids, &data, &typed_signatures)?; - let out = out - .iter() - .map(|item| item.to_string()) - .collect::>(); - let out = into_unchecked_string_array(out); + let out = crypto + .verify_signatures(&node_ids, &data, &typed_signatures)? + .map(|sigs| { + let out = sigs + .iter() + .map(|item| item.to_string()) + .collect::>(); + into_unchecked_string_array(out) + }); APIResult::Ok(out) } @@ -375,7 +378,12 @@ impl VeilidCrypto { APIResult::Ok(out.to_string()) } - pub fn verify(kind: String, key: String, data: Box<[u8]>, signature: String) -> APIResult<()> { + pub fn verify( + kind: String, + key: String, + data: Box<[u8]>, + signature: String, + ) -> APIResult { let kind: veilid_core::CryptoKind = veilid_core::FourCC::from_str(&kind)?; let key: veilid_core::PublicKey = veilid_core::PublicKey::from_str(&key)?; @@ -386,8 +394,8 @@ impl VeilidCrypto { let crypto_system = crypto.get(kind).ok_or_else(|| { veilid_core::VeilidAPIError::invalid_argument("crypto_verify", "kind", kind.to_string()) })?; - crypto_system.verify(&key, &data, &signature)?; - APIRESULT_UNDEFINED + let out = crypto_system.verify(&key, &data, &signature)?; + APIResult::Ok(out) } pub fn aeadOverhead(kind: String) -> APIResult { diff --git a/veilid-wasm/tests/src/veilidCrypto.test.ts b/veilid-wasm/tests/src/veilidCrypto.test.ts index c325e052..00d1a49d 100644 --- a/veilid-wasm/tests/src/veilidCrypto.test.ts +++ b/veilid-wasm/tests/src/veilidCrypto.test.ts @@ -138,7 +138,7 @@ describe('veilidCrypto', () => { expect(() => { const res = veilidCrypto.verify(bestKind, publicKey, data, sig); - expect(res).toBeUndefined(); + expect(res).toBe(true); }).not.toThrow(); }); From faf8347aa8d7cf15d866482729decdce2f8989fb Mon Sep 17 00:00:00 2001 From: Christien Rioux Date: Sun, 2 Jun 2024 
12:54:06 -0400 Subject: [PATCH 14/14] keep 'last ditch watch cancel' from failing the whole operation --- veilid-core/src/storage_manager/mod.rs | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/veilid-core/src/storage_manager/mod.rs b/veilid-core/src/storage_manager/mod.rs index 3a325b4a..b705e262 100644 --- a/veilid-core/src/storage_manager/mod.rs +++ b/veilid-core/src/storage_manager/mod.rs @@ -332,7 +332,7 @@ impl StorageManager { if let Some(rpc_processor) = opt_rpc_processor { // Use the safety selection we opened the record with // Use the writer we opened with as the 'watcher' as well - let opt_owvresult = self + let opt_owvresult = match self .outbound_watch_value_cancel( rpc_processor, key, @@ -342,7 +342,16 @@ impl StorageManager { active_watch.id, active_watch.watch_node, ) - .await?; + .await + { + Ok(v) => v, + Err(e) => { + log_stor!(debug + "close record watch cancel failed: {}", e + ); + None + } + }; if let Some(owvresult) = opt_owvresult { if owvresult.expiration_ts.as_u64() != 0 { log_stor!(debug