Mirror of https://gitlab.com/veilid/veilid.git (synced 2025-01-25 22:15:59 -05:00)
fix debug commands
clean up dht capabilities, add temporary fanout debugging logs
parent 0589e6dc31
commit 39d5e0435c
@@ -145,6 +145,15 @@ where
     }
 
     fn add_to_fanout_queue(self: Arc<Self>, new_nodes: &[NodeRef]) {
+        info!(
+            "FanoutCall::add_to_fanout_queue:\n new_nodes={{\n{}}}\n",
+            new_nodes
+                .iter()
+                .map(|x| format!(" {}", x))
+                .collect::<Vec<String>>()
+                .join(",\n"),
+        );
+
         let ctx = &mut *self.context.lock();
         let this = self.clone();
         ctx.fanout_queue.add(new_nodes, |current_nodes| {
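All of the temporary logs in this commit format a node list the same way: iterate, prefix each entry, and join with ",\n". A minimal standalone sketch of that pattern, with plain strings standing in for the `Display`-able `NodeRef`s:

```rust
// Not the veilid code: a self-contained illustration of the iterator pattern
// used by the temporary fanout logs above.
fn format_block(items: &[&str]) -> String {
    items
        .iter()
        .map(|x| format!("    {}", x)) // indent each entry for readability
        .collect::<Vec<String>>()
        .join(",\n")
}

fn main() {
    let nodes = ["node-a", "node-b"];
    println!("new_nodes={{\n{}}}\n", format_block(&nodes));
}
```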
@@ -1,5 +1,6 @@
 use super::*;
 
+#[derive(Debug)]
 pub(in crate::rpc_processor) struct FanoutQueue {
     crypto_kind: CryptoKind,
     current_nodes: VecDeque<NodeRef>,
@@ -53,6 +54,20 @@ impl FanoutQueue {
         // Sort and trim the candidate set
         self.current_nodes =
             VecDeque::from_iter(cleanup(self.current_nodes.as_slices().0).iter().cloned());
+
+        info!(
+            "FanoutQueue::add:\n current_nodes={{\n{}}}\n returned_nodes={{\n{}}}\n",
+            self.current_nodes
+                .iter()
+                .map(|x| format!(" {}", x))
+                .collect::<Vec<String>>()
+                .join(",\n"),
+            self.returned_nodes
+                .iter()
+                .map(|x| format!(" {}", x))
+                .collect::<Vec<String>>()
+                .join(",\n")
+        );
     }
 
     // Return next fanout candidate
@@ -63,6 +78,9 @@
 
         // Ensure we don't return this node again
         self.returned_nodes.insert(key);
+
+        info!("FanoutQueue::next: => {}", cn);
+
         Some(cn)
     }
 
@@ -211,7 +211,7 @@ impl RPCProcessor {
 
         // Get the nodes that we know about that are closer to the the key than our own node
         let routing_table = self.routing_table();
-        let closer_to_key_peers = network_result_try!(routing_table.find_preferred_peers_closer_to_key(key, vec![CAP_DHT, CAP_DHT_WATCH]));
+        let closer_to_key_peers = network_result_try!(routing_table.find_preferred_peers_closer_to_key(key, vec![CAP_DHT]));
 
         if debug_target_enabled!("dht") {
             let debug_string = format!(
@@ -200,7 +200,7 @@ impl RPCProcessor {
 
         // Get the nodes that we know about that are closer to the the key than our own node
         let routing_table = self.routing_table();
-        let closer_to_key_peers = network_result_try!(routing_table.find_preferred_peers_closer_to_key(key, vec![CAP_DHT, CAP_DHT_WATCH]));
+        let closer_to_key_peers = network_result_try!(routing_table.find_preferred_peers_closer_to_key(key, vec![CAP_DHT]));
 
         if debug_target_enabled!("dht") {
             let debug_string = format!(
@@ -251,7 +251,7 @@ impl RPCProcessor {
 
         // Get the nodes that we know about that are closer to the the key than our own node
         let closer_to_key_peers = network_result_try!(
-            routing_table.find_preferred_peers_closer_to_key(key, vec![CAP_DHT, CAP_DHT_WATCH])
+            routing_table.find_preferred_peers_closer_to_key(key, vec![CAP_DHT])
         );
 
         // See if we would have accepted this as a set, same set_value_count for watches
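The three hunks above narrow the peer filter for answering DHT questions from `[CAP_DHT, CAP_DHT_WATCH]` to `[CAP_DHT]`; the watch capability is checked separately where it is actually needed (see the watch-node filter later in this commit). A rough sketch of capability-based peer filtering, with placeholder types and constants rather than the real veilid API:

```rust
// Placeholder capability identifiers; the real constants are defined by
// veilid and are not these strings.
const CAP_DHT: &str = "DHT";
const CAP_DHT_WATCH: &str = "DHT_WATCH";

struct Peer {
    capabilities: Vec<&'static str>,
}

// Keep only peers that advertise every required capability. With the
// narrowed requirement, watch-incapable peers can still answer get/set.
fn filter_peers(peers: Vec<Peer>, required: &[&'static str]) -> Vec<Peer> {
    peers
        .into_iter()
        .filter(|p| required.iter().all(|c| p.capabilities.contains(c)))
        .collect()
}

fn main() {
    let peers = vec![
        Peer { capabilities: vec![CAP_DHT] },
        Peer { capabilities: vec![CAP_DHT, CAP_DHT_WATCH] },
    ];
    assert_eq!(filter_peers(peers, &[CAP_DHT]).len(), 2);
}
```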
@@ -174,7 +174,7 @@ impl StorageManager {
             key_count,
             fanout,
             timeout_us,
-            capability_fanout_node_info_filter(vec![CAP_DHT, CAP_DHT_WATCH]),
+            capability_fanout_node_info_filter(vec![CAP_DHT]),
             call_routine,
             check_done,
         );
@@ -159,7 +159,7 @@ impl StorageManager {
             key_count,
             fanout,
             timeout_us,
-            capability_fanout_node_info_filter(vec![CAP_DHT, CAP_DHT_WATCH]),
+            capability_fanout_node_info_filter(vec![CAP_DHT]),
             call_routine,
             check_done,
         );
@@ -50,7 +50,16 @@ impl StorageManager {
             vec![watch_node]
         } else {
             let inner = self.inner.lock().await;
-            inner.get_value_nodes(key)?.unwrap_or_default()
+            inner
+                .get_value_nodes(key)?
+                .unwrap_or_default()
+                .into_iter()
+                .filter(|x| {
+                    x.node_info(RoutingDomain::PublicInternet)
+                        .map(|ni| ni.has_capability(CAP_DHT_WATCH))
+                        .unwrap_or_default()
+                })
+                .collect()
         };
 
         // Get the appropriate watcher key, if anonymous use a static anonymous watch key
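One detail of the new filter above: a node whose PublicInternet node info is not yet known yields `None`, and `map(...).unwrap_or_default()` turns that into `false`, so such nodes are treated as not watch-capable. A small illustration of that `Option` pattern, using plain string capabilities rather than the real types:

```rust
// Illustration only: None (no node info) falls through to the bool default, false.
fn has_capability(node_info: Option<&[&str]>, cap: &str) -> bool {
    node_info
        .map(|caps| caps.iter().any(|c| *c == cap))
        .unwrap_or_default()
}

fn main() {
    let caps: &[&str] = &["DHT", "DHT_WATCH"];
    assert!(has_capability(Some(caps), "DHT_WATCH"));
    assert!(!has_capability(None, "DHT_WATCH"));
}
```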
@@ -1602,7 +1602,7 @@ impl VeilidAPI {
             "subkey",
             get_number::<u32>,
         )?;
-        let force_refresh = if args.len() >= 4 {
+        let force_refresh = if args.len() >= 3 + opt_arg_add {
             Some(get_debug_argument_at(
                 &args,
                 2 + opt_arg_add,
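The `force_refresh` fix replaces a hard-coded `args.len() >= 4` with a bound that accounts for `opt_arg_add`, the offset introduced by an optional leading argument. A hedged sketch of that indexing scheme; the argument layout shown is hypothetical, not the actual debug-command grammar:

```rust
// Hypothetical layout: [optional_scope] <key> <subkey> [force_refresh]
// opt_arg_add is 1 when the optional leading argument is present, else 0.
fn force_refresh_arg<'a>(args: &[&'a str], opt_arg_add: usize) -> Option<&'a str> {
    // The old hard-coded `>= 4` check only matched the layout with the
    // optional argument present; offsetting by opt_arg_add handles both.
    if args.len() >= 3 + opt_arg_add {
        Some(args[2 + opt_arg_add])
    } else {
        None
    }
}

fn main() {
    assert_eq!(force_refresh_arg(&["key", "0", "force"], 0), Some("force"));
    assert_eq!(force_refresh_arg(&["scope", "key", "0", "force"], 1), Some("force"));
    assert_eq!(force_refresh_arg(&["key", "0"], 0), None);
}
```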
@@ -1735,7 +1735,7 @@ impl VeilidAPI {
             })
         };
         let count = if rest_defaults {
-            Default::default()
+            u32::MAX
         } else {
             get_debug_argument_at(
                 &args,