more inspect work

commit 7fdd5f9555 (parent ea74d646f8)
@@ -16,8 +16,6 @@ struct OutboundGetValueContext {
 pub(super) struct OutboundGetValueResult {
     /// Fanout result
     pub fanout_result: FanoutResult,
-    /// Consensus count for this operation,
-    pub consensus_count: usize,
     /// The subkey that was retrieved
     pub get_result: GetResult,
 }
@@ -205,7 +203,6 @@ impl StorageManager {
 
         Ok(OutboundGetValueResult {
             fanout_result,
-            consensus_count,
             get_result: GetResult {
                 opt_value: ctx.value.clone(),
                 opt_descriptor: ctx.descriptor.clone(),
@@ -44,8 +44,6 @@ struct OutboundInspectValueContext {
 pub(super) struct OutboundInspectValueResult {
     /// Fanout results for each subkey
     pub fanout_results: Vec<FanoutResult>,
-    /// Required count for consensus for this operation,
-    pub consensus_count: usize,
     /// The inspection that was retrieved
     pub inspect_result: InspectResult,
 }
@@ -58,7 +56,7 @@ impl StorageManager {
         key: TypedKey,
         subkeys: ValueSubkeyRangeSet,
         safety_selection: SafetySelection,
-        mut local_inspect_result: InspectResult,
+        local_inspect_result: InspectResult,
         use_set_scope: bool,
     ) -> VeilidAPIResult<OutboundInspectValueResult> {
         let routing_table = rpc_processor.routing_table();
@@ -69,11 +67,6 @@ impl StorageManager {
         let c = self.unlocked_inner.config.get();
 
         if use_set_scope {
-            // If we're simulating a set, increase the previous sequence number we have by 1
-            for seq in &mut local_inspect_result.seqs {
-                *seq += 1;
-            }
-
             (
                 c.network.dht.max_find_node_count as usize,
                 c.network.dht.set_value_count as usize,
@@ -288,7 +281,6 @@ impl StorageManager {
 
         Ok(OutboundInspectValueResult {
             fanout_results,
-            consensus_count,
             inspect_result: InspectResult {
                 subkeys: ctx
                     .opt_descriptor_info
@@ -408,7 +408,11 @@ impl StorageManager {
 
         // Keep the list of nodes that returned a value for later reference
         let mut inner = self.lock().await?;
-        inner.set_value_nodes(key, result.fanout_result.value_nodes)?;
+        inner.process_fanout_results(
+            key,
+            core::iter::once((subkey, &result.fanout_result)),
+            false,
+        )?;
 
         // If we got a new value back then write it to the opened record
         if Some(get_result_value.value_data().seq()) != opt_last_seq {
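A note on the call shape above: process_fanout_results accepts any iterator of (subkey, fanout result) pairs, and the single-subkey get/set paths wrap their one pair in core::iter::once. A minimal stand-alone sketch of that adapter, with a &str standing in for &FanoutResult:

// Illustrative only: the iterator shape core::iter::once produces
// for the single (subkey, fanout_result) case.
fn main() {
    let subkey: u32 = 4;
    let fanout_result = "fanout-result-stand-in"; // real code passes &FanoutResult
    let mut it = core::iter::once((subkey, fanout_result));
    assert_eq!(it.next(), Some((4, "fanout-result-stand-in")));
    assert_eq!(it.next(), None); // exactly one item
}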
@@ -541,7 +545,11 @@ impl StorageManager {
 
         // Keep the list of nodes that returned a value for later reference
         let mut inner = self.lock().await?;
-        inner.set_value_nodes(key, result.fanout_result.value_nodes)?;
+        inner.process_fanout_results(
+            key,
+            core::iter::once((subkey, &result.fanout_result)),
+            true,
+        )?;
 
         // Return the new value if it differs from what was asked to set
         if result.signed_value_data.value_data() != signed_value_data.value_data() {
@@ -739,6 +747,12 @@ impl StorageManager {
         subkeys: ValueSubkeyRangeSet,
         scope: DHTReportScope,
     ) -> VeilidAPIResult<DHTRecordReport> {
+        let subkeys = if subkeys.is_empty() {
+            ValueSubkeyRangeSet::full()
+        } else {
+            subkeys
+        };
+
         let mut inner = self.lock().await?;
         let safety_selection = {
             let Some(opened_record) = inner.opened_records.get(&key) else {
@@ -748,15 +762,25 @@ impl StorageManager {
         };
 
         // See if the requested record is our local record store
-        let local_inspect_result = inner
+        let mut local_inspect_result = inner
             .handle_inspect_local_value(key, subkeys.clone(), true)
             .await?;
 
+        assert!(
+            local_inspect_result.subkeys.len() == local_inspect_result.seqs.len(),
+            "mismatch between local subkeys returned and sequence number list returned"
+        );
+        assert!(
+            local_inspect_result.subkeys.is_subset(&subkeys),
+            "mismatch between local subkeys returned and sequence number list returned"
+        );
+
         // If this is the maximum scope we're interested in, return the report
         if matches!(scope, DHTReportScope::Local) {
            return Ok(DHTRecordReport::new(
                local_inspect_result.subkeys,
                local_inspect_result.seqs,
+               vec![],
            ));
        }
 
@@ -768,6 +792,13 @@ impl StorageManager {
         // Drop the lock for network access
         drop(inner);
 
+        // If we're simulating a set, increase the previous sequence number we have by 1
+        if matches!(scope, DHTReportScope::UpdateSet) {
+            for seq in &mut local_inspect_result.seqs {
+                *seq = seq.overflowing_add(1).0;
+            }
+        }
+
         // Get the inspect record report from the network
         let result = self
             .outbound_inspect_value(
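The wrapping add above avoids the overflow panic a plain `seq + 1` could hit in debug builds. A minimal sketch of the behavior, assuming the sequence number type is u32 (an assumption; the diff only shows it through `overflowing_add`):

// Sketch of the simulated-set sequence bump; u32 is assumed here.
fn bump_seq(seq: u32) -> u32 {
    seq.overflowing_add(1).0 // .1 is the carry flag, which is discarded
}

fn main() {
    assert_eq!(bump_seq(41), 42);
    assert_eq!(bump_seq(u32::MAX), 0); // wraps instead of panicking
}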
@@ -775,24 +806,40 @@
                 key,
                 subkeys,
                 safety_selection,
-                local_inspect_result,
-                matches!(scope, DHTReportScope::NetworkSet),
+                if matches!(scope, DHTReportScope::SyncGet | DHTReportScope::SyncSet) {
+                    InspectResult::default()
+                } else {
+                    local_inspect_result.clone()
+                },
+                matches!(scope, DHTReportScope::UpdateSet | DHTReportScope::SyncSet),
             )
             .await?;
 
-        // See if we got a seqs list back
-        if result.inspect_result.seqs.is_empty() {
-            // If we got nothing back then we also had nothing beforehand, return nothing
-            return Ok(DHTRecordReport::default());
-        };
+        // Sanity check before zip
+        assert!(
+            result.inspect_result.subkeys.len() == result.fanout_results.len(),
+            "mismatch between subkeys returned and fanout results returned"
+        );
+        assert!(
+            local_inspect_result.subkeys.is_empty()
+                || result.inspect_result.subkeys.is_empty()
+                || result.inspect_result.subkeys.len() == local_inspect_result.subkeys.len(),
+            "mismatch between local subkeys returned and network results returned"
+        );
 
         // Keep the list of nodes that returned a value for later reference
-        // xxx switch 'value nodes' to keeping ranges of subkeys per node
-        // let mut inner = self.lock().await?;
-        // inner.set_value_nodes(key, result.fanout_results.value_nodes)?;
+        let mut inner = self.lock().await?;
+        let results_iter = result
+            .inspect_result
+            .subkeys
+            .iter()
+            .zip(result.fanout_results.iter());
+
+        inner.process_fanout_results(key, results_iter, false)?;
 
         Ok(DHTRecordReport::new(
             result.inspect_result.subkeys,
+            local_inspect_result.seqs,
             result.inspect_result.seqs,
         ))
     }
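The scope argument now drives two independent choices in that call: whether the local copy seeds the fanout, and whether SetValue fanout parameters are used. A self-contained sketch of that mapping, with stand-in types rather than the actual crate code:

// Stand-ins so the sketch compiles alone; the real types live in veilid-core.
#[derive(Clone, Default)]
struct InspectResult;
#[allow(dead_code)]
#[derive(Clone, Copy)]
enum DHTReportScope { Local, SyncGet, SyncSet, UpdateGet, UpdateSet }

// Sync* scopes discard the local copy for an independent network view;
// *Set scopes select SetValue fanout parameters.
fn outbound_args(scope: DHTReportScope, local: &InspectResult) -> (InspectResult, bool) {
    let seed = match scope {
        DHTReportScope::SyncGet | DHTReportScope::SyncSet => InspectResult::default(),
        _ => local.clone(),
    };
    let use_set_scope = matches!(scope, DHTReportScope::UpdateSet | DHTReportScope::SyncSet);
    (seed, use_set_scope)
}

fn main() {
    let (_seed, set_scope) = outbound_args(DHTReportScope::SyncSet, &InspectResult);
    assert!(set_scope);
}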
@@ -1,5 +1,13 @@
 use super::*;
 
+/// Information about nodes that cache a local record remotely
+#[derive(Clone, Debug, Default, PartialEq, Eq, Serialize, Deserialize)]
+pub(in crate::storage_manager) struct PerNodeRecordDetail {
+    pub last_set: Timestamp,
+    pub last_seen: Timestamp,
+    pub subkeys: ValueSubkeyRangeSet,
+}
+
 /// Information required to handle locally opened records
 #[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
 pub(in crate::storage_manager) struct LocalRecordDetail {
@@ -8,5 +16,14 @@ pub(in crate::storage_manager) struct LocalRecordDetail {
     pub safety_selection: SafetySelection,
     /// The nodes that we have seen this record cached on recently
     #[serde(default)]
-    pub value_nodes: Vec<PublicKey>,
+    pub nodes: HashMap<PublicKey, PerNodeRecordDetail>,
+}
+
+impl LocalRecordDetail {
+    pub fn new(safety_selection: SafetySelection) -> Self {
+        Self {
+            safety_selection,
+            nodes: Default::default(),
+        }
+    }
 }
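One reason `#[serde(default)]` matters here: records serialized before this commit carry a `value_nodes` field and no `nodes` field. A sketch of that migration behavior using serde_json and stand-in types (the real record store may use a different serde format):

use serde::Deserialize;
use std::collections::HashMap;

#[derive(Deserialize, Default)]
struct NewDetail {
    // Missing in old data, so it falls back to an empty map on load.
    #[serde(default)]
    nodes: HashMap<String, u64>, // stand-in for HashMap<PublicKey, PerNodeRecordDetail>
}

fn main() {
    // Old on-disk shape: has `value_nodes`, lacks `nodes`.
    let old = r#"{ "value_nodes": ["a", "b"] }"#;
    // serde ignores the unknown `value_nodes` field and applies the default.
    let d: NewDetail = serde_json::from_str(old).expect("should deserialize");
    assert!(d.nodes.is_empty());
}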
@@ -78,7 +78,7 @@ where
 }
 
 /// The result of the do_get_value_operation
-#[derive(Default, Debug)]
+#[derive(Default, Clone, Debug)]
 pub struct GetResult {
     /// The subkey value if we got one
     pub opt_value: Option<Arc<SignedValueData>>,
@@ -87,7 +87,7 @@ pub struct GetResult {
 }
 
 /// The result of the do_inspect_value_operation
-#[derive(Default, Debug)]
+#[derive(Default, Clone, Debug)]
 pub struct InspectResult {
     /// The actual in-schema subkey range being reported on
     pub subkeys: ValueSubkeyRangeSet,
@@ -16,8 +16,6 @@ struct OutboundSetValueContext {
 pub(super) struct OutboundSetValueResult {
     /// Fanout result
     pub fanout_result: FanoutResult,
-    /// Consensus count for this operation,
-    pub consensus_count: usize,
     /// The value that was set
     pub signed_value_data: Arc<SignedValueData>,
 }
@@ -189,7 +187,6 @@ impl StorageManager {
 
         Ok(OutboundSetValueResult {
             fanout_result,
-            consensus_count,
             signed_value_data: ctx.value.clone(),
         })
     }
@@ -32,6 +32,9 @@ pub(super) struct StorageManagerInner {
     pub tick_future: Option<SendPinBoxFuture<()>>,
     /// Update callback to send ValueChanged notification to
     pub update_callback: Option<UpdateCallback>,
+
+    /// The maximum consensus count
+    set_consensus_count: usize,
 }
 
 fn local_limits_from_config(config: VeilidConfig) -> RecordStoreLimits {
@@ -72,6 +75,7 @@ fn remote_limits_from_config(config: VeilidConfig) -> RecordStoreLimits {
 
 impl StorageManagerInner {
     pub fn new(unlocked_inner: Arc<StorageManagerUnlockedInner>) -> Self {
+        let set_consensus_count = unlocked_inner.config.get().network.dht.set_value_count as usize;
         Self {
             unlocked_inner,
             initialized: false,
@@ -84,6 +88,7 @@ impl StorageManagerInner {
             opt_routing_table: Default::default(),
             tick_future: Default::default(),
             update_callback: None,
+            set_consensus_count,
         }
     }
 
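The pattern in the two hunks above is to read the consensus count from config once at construction and cache it on the struct, so later record-store callbacks do not need config access. A minimal sketch with stand-in types:

struct Config { set_value_count: u32 }      // stand-in for the DHT config section
struct Inner { set_consensus_count: usize } // stand-in for StorageManagerInner

impl Inner {
    fn new(config: &Config) -> Self {
        // Read once, cache for the lifetime of the struct.
        let set_consensus_count = config.set_value_count as usize;
        Self { set_consensus_count }
    }
}

fn main() {
    assert_eq!(Inner::new(&Config { set_value_count: 5 }).set_consensus_count, 5);
}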
@@ -242,10 +247,7 @@ impl StorageManagerInner {
 
         // Add new local value record
         let cur_ts = get_aligned_timestamp();
-        let local_record_detail = LocalRecordDetail {
-            safety_selection,
-            value_nodes: vec![],
-        };
+        let local_record_detail = LocalRecordDetail::new(safety_selection);
         let record =
             Record::<LocalRecordDetail>::new(cur_ts, signed_value_descriptor, local_record_detail)?;
 
@@ -284,10 +286,7 @@ impl StorageManagerInner {
         let local_record = Record::new(
             cur_ts,
             remote_record.descriptor().clone(),
-            LocalRecordDetail {
-                safety_selection,
-                value_nodes: vec![],
-            },
+            LocalRecordDetail::new(safety_selection),
         )?;
         local_record_store.new_record(key, local_record).await?;
 
@@ -425,10 +424,7 @@ impl StorageManagerInner {
         let record = Record::<LocalRecordDetail>::new(
             get_aligned_timestamp(),
             signed_value_descriptor,
-            LocalRecordDetail {
-                safety_selection,
-                value_nodes: vec![],
-            },
+            LocalRecordDetail::new(safety_selection),
         )?;
         local_record_store.new_record(key, record).await?;
 
@@ -462,8 +458,8 @@ impl StorageManagerInner {
 
         let opt_value_nodes = local_record_store.peek_record(key, |r| {
             let d = r.detail();
-            d.value_nodes
-                .iter()
+            d.nodes
+                .keys()
                 .copied()
                 .filter_map(|x| {
                     routing_table
@@ -477,21 +473,46 @@ impl StorageManagerInner {
         Ok(opt_value_nodes)
     }
 
-    pub fn set_value_nodes(
+    pub fn process_fanout_results<'a, I: IntoIterator<Item = (ValueSubkey, &'a FanoutResult)>>(
         &mut self,
         key: TypedKey,
-        value_nodes: Vec<NodeRef>,
+        subkey_results_iter: I,
+        is_set: bool,
     ) -> VeilidAPIResult<()> {
         // Get local record store
         let Some(local_record_store) = self.local_record_store.as_mut() else {
             apibail_not_initialized!();
         };
+        let cur_ts = get_aligned_timestamp();
         local_record_store.with_record_mut(key, |r| {
             let d = r.detail_mut();
-            d.value_nodes = value_nodes
-                .into_iter()
-                .filter_map(|x| x.node_ids().get(key.kind).map(|k| k.value))
-                .collect();
+            for (subkey, fanout_result) in subkey_results_iter {
+                for node_id in fanout_result
+                    .value_nodes
+                    .iter()
+                    .filter_map(|x| x.node_ids().get(key.kind).map(|k| k.value))
+                {
+                    let pnd = d.nodes.entry(node_id).or_default();
+                    if is_set || pnd.last_set == Timestamp::default() {
+                        pnd.last_set = cur_ts;
+                    }
+                    pnd.last_seen = cur_ts;
+                    pnd.subkeys.insert(subkey);
+                }
+            }
+
+            // Purge nodes down to the N most recently seen, where N is the consensus count for a set operation
+            let mut nodes_ts = d
+                .nodes
+                .iter()
+                .map(|kv| (*kv.0, kv.1.last_seen))
+                .collect::<Vec<_>>();
+            nodes_ts.sort_by(|a, b| b.1.cmp(&a.1));
+
+            for dead_node_key in nodes_ts.iter().skip(self.set_consensus_count) {
+                d.nodes.remove(&dead_node_key.0);
+            }
         });
         Ok(())
     }
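The purge step above keeps only the N most recently seen nodes per record. A self-contained sketch of that logic with plain types (timestamps as u64, keys as strings):

use std::collections::HashMap;

// Keep the n most recently seen entries, drop the rest.
fn purge(nodes: &mut HashMap<&'static str, u64>, n: usize) {
    let mut by_ts: Vec<_> = nodes.iter().map(|(k, v)| (*k, *v)).collect();
    by_ts.sort_by(|a, b| b.1.cmp(&a.1)); // newest first
    for (dead, _) in by_ts.iter().skip(n) {
        nodes.remove(dead);
    }
}

fn main() {
    let mut nodes: HashMap<_, _> = [("a", 10), ("b", 30), ("c", 20)].into_iter().collect();
    purge(&mut nodes, 2);
    assert!(nodes.contains_key("b") && nodes.contains_key("c"));
    assert!(!nodes.contains_key("a")); // oldest entry purged
}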
@@ -78,13 +78,24 @@ fn get_data(text: &str) -> Option<Vec<u8>> {
 }
 
 fn get_subkeys(text: &str) -> Option<ValueSubkeyRangeSet> {
-    if let Some(n) = get_number(text) {
-        Some(ValueSubkeyRangeSet::single(n.try_into().ok()?))
+    if let Some(n) = get_number::<u32>(text) {
+        Some(ValueSubkeyRangeSet::single(n))
     } else {
         ValueSubkeyRangeSet::from_str(text).ok()
     }
 }
 
+fn get_dht_report_scope(text: &str) -> Option<DHTReportScope> {
+    match text.to_ascii_lowercase().trim() {
+        "local" => Some(DHTReportScope::Local),
+        "syncget" => Some(DHTReportScope::SyncGet),
+        "syncset" => Some(DHTReportScope::SyncSet),
+        "updateget" => Some(DHTReportScope::UpdateGet),
+        "updateset" => Some(DHTReportScope::UpdateSet),
+        _ => None,
+    }
+}
+
 fn get_route_id(
     rss: RouteSpecStore,
     allow_allocated: bool,
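The generic `get_number::<T>` used above lets one parser serve the usize, u32, and ValueSubkey call sites. A sketch of the idea (the `num_traits::Num` bound is dropped here so the sketch stands alone; only `FromStr` is needed for parsing):

use std::str::FromStr;

fn get_number<T: FromStr>(text: &str) -> Option<T> {
    T::from_str(text).ok()
}

fn main() {
    assert_eq!(get_number::<u32>("42"), Some(42));
    assert_eq!(get_number::<usize>("-1"), None);       // unsigned, so negative fails
    assert_eq!(get_number::<u32>("4294967296"), None); // one past u32::MAX
}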
@@ -287,8 +298,8 @@ fn get_destination(
     }
 }
 
-fn get_number(text: &str) -> Option<usize> {
-    usize::from_str(text).ok()
+fn get_number<T: num_traits::Num + FromStr>(text: &str) -> Option<T> {
+    T::from_str(text).ok()
 }
 
 fn get_typed_key(text: &str) -> Option<TypedKey> {
@@ -1458,7 +1469,7 @@ impl VeilidAPI {
             rc
         };
 
-        // Do a record get
+        // Do a record create
         let record = match rc.create_dht_record(schema, Some(csv.kind())).await {
             Err(e) => return Ok(format!("Can't open DHT record: {}", e)),
             Ok(v) => v,
@@ -1532,7 +1543,7 @@ impl VeilidAPI {
             1 + opt_arg_add,
             "debug_record_set",
             "subkey",
-            get_number,
+            get_number::<u32>,
         )?;
         let data =
             get_debug_argument_at(&args, 2 + opt_arg_add, "debug_record_set", "data", get_data)?;
@@ -1576,7 +1587,7 @@ impl VeilidAPI {
             1 + opt_arg_add,
             "debug_record_get",
             "subkey",
-            get_number,
+            get_number::<u32>,
         )?;
         let force_refresh = if args.len() >= 4 {
             Some(get_debug_argument_at(
@@ -1642,15 +1653,21 @@ impl VeilidAPI {
         let key =
             get_debug_argument_at(&args, 1, "debug_record_info", "key", get_dht_key_no_safety)?;
 
-        let subkey =
-            get_debug_argument_at(&args, 2, "debug_record_info", "subkey", get_number).ok();
+        let subkey = get_debug_argument_at(
+            &args,
+            2,
+            "debug_record_info",
+            "subkey",
+            get_number::<ValueSubkey>,
+        )
+        .ok();
 
         let out = if let Some(subkey) = subkey {
             let li = storage_manager
-                .debug_local_record_subkey_info(key, subkey as ValueSubkey)
+                .debug_local_record_subkey_info(key, subkey)
                 .await;
             let ri = storage_manager
-                .debug_remote_record_subkey_info(key, subkey as ValueSubkey)
+                .debug_remote_record_subkey_info(key, subkey)
                 .await;
             format!(
                 "Local Subkey Info:\n{}\n\nRemote Subkey Info:\n{}\n",
@@ -1672,6 +1689,8 @@ impl VeilidAPI {
         };
 
         let (key, rc) = get_opened_dht_record_context(&args, "debug_record_watch", "key", 1)?;
+
+        let mut rest_defaults = false;
         let subkeys = get_debug_argument_at(
             &args,
             1 + opt_arg_add,
@@ -1680,25 +1699,43 @@
             get_subkeys,
         )
         .ok()
-        .unwrap_or_default();
-        let expiration = get_debug_argument_at(
-            &args,
-            2 + opt_arg_add,
-            "debug_record_watch",
-            "expiration",
-            parse_duration,
-        )
-        .ok()
-        .unwrap_or_default();
-        let count = get_debug_argument_at(
-            &args,
-            3 + opt_arg_add,
-            "debug_record_watch",
-            "count",
-            get_number,
-        )
-        .ok()
-        .unwrap_or(usize::MAX) as u32;
+        .unwrap_or_else(|| {
+            rest_defaults = true;
+            Default::default()
+        });
+
+        let expiration = if rest_defaults {
+            Default::default()
+        } else {
+            get_debug_argument_at(
+                &args,
+                2 + opt_arg_add,
+                "debug_record_watch",
+                "expiration",
+                parse_duration,
+            )
+            .ok()
+            .unwrap_or_else(|| {
+                rest_defaults = true;
+                Default::default()
+            })
+        };
+        let count = if rest_defaults {
+            Default::default()
+        } else {
+            get_debug_argument_at(
+                &args,
+                3 + opt_arg_add,
+                "debug_record_watch",
+                "count",
+                get_number,
+            )
+            .ok()
+            .unwrap_or_else(|| {
+                rest_defaults = true;
+                u32::MAX
+            })
+        };
 
         // Do a record watch
         let ts = match rc
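The `rest_defaults` flag above implements a cascade: once one optional positional argument fails to parse, all later ones take defaults rather than being parsed, so arguments cannot be skipped in the middle. A self-contained sketch of the pattern:

fn parse_chain(args: &[&str]) -> (u32, u32, u32) {
    let mut rest_defaults = false;
    let mut next = |idx: usize| -> u32 {
        if rest_defaults {
            return 0; // later arguments are defaulted, not parsed
        }
        args.get(idx).and_then(|s| s.parse().ok()).unwrap_or_else(|| {
            rest_defaults = true; // trip the cascade on first failure
            0
        })
    };
    (next(0), next(1), next(2))
}

fn main() {
    assert_eq!(parse_chain(&["1", "2", "3"]), (1, 2, 3));
    assert_eq!(parse_chain(&["1", "x", "3"]), (1, 0, 0)); // "3" ignored after the trip
}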
@@ -1749,6 +1786,57 @@ impl VeilidAPI {
         })
     }
 
+    async fn debug_record_inspect(&self, args: Vec<String>) -> VeilidAPIResult<String> {
+        let opt_arg_add = if args.len() >= 2 && get_dht_key_no_safety(&args[1]).is_some() {
+            1
+        } else {
+            0
+        };
+
+        let (key, rc) = get_opened_dht_record_context(&args, "debug_record_watch", "key", 1)?;
+
+        let mut rest_defaults = false;
+        let subkeys = get_debug_argument_at(
+            &args,
+            1 + opt_arg_add,
+            "debug_record_inspect",
+            "subkeys",
+            get_subkeys,
+        )
+        .ok()
+        .unwrap_or_else(|| {
+            rest_defaults = true;
+            Default::default()
+        });
+
+        let scope = if rest_defaults {
+            Default::default()
+        } else {
+            get_debug_argument_at(
+                &args,
+                2 + opt_arg_add,
+                "debug_record_inspect",
+                "scope",
+                get_dht_report_scope,
+            )
+            .ok()
+            .unwrap_or_else(|| {
+                rest_defaults = true;
+                Default::default()
+            })
+        };
+
+        // Do a record inspect
+        let report = match rc.inspect_dht_record(key, subkeys, scope).await {
+            Err(e) => {
+                return Ok(format!("Can't inspect DHT record: {}", e));
+            }
+            Ok(v) => v,
+        };
+
+        Ok(format!("Success: report={:?}", report))
+    }
+
     async fn debug_record(&self, args: String) -> VeilidAPIResult<String> {
         let args: Vec<String> =
             shell_words::split(&args).map_err(|e| VeilidAPIError::parse_error(e, args))?;
@@ -1777,6 +1865,8 @@ impl VeilidAPI {
             self.debug_record_watch(args).await
         } else if command == "cancel" {
             self.debug_record_cancel(args).await
+        } else if command == "inspect" {
+            self.debug_record_inspect(args).await
         } else {
             Ok(">>> Unknown command\n".to_owned())
         }
@@ -1857,6 +1947,7 @@ record list <local|remote>
        info [<key>] [subkey]
        watch [<key>] [<subkeys>] [<expiration>] [<count>]
        cancel [<key>] [<subkeys>]
+       inspect [<key>] [<subkeys>] [<scope>]
 --------------------------------------------------------------------
 <key> is: VLD0:GsgXCRPrzSK6oBNgxhNpm-rTYFd02R0ySx6j9vbQBG4
 * also <node>, <relay>, <target>, <route>
@@ -1874,6 +1965,7 @@ record list <local|remote>
 <routingdomain> is: public|local
 <cryptokind> is: VLD0
 <dhtschema> is: a json dht schema, default is '{"kind":"DFLT","o_cnt":1}'
+<scope> is: local, syncget, syncset, updateget, updateset
 <subkey> is: a number: 2
 <subkeys> is:
 * a number: 2
@@ -14,18 +14,34 @@ pub struct DHTRecordReport {
     /// This may be a subset of the requested range if it exceeds the schema limits
     /// or has more than 512 subkeys
     subkeys: ValueSubkeyRangeSet,
-    /// The sequence numbers of each subkey requested from a DHT Record
-    seqs: Vec<ValueSeqNum>,
+    /// The sequence numbers of each subkey requested from a locally stored DHT Record
+    local_seqs: Vec<ValueSeqNum>,
+    /// The sequence numbers of each subkey requested from the DHT over the network
+    network_seqs: Vec<ValueSeqNum>,
 }
 from_impl_to_jsvalue!(DHTRecordReport);
 
 impl DHTRecordReport {
-    pub fn new(subkeys: ValueSubkeyRangeSet, seqs: Vec<ValueSeqNum>) -> Self {
-        Self { subkeys, seqs }
+    pub fn new(
+        subkeys: ValueSubkeyRangeSet,
+        local_seqs: Vec<ValueSeqNum>,
+        network_seqs: Vec<ValueSeqNum>,
+    ) -> Self {
+        Self {
+            subkeys,
+            local_seqs,
+            network_seqs,
+        }
     }
 
-    pub fn seqs(&self) -> &[ValueSeqNum] {
-        &self.seqs
+    pub fn subkeys(&self) -> &ValueSubkeyRangeSet {
+        &self.subkeys
+    }
+    pub fn local_seqs(&self) -> &[ValueSeqNum] {
+        &self.local_seqs
+    }
+    pub fn network_seqs(&self) -> &[ValueSeqNum] {
+        &self.network_seqs
     }
 }
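With the report now carrying parallel local and network sequence lists, a caller can diff them per subkey. A hypothetical helper, not part of the commit, that finds positions where the network copy is newer; it relies on the two lists staying aligned with `subkeys`, and shows sequence numbers as u32:

fn newer_on_network(local_seqs: &[u32], network_seqs: &[u32]) -> Vec<usize> {
    local_seqs
        .iter()
        .zip(network_seqs.iter())
        .enumerate()
        .filter(|(_, (l, n))| n > l) // network sequence ahead of local
        .map(|(i, _)| i)
        .collect()
}

fn main() {
    // Subkey index 1 is stale locally (7 < 9); the others are current or ahead.
    assert_eq!(newer_on_network(&[3, 7, 2], &[3, 9, 1]), vec![1]);
}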
@@ -35,9 +51,30 @@ impl DHTRecordReport {
 )]
 #[cfg_attr(target_arch = "wasm32", derive(Tsify), tsify(from_wasm_abi, namespace))]
 pub enum DHTReportScope {
+    /// Return only the local copy sequence numbers
+    /// Useful for seeing what subkeys you have locally and which ones have not been retrieved
     Local = 0,
-    NetworkGet = 1,
-    NetworkSet = 2,
+    /// Return the local sequence numbers and the network sequence numbers with GetValue fanout parameters
+    /// Provides an independent view of both the local sequence numbers and the network sequence numbers for nodes that
+    /// would be reached as if the local copy did not exist locally.
+    /// Useful for determining if the current local copy should be updated from the network.
+    SyncGet = 1,
+    /// Return the local sequence numbers and the network sequence numbers with SetValue fanout parameters
+    /// Provides an independent view of both the local sequence numbers and the network sequence numbers for nodes that
+    /// would be reached as if the local copy did not exist locally.
+    /// Useful for determining if the unchanged local copy should be pushed to the network.
+    SyncSet = 2,
+    /// Return the local sequence numbers and the network sequence numbers with GetValue fanout parameters
+    /// Provides a view of both the local sequence numbers and the network sequence numbers for nodes that
+    /// would be reached as if a GetValue operation were being performed, including accepting newer values from the network.
+    /// Useful for determining which subkeys would change with a GetValue operation
+    UpdateGet = 3,
+    /// Return the local sequence numbers and the network sequence numbers with SetValue fanout parameters
+    /// Provides a view of both the local sequence numbers and the network sequence numbers for nodes that
+    /// would be reached as if a SetValue operation were being performed, including accepting newer values from the network.
+    /// This simulates a SetValue with the initial sequence number incremented by 1, like a real SetValue would when updating.
+    /// Useful for determining which subkeys would change on a SetValue operation
+    UpdateSet = 4,
 }
 impl Default for DHTReportScope {
     fn default() -> Self {