Mirror of https://gitlab.com/veilid/veilid.git (synced 2025-09-19 04:25:02 -04:00)

add better dht debugging
parent 62aeec6faf, commit 291e3ef2fe

27 changed files with 817 additions and 95 deletions
@@ -55,6 +55,7 @@ flume = { version = "^0", features = ["async"] }
 enumset = { version= "^1", features = ["serde"] }
 backtrace = { version = "^0" }
 stop-token = { version = "^0", default-features = false }
+num-traits = "0.2.15"
 
 ed25519-dalek = { version = "^1", default_features = false, features = ["alloc", "u64_backend"] }
 x25519-dalek = { version = "^1", default_features = false, features = ["u64_backend"] }
@@ -77,18 +77,7 @@ where
 macro_rules! byte_array_type {
     ($name:ident, $size:expr, $encoded_size:expr) => {
-        #[derive(
-            Clone,
-            Copy,
-            Hash,
-            Eq,
-            PartialEq,
-            PartialOrd,
-            Ord,
-            RkyvArchive,
-            RkyvSerialize,
-            RkyvDeserialize,
-        )]
+        #[derive(Clone, Copy, Hash, RkyvArchive, RkyvSerialize, RkyvDeserialize)]
         #[archive_attr(repr(C), derive(CheckBytes, Hash, Eq, PartialEq, PartialOrd, Ord))]
         pub struct $name {
             pub bytes: [u8; $size],
@@ -125,6 +114,32 @@ macro_rules! byte_array_type {
             }
         }
 
+        impl PartialOrd for $name {
+            fn partial_cmp(&self, other: &Self) -> Option<core::cmp::Ordering> {
+                Some(self.cmp(other))
+            }
+        }
+
+        impl Ord for $name {
+            fn cmp(&self, other: &Self) -> core::cmp::Ordering {
+                for n in 0..$size {
+                    let c = self.bytes[n].cmp(&other.bytes[n]);
+                    if c != core::cmp::Ordering::Equal {
+                        return c;
+                    }
+                }
+                core::cmp::Ordering::Equal
+            }
+        }
+
+        impl PartialEq for $name {
+            fn eq(&self, other: &Self) -> bool {
+                self.bytes == other.bytes
+            }
+        }
+
+        impl Eq for $name {}
+
         impl $name {
             pub fn new(bytes: [u8; $size]) -> Self {
                 Self { bytes }
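Note: a minimal standalone sketch (not the veilid-core macro itself) of why the hand-written Ord above is what XOR-distance sorting wants: comparing the bytes most-significant-first orders fixed-size keys exactly like comparing them as big-endian unsigned integers.

use std::cmp::Ordering;

// Byte-wise, most-significant-byte-first comparison of fixed-size keys.
fn cmp_bytes<const N: usize>(a: &[u8; N], b: &[u8; N]) -> Ordering {
    for n in 0..N {
        let c = a[n].cmp(&b[n]);
        if c != Ordering::Equal {
            return c;
        }
    }
    Ordering::Equal
}

fn main() {
    let a = [0x01, 0xff, 0x00, 0x00];
    let b = [0x02, 0x00, 0x00, 0x00];
    // 0x01ff0000 < 0x02000000, and the byte-wise comparison agrees with the
    // big-endian integer interpretation of the same bytes.
    assert_eq!(cmp_bytes(&a, &b), Ordering::Less);
    assert_eq!(cmp_bytes(&a, &b), u32::from_be_bytes(a).cmp(&u32::from_be_bytes(b)));
}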
@@ -176,10 +176,10 @@ impl CryptoSystem for CryptoSystemVLD0 {
     }
     // Distance Metric
     fn distance(&self, key1: &PublicKey, key2: &PublicKey) -> CryptoKeyDistance {
-        let mut bytes = [0u8; PUBLIC_KEY_LENGTH];
+        let mut bytes = [0u8; CRYPTO_KEY_LENGTH];
 
-        for (n, byte) in bytes.iter_mut().enumerate() {
-            *byte = key1.bytes[n] ^ key2.bytes[n];
+        for n in 0..CRYPTO_KEY_LENGTH {
+            bytes[n] = key1.bytes[n] ^ key2.bytes[n];
         }
 
         CryptoKeyDistance::new(bytes)
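Note: an illustrative sketch of the distance metric above, using plain 4-byte arrays instead of PublicKey/CryptoKeyDistance (the key length is a const generic here only for the example): keys are XORed byte-by-byte and the result is read as a big-endian magnitude, so keys that share a long prefix are "close".

// XOR each byte; smaller results (under the byte-wise ordering above) mean closer keys.
fn distance<const N: usize>(key1: &[u8; N], key2: &[u8; N]) -> [u8; N] {
    let mut bytes = [0u8; N];
    for n in 0..N {
        bytes[n] = key1[n] ^ key2[n];
    }
    bytes
}

fn main() {
    let a = [0x12, 0x34, 0x56, 0x78];
    let b = [0x12, 0x34, 0x00, 0x00];
    // A shared prefix XORs to zero, so the distance is small.
    assert_eq!(distance(&a, &b), [0x00, 0x00, 0x56, 0x78]);
    // The metric is symmetric, and zero only for identical keys.
    assert_eq!(distance(&a, &b), distance(&b, &a));
    assert_eq!(distance(&a, &a), [0u8; 4]);
}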
@@ -99,8 +99,8 @@ impl RoutingTable {
             },
         );
 
-        // xxx test
+        // Validate peers returned are, in fact, closer to the key than the node we sent this to
         // This same test is used on the other side so we vet things here
         let valid = match Self::verify_peers_closer(vcrypto2, own_node_id, key, &closest_nodes) {
             Ok(v) => v,
             Err(e) => {
@@ -108,13 +108,16 @@ impl RoutingTable {
             }
         };
         if !valid {
-            panic!("non-closer peers returned");
+            error!(
+                "non-closer peers returned: own_node_id={:#?} key={:#?} closest_nodes={:#?}",
+                own_node_id, key, closest_nodes
+            );
         }
 
         NetworkResult::value(closest_nodes)
     }
 
-    /// Determine if set of peers is closer to key_near than key_far
+    /// Determine if set of peers is closer to key_near than key_far is to key_near
     pub(crate) fn verify_peers_closer(
         vcrypto: CryptoSystemVersion,
         key_far: TypedKey,
@@ -128,14 +131,30 @@ impl RoutingTable {
         }
 
         let mut closer = true;
-        let d_far = vcrypto.distance(&key_far.value, &key_near.value);
         for peer in peers {
             let Some(key_peer) = peer.node_ids().get(kind) else {
                 bail!("peers need to have a key with the same cryptosystem");
             };
             let d_near = vcrypto.distance(&key_near.value, &key_peer.value);
+            let d_far = vcrypto.distance(&key_far.value, &key_peer.value);
             if d_far < d_near {
+                let warning = format!(
+                    r#"peer:       {}
+near (key): {}
+far (self): {}
+d_near: {}
+d_far: {}
+cmp: {:?}"#,
+                    key_peer.value,
+                    key_near.value,
+                    key_far.value,
+                    d_near,
+                    d_far,
+                    d_near.cmp(&d_far)
+                );
+                warn!("{}", warning);
                 closer = false;
                 break;
             }
         }
 
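Note: a simplified sketch of the check verify_peers_closer performs, on raw byte arrays instead of TypedKey/CryptoSystemVersion: validation fails as soon as any returned peer is closer (by XOR distance) to key_far (the answering node's own key) than it is to key_near (the target key).

fn xor_distance<const N: usize>(a: &[u8; N], b: &[u8; N]) -> [u8; N] {
    let mut d = [0u8; N];
    for n in 0..N {
        d[n] = a[n] ^ b[n];
    }
    d
}

// Returns false if any peer is closer to key_far than to key_near.
fn peers_closer<const N: usize>(key_far: &[u8; N], key_near: &[u8; N], peers: &[[u8; N]]) -> bool {
    for key_peer in peers {
        let d_near = xor_distance(key_near, key_peer);
        let d_far = xor_distance(key_far, key_peer);
        // Fixed-size arrays compare lexicographically, matching the byte-wise Ord above.
        if d_far < d_near {
            return false;
        }
    }
    true
}

fn main() {
    let target = [0x10, 0x00];    // key_near
    let own_node = [0xf0, 0x00];  // key_far
    let good_peer = [0x11, 0x00]; // much closer to the target than to own_node
    let bad_peer = [0xf1, 0x00];  // closer to own_node than to the target
    assert!(peers_closer(&own_node, &target, &[good_peer]));
    assert!(!peers_closer(&own_node, &target, &[bad_peer]));
}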
@@ -198,12 +198,12 @@ impl RoutingTable {
         // Newly allocated routes
         let mut newly_allocated_routes = Vec::new();
         for _n in 0..routes_to_allocate {
-            // Parameters here must be the default safety route spec
+            // Parameters here must be the most inclusive safety route spec
             // These will be used by test_remote_route as well
             if let Some(k) = rss.allocate_route(
                 &VALID_CRYPTO_KINDS,
                 Stability::default(),
-                Sequencing::default(),
+                Sequencing::EnsureOrdered,
                 default_route_hop_count,
                 DirectionSet::all(),
                 &[],
@@ -110,6 +110,28 @@ impl RPCMessageHeader {
             RPCMessageHeaderDetail::PrivateRouted(p) => p.direct.envelope.get_crypto_kind(),
         }
     }
+    // pub fn direct_peer_noderef(&self) -> NodeRef {
+    //     match &self.detail {
+    //         RPCMessageHeaderDetail::Direct(d) => d.peer_noderef.clone(),
+    //         RPCMessageHeaderDetail::SafetyRouted(s) => s.direct.peer_noderef.clone(),
+    //         RPCMessageHeaderDetail::PrivateRouted(p) => p.direct.peer_noderef.clone(),
+    //     }
+    // }
+    pub fn direct_sender_node_id(&self) -> TypedKey {
+        match &self.detail {
+            RPCMessageHeaderDetail::Direct(d) => {
+                TypedKey::new(d.envelope.get_crypto_kind(), d.envelope.get_sender_id())
+            }
+            RPCMessageHeaderDetail::SafetyRouted(s) => TypedKey::new(
+                s.direct.envelope.get_crypto_kind(),
+                s.direct.envelope.get_sender_id(),
+            ),
+            RPCMessageHeaderDetail::PrivateRouted(p) => TypedKey::new(
+                p.direct.envelope.get_crypto_kind(),
+                p.direct.envelope.get_sender_id(),
+            ),
+        }
+    }
 }
 
 #[derive(Debug)]
@@ -50,13 +50,13 @@ impl RPCProcessor {
         };
 
         let debug_string = format!(
-            "GetValue(key={} subkey={} last_descriptor={}) => {}",
+            "OUT ==> GetValueQ({} #{}{}) => {}",
             key,
             subkey,
             if last_descriptor.is_some() {
-                "Some"
+                " +lastdesc"
             } else {
-                "None"
+                ""
             },
             dest
         );
@@ -74,6 +74,8 @@ impl RPCProcessor {
             vcrypto: vcrypto.clone(),
         });
 
+        log_rpc!(debug "{}", debug_string);
+
         let waitable_reply = network_result_try!(
             self.question(dest, question, Some(question_context))
                 .await?
@@ -97,6 +99,28 @@ impl RPCProcessor {
 
         let (value, peers, descriptor) = get_value_a.destructure();
 
+        let debug_string_value = value.as_ref().map(|v| {
+            format!(" len={} writer={}",
+                v.value_data().data().len(),
+                v.value_data().writer(),
+            )
+        }).unwrap_or_default();
+
+        let debug_string_answer = format!(
+            "OUT <== GetValueA({} #{}{}{} peers={})",
+            key,
+            subkey,
+            debug_string_value,
+            if descriptor.is_some() {
+                " +desc"
+            } else {
+                ""
+            },
+            peers.len(),
+        );
+
+        log_rpc!(debug "{}", debug_string_answer);
+
         // Validate peers returned are, in fact, closer to the key than the node we sent this to
         let valid = match RoutingTable::verify_peers_closer(vcrypto, target_node_id, key, &peers) {
             Ok(v) => v,
@@ -164,13 +188,50 @@ impl RPCProcessor {
         let routing_table = self.routing_table();
         let closer_to_key_peers = network_result_try!(routing_table.find_peers_closer_to_key(key));
 
+        let debug_string = format!(
+            "IN <=== GetValueQ({} #{}{}) <== {}",
+            key,
+            subkey,
+            if want_descriptor {
+                " +wantdesc"
+            } else {
+                ""
+            },
+            msg.header.direct_sender_node_id()
+        );
+
+        log_rpc!(debug "{}", debug_string);
+
         // See if we have this record ourselves
         let storage_manager = self.storage_manager();
         let subkey_result = network_result_try!(storage_manager
             .inbound_get_value(key, subkey, want_descriptor)
             .await
             .map_err(RPCError::internal)?);
 
+        let debug_string_value = subkey_result.value.as_ref().map(|v| {
+            format!(" len={} writer={}",
+                v.value_data().data().len(),
+                v.value_data().writer(),
+            )
+        }).unwrap_or_default();
+
+        let debug_string_answer = format!(
+            "IN ===> GetValueA({} #{}{}{} peers={}) ==> {}",
+            key,
+            subkey,
+            debug_string_value,
+            if subkey_result.descriptor.is_some() {
+                " +desc"
+            } else {
+                ""
+            },
+            closer_to_key_peers.len(),
+            msg.header.direct_sender_node_id()
+        );
+
+        log_rpc!(debug "{}", debug_string_answer);
+
         // Make GetValue answer
         let get_value_a = RPCOperationGetValueA::new(
             subkey_result.value,
@@ -54,12 +54,16 @@ impl RPCProcessor {
         };
 
         let debug_string = format!(
-            "SetValue(key={} subkey={} value_data(writer)={} value_data(len)={} send_descriptor={}) => {}",
+            "OUT ==> SetValueQ({} #{} len={} writer={}{}) => {}",
             key,
             subkey,
-            value.value_data().writer(),
             value.value_data().data().len(),
-            send_descriptor,
+            value.value_data().writer(),
+            if send_descriptor {
+                " +senddesc"
+            } else {
+                ""
+            },
             dest
         );
@@ -84,11 +88,14 @@ impl RPCProcessor {
             vcrypto: vcrypto.clone(),
         });
 
+        log_rpc!(debug "{}", debug_string);
+
         let waitable_reply = network_result_try!(
             self.question(dest, question, Some(question_context))
                 .await?
         );
 
         // Wait for reply
         let (msg, latency) = match self.wait_for_reply(waitable_reply, debug_string).await? {
             TimeoutOr::Timeout => return Ok(NetworkResult::Timeout),
@@ -106,6 +113,28 @@ impl RPCProcessor {
         };
 
         let (set, value, peers) = set_value_a.destructure();
 
+        let debug_string_value = value.as_ref().map(|v| {
+            format!(" len={} writer={}",
+                v.value_data().data().len(),
+                v.value_data().writer(),
+            )
+        }).unwrap_or_default();
+
+        let debug_string_answer = format!(
+            "OUT <== SetValueA({} #{}{}{} peers={})",
+            key,
+            subkey,
+            if set {
+                " +set"
+            } else {
+                ""
+            },
+            debug_string_value,
+            peers.len(),
+        );
+
+        log_rpc!(debug "{}", debug_string_answer);
+
         // Validate peers returned are, in fact, closer to the key than the node we sent this to
         let valid = match RoutingTable::verify_peers_closer(vcrypto, target_node_id, key, &peers) {
@@ -172,6 +201,22 @@ impl RPCProcessor {
         let routing_table = self.routing_table();
         let closer_to_key_peers = network_result_try!(routing_table.find_peers_closer_to_key(key));
 
+        let debug_string = format!(
+            "IN <=== SetValueQ({} #{} len={} writer={}{}) <== {}",
+            key,
+            subkey,
+            value.value_data().data().len(),
+            value.value_data().writer(),
+            if descriptor.is_some() {
+                " +desc"
+            } else {
+                ""
+            },
+            msg.header.direct_sender_node_id()
+        );
+
+        log_rpc!(debug "{}", debug_string);
+
         // If there are less than 'set_value_count' peers that are closer, then store here too
         let set_value_count = {
             let c = self.config.get();
|
|||
(true, new_value)
|
||||
};
|
||||
|
||||
let debug_string_value = new_value.as_ref().map(|v| {
|
||||
format!(" len={} writer={}",
|
||||
v.value_data().data().len(),
|
||||
v.value_data().writer(),
|
||||
)
|
||||
}).unwrap_or_default();
|
||||
|
||||
let debug_string_answer = format!(
|
||||
"IN ===> SetValueA({} #{}{}{} peers={}) ==> {}",
|
||||
key,
|
||||
subkey,
|
||||
if set {
|
||||
" +set"
|
||||
} else {
|
||||
""
|
||||
},
|
||||
debug_string_value,
|
||||
closer_to_key_peers.len(),
|
||||
msg.header.direct_sender_node_id()
|
||||
);
|
||||
|
||||
log_rpc!(debug "{}", debug_string_answer);
|
||||
|
||||
// Make SetValue answer
|
||||
let set_value_a = RPCOperationSetValueA::new(set, new_value, closer_to_key_peers)?;
|
||||
|
||||
|
|
|
@@ -15,4 +15,24 @@ impl StorageManager {
         };
         remote_record_store.debug_records()
     }
+    pub(crate) async fn purge_local_records(&self, reclaim: Option<usize>) -> String {
+        let mut inner = self.inner.lock().await;
+        let Some(local_record_store) = &mut inner.local_record_store else {
+            return "not initialized".to_owned();
+        };
+        let reclaimed = local_record_store
+            .reclaim_space(reclaim.unwrap_or(usize::MAX))
+            .await;
+        return format!("Local records purged: reclaimed {} bytes", reclaimed);
+    }
+    pub(crate) async fn purge_remote_records(&self, reclaim: Option<usize>) -> String {
+        let mut inner = self.inner.lock().await;
+        let Some(remote_record_store) = &mut inner.remote_record_store else {
+            return "not initialized".to_owned();
+        };
+        let reclaimed = remote_record_store
+            .reclaim_space(reclaim.unwrap_or(usize::MAX))
+            .await;
+        return format!("Remote records purged: reclaimed {} bytes", reclaimed);
+    }
 }
veilid-core/src/storage_manager/limited_size.rs (new file, 111 lines)
@@ -0,0 +1,111 @@
+use super::*;
+use num_traits::{PrimInt, Unsigned};
+
+#[derive(ThisError, Debug, Clone, Copy, Eq, PartialEq)]
+pub enum LimitError {
+    #[error("limit overflow")]
+    OverLimit,
+}
+
+#[derive(ThisError, Debug, Clone, Copy, Eq, PartialEq)]
+pub enum NumericError {
+    #[error("numeric overflow")]
+    Overflow,
+    #[error("numeric underflow")]
+    Underflow,
+}
+
+#[derive(Debug, Clone)]
+pub struct LimitedSize<T: PrimInt + Unsigned + fmt::Display + fmt::Debug> {
+    description: String,
+    value: T,
+    limit: Option<T>,
+    uncommitted_value: Option<T>,
+}
+
+impl<T: PrimInt + Unsigned + fmt::Display + fmt::Debug> LimitedSize<T> {
+    pub fn new(description: &str, value: T, limit: Option<T>) -> Self {
+        Self {
+            description: description.to_owned(),
+            value,
+            limit,
+            uncommitted_value: None,
+        }
+    }
+
+    fn current_value(&self) -> T {
+        self.uncommitted_value.unwrap_or(self.value)
+    }
+
+    pub fn set(&mut self, new_value: T) {
+        self.uncommitted_value = Some(new_value);
+    }
+
+    pub fn add(&mut self, v: T) -> Result<T, NumericError> {
+        let current_value = self.current_value();
+        let max_v = T::max_value() - current_value;
+        if v > max_v {
+            return Err(NumericError::Overflow);
+        }
+        let new_value = current_value + v;
+        self.uncommitted_value = Some(new_value);
+        Ok(new_value)
+    }
+    pub fn sub(&mut self, v: T) -> Result<T, NumericError> {
+        let current_value = self.current_value();
+        let max_v = current_value - T::min_value();
+        if v > max_v {
+            return Err(NumericError::Underflow);
+        }
+        let new_value = current_value - v;
+        self.uncommitted_value = Some(new_value);
+        Ok(new_value)
+    }
+    pub fn saturating_sub(&mut self, mut v: T) -> T {
+        let current_value = self.current_value();
+        let max_v = current_value - T::min_value();
+        if v > max_v {
+            log_stor!(debug "Numeric underflow ({})", self.description);
+            v = max_v;
+        }
+        let new_value = current_value - v;
+        self.uncommitted_value = Some(new_value);
+        new_value
+    }
+
+    pub fn check_limit(&self) -> bool {
+        if let Some(uncommitted_value) = self.uncommitted_value {
+            if let Some(limit) = self.limit {
+                if uncommitted_value > limit {
+                    return false;
+                }
+            }
+        }
+        true
+    }
+
+    pub fn commit(&mut self) -> Result<T, LimitError> {
+        if let Some(uncommitted_value) = self.uncommitted_value {
+            if let Some(limit) = self.limit {
+                if uncommitted_value > limit {
+                    log_stor!(debug "Commit over limit failed ({}): {} > {}", self.description, uncommitted_value, limit);
+                    return Err(LimitError::OverLimit);
+                }
+            }
+            log_stor!(debug "Commit ({}): {} => {}", self.description, self.value, uncommitted_value);
+            self.value = uncommitted_value;
+        }
+        Ok(self.value)
+    }
+
+    pub fn rollback(&mut self) -> T {
+        if let Some(uv) = self.uncommitted_value {
+            log_stor!(debug "Rollback ({}): {} (drop {})", self.description, self.value, uv);
+        }
+        return self.value;
+    }
+
+    pub fn get(&self) -> T {
+        return self.value;
+    }
+}
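Note: a self-contained sketch of the stage/check/commit/rollback pattern LimitedSize provides, specialized to usize (the real type above is generic over unsigned integers and logs through log_stor!): updates are staged, checked against an optional limit, and only become visible once committed.

struct Budget {
    value: usize,
    limit: Option<usize>,
    uncommitted: Option<usize>,
}

impl Budget {
    // Stage an increase without making it visible yet.
    fn add(&mut self, v: usize) -> usize {
        let new_value = self.uncommitted.unwrap_or(self.value) + v;
        self.uncommitted = Some(new_value);
        new_value
    }
    // Accept the staged value unless it exceeds the limit.
    fn commit(&mut self) -> Result<usize, &'static str> {
        if let Some(uv) = self.uncommitted {
            if self.limit.map_or(false, |l| uv > l) {
                return Err("over limit");
            }
            self.value = uv;
            self.uncommitted = None;
        }
        Ok(self.value)
    }
    // Drop the staged value and keep the last committed one.
    fn rollback(&mut self) -> usize {
        self.uncommitted = None;
        self.value
    }
}

fn main() {
    let mut space = Budget { value: 0, limit: Some(100), uncommitted: None };
    space.add(80);
    assert_eq!(space.commit(), Ok(80)); // within the limit: committed
    space.add(50);
    assert!(space.commit().is_err());   // 130 > 100: rejected, committed value unchanged
    assert_eq!(space.rollback(), 80);   // discard the staged change
}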
@@ -1,6 +1,7 @@
 mod debug;
 mod get_value;
 mod keys;
+mod limited_size;
 mod record_store;
 mod record_store_limits;
 mod set_value;
@@ -9,6 +10,7 @@ mod tasks;
 mod types;
 
 use keys::*;
+use limited_size::*;
 use record_store::*;
 use record_store_limits::*;
 use storage_manager_inner::*;
@@ -21,8 +21,8 @@ where
     subkey_table: Option<TableDB>,
     record_index: LruCache<RecordTableKey, Record<D>>,
     subkey_cache: LruCache<SubkeyTableKey, RecordData>,
-    subkey_cache_total_size: usize,
-    total_storage_space: usize,
+    subkey_cache_total_size: LimitedSize<usize>,
+    total_storage_space: LimitedSize<u64>,
 
     dead_records: Vec<(RecordTableKey, Record<D>)>,
     changed_records: HashSet<RecordTableKey>,
@@ -47,6 +47,13 @@ where
 {
     pub fn new(table_store: TableStore, name: &str, limits: RecordStoreLimits) -> Self {
         let subkey_cache_size = limits.subkey_cache_size as usize;
+        let limit_subkey_cache_total_size = limits
+            .max_subkey_cache_memory_mb
+            .map(|mb| mb * 1_048_576usize);
+        let limit_max_storage_space = limits
+            .max_storage_space_mb
+            .map(|mb| mb as u64 * 1_048_576u64);
+
         Self {
             table_store,
             name: name.to_owned(),
@@ -55,8 +62,16 @@ where
             subkey_table: None,
             record_index: LruCache::new(limits.max_records.unwrap_or(usize::MAX)),
             subkey_cache: LruCache::new(subkey_cache_size),
-            subkey_cache_total_size: 0,
-            total_storage_space: 0,
+            subkey_cache_total_size: LimitedSize::new(
+                "subkey_cache_total_size",
+                0,
+                limit_subkey_cache_total_size,
+            ),
+            total_storage_space: LimitedSize::new(
+                "total_storage_space",
+                0,
+                limit_max_storage_space,
+            ),
             dead_records: Vec::new(),
             changed_records: HashSet::new(),
             purge_dead_records_mutex: Arc::new(AsyncMutex::new(())),
@@ -89,8 +104,17 @@ where
         let mut dead_records = Vec::new();
         for ri in record_index_saved {
             // total the storage space
-            self.total_storage_space += mem::size_of::<RecordTableKey>();
-            self.total_storage_space += ri.1.total_size();
+            self.total_storage_space
+                .add(mem::size_of::<RecordTableKey>() as u64)
+                .unwrap();
+            self.total_storage_space
+                .add(ri.1.total_size() as u64)
+                .unwrap();
+            if let Err(_) = self.total_storage_space.commit() {
+                // If we overflow the limit, kill off the record
+                dead_records.push((ri.0, ri.1));
+                continue;
+            }
 
             // add to index and ensure we deduplicate in the case of an error
             if let Some(v) = self.record_index.insert(ri.0, ri.1, |k, v| {
@@ -130,24 +154,32 @@ where
             // Old data
             dead_size += old_record_data.total_size();
         }
-        self.subkey_cache_total_size -= dead_size;
-        self.subkey_cache_total_size += record_data_total_size;
+        self.subkey_cache_total_size.sub(dead_size).unwrap();
+        self.subkey_cache_total_size
+            .add(record_data_total_size)
+            .unwrap();
 
         // Purge over size limit
-        if let Some(max_subkey_cache_memory_mb) = self.limits.max_subkey_cache_memory_mb {
-            while self.subkey_cache_total_size > (max_subkey_cache_memory_mb * 1_048_576usize) {
-                if let Some((_, v)) = self.subkey_cache.remove_lru() {
-                    self.subkey_cache_total_size -= v.total_size();
-                } else {
-                    break;
-                }
+        while self.subkey_cache_total_size.commit().is_err() {
+            if let Some((_, v)) = self.subkey_cache.remove_lru() {
+                self.subkey_cache_total_size.saturating_sub(v.total_size());
+            } else {
+                self.subkey_cache_total_size.rollback();
+
+                log_stor!(error "subkey cache should not be empty, has {} bytes unaccounted for", self.subkey_cache_total_size.get());
+
+                self.subkey_cache_total_size.set(0);
+                self.subkey_cache_total_size.commit().unwrap();
+                break;
             }
         }
     }
 
     fn remove_from_subkey_cache(&mut self, key: SubkeyTableKey) {
         if let Some(dead_record_data) = self.subkey_cache.remove(&key) {
-            self.subkey_cache_total_size -= dead_record_data.total_size();
+            self.subkey_cache_total_size
+                .saturating_sub(dead_record_data.total_size());
+            self.subkey_cache_total_size.commit().unwrap();
         }
     }
 
@@ -206,8 +238,11 @@ where
             }
 
             // Remove from total size
-            self.total_storage_space -= mem::size_of::<RecordTableKey>();
-            self.total_storage_space -= v.total_size();
+            self.total_storage_space
+                .saturating_sub(mem::size_of::<RecordTableKey>() as u64);
+            self.total_storage_space
+                .saturating_sub(v.total_size() as u64);
+            self.total_storage_space.commit().unwrap();
         }
         if let Err(e) = rt_xact.commit().await {
             log_stor!(error "failed to commit record table transaction: {}", e);
@@ -258,12 +293,12 @@ where
         };
 
         // If over size limit, dont create record
-        let new_total_storage_space =
-            self.total_storage_space + mem::size_of::<RecordTableKey>() + record.total_size();
-        if let Some(max_storage_space_mb) = &self.limits.max_storage_space_mb {
-            if new_total_storage_space > (max_storage_space_mb * 1_048_576usize) {
-                apibail_try_again!();
-            }
+        self.total_storage_space
+            .add((mem::size_of::<RecordTableKey>() + record.total_size()) as u64)
+            .unwrap();
+        if !self.total_storage_space.check_limit() {
+            self.total_storage_space.rollback();
+            apibail_try_again!();
         }
 
         // Save to record table
@@ -286,7 +321,7 @@ where
         }
 
         // Update storage space
-        self.total_storage_space = new_total_storage_space;
+        self.total_storage_space.commit().unwrap();
 
         Ok(())
     }
@@ -482,12 +517,14 @@ where
         }
 
         // Check new total storage space
-        let new_total_storage_space =
-            self.total_storage_space + new_record_data_size - prior_record_data_size;
-        if let Some(max_storage_space_mb) = self.limits.max_storage_space_mb {
-            if new_total_storage_space > (max_storage_space_mb * 1_048_576usize) {
-                apibail_try_again!();
-            }
+        self.total_storage_space
+            .sub(prior_record_data_size as u64)
+            .unwrap();
+        self.total_storage_space
+            .add(new_record_data_size as u64)
+            .unwrap();
+        if !self.total_storage_space.check_limit() {
+            apibail_try_again!();
         }
 
         // Write subkey
@@ -506,6 +543,9 @@ where
             })
             .expect("record should still be here");
 
+        // Update storage space
+        self.total_storage_space.commit().unwrap();
+
         Ok(())
     }
 
@@ -513,16 +553,19 @@ where
     /// This will force a garbage collection of the space immediately
     /// If zero is passed in here, a garbage collection will be performed of dead records
    /// without removing any live records
-    pub async fn reclaim_space(&mut self, space: usize) {
+    pub async fn reclaim_space(&mut self, space: usize) -> usize {
+        let mut reclaimed = 0usize;
         while reclaimed < space {
             if let Some((k, v)) = self.record_index.remove_lru() {
                 reclaimed += mem::size_of::<RecordTableKey>();
                 reclaimed += v.total_size();
                 self.add_dead_record(k, v);
             } else {
                 break;
             }
         }
         self.purge_dead_records(false).await;
+        reclaimed
     }
 
     pub(super) fn debug_records(&self) -> String {
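Note: a hedged sketch of the reclaim_space loop above, with a VecDeque standing in for the LRU record index: least-recently-used records are evicted and their sizes summed until at least `space` bytes are reclaimed or the store is empty, and the total is returned (which is what the new purge debug output reports).

use std::collections::VecDeque;

// records: (key, stored size in bytes), least recently used first.
fn reclaim_space(records: &mut VecDeque<(u64, usize)>, space: usize) -> usize {
    let mut reclaimed = 0usize;
    while reclaimed < space {
        if let Some((_key, size)) = records.pop_front() {
            reclaimed += size;
        } else {
            break;
        }
    }
    reclaimed
}

fn main() {
    let mut records = VecDeque::from([(1u64, 400usize), (2, 700), (3, 900)]);
    // Asking for 1000 bytes evicts the two oldest records (400 + 700 = 1100).
    assert_eq!(reclaim_space(&mut records, 1000), 1100);
    assert_eq!(records.len(), 1);
    // purge_local_records/purge_remote_records pass usize::MAX when no byte
    // count is given, i.e. purge everything.
    assert_eq!(reclaim_space(&mut records, usize::MAX), 900);
}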
@@ -532,9 +575,9 @@ where
         out += "Record Index:\n";
         for (rik, rec) in &self.record_index {
             out += &format!(
-                "  {} @ {} len={} subkeys={}\n",
+                "  {} age={} len={} subkeys={}\n",
                 rik.key.to_string(),
-                rec.last_touched().as_u64(),
+                debug_duration(get_timestamp() - rec.last_touched().as_u64()),
                 rec.record_data_size(),
                 rec.stored_subkeys(),
             );
@@ -542,9 +585,9 @@ where
         out += &format!("Subkey Cache Count: {}\n", self.subkey_cache.len());
         out += &format!(
             "Subkey Cache Total Size: {}\n",
-            self.subkey_cache_total_size
+            self.subkey_cache_total_size.get()
         );
-        out += &format!("Total Storage Space: {}\n", self.total_storage_space);
+        out += &format!("Total Storage Space: {}\n", self.total_storage_space.get());
         out += &format!("Dead Records: {}\n", self.dead_records.len());
         for dr in &self.dead_records {
             out += &format!("  {}\n", dr.0.key.to_string());
@@ -901,6 +901,20 @@ impl VeilidAPI {
         return Ok(out);
     }
 
+    async fn debug_record_purge(&self, args: Vec<String>) -> VeilidAPIResult<String> {
+        // <local|remote>
+        let storage_manager = self.storage_manager()?;
+
+        let scope = get_debug_argument_at(&args, 1, "debug_record_purge", "scope", get_string)?;
+        let bytes = get_debug_argument_at(&args, 2, "debug_record_purge", "bytes", get_number).ok();
+        let out = match scope.as_str() {
+            "local" => storage_manager.purge_local_records(bytes).await,
+            "remote" => storage_manager.purge_remote_records(bytes).await,
+            _ => "Invalid scope\n".to_owned(),
+        };
+        return Ok(out);
+    }
+
     async fn debug_record(&self, args: String) -> VeilidAPIResult<String> {
         let args: Vec<String> = args.split_whitespace().map(|s| s.to_owned()).collect();
 
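Usage note: with the dispatch and help-text changes below, the debug shell should accept, for example, `record purge local` to drop all local records, or `record purge remote 1048576` to reclaim roughly one megabyte from the remote record store (the byte count is optional and defaults to purging everything).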
@@ -908,6 +922,8 @@ impl VeilidAPI {
 
         if command == "list" {
             self.debug_record_list(args).await
+        } else if command == "purge" {
+            self.debug_record_purge(args).await
         } else {
             Ok(">>> Unknown command\n".to_owned())
         }
@@ -936,7 +952,8 @@ impl VeilidAPI {
         list
         import <blob>
         test <route>
-    record list <local|remote>
+    record list <local|remote>
+           purge <local|remote> [bytes]
 
     <destination> is:
         * direct: <node>[+<safety>][<modifiers>]
@@ -259,11 +259,11 @@ impl JsonRequestProcessor {
                     .add_routing_context(routing_context.clone().with_sequencing(sequencing)),
             }
         }
-        RoutingContextRequestOp::AppCall { target, request } => {
+        RoutingContextRequestOp::AppCall { target, message } => {
             RoutingContextResponseOp::AppCall {
                 result: to_json_api_result_with_vec_u8(
                     self.parse_target(target)
-                        .then(|tr| async { routing_context.app_call(tr?, request).await })
+                        .then(|tr| async { routing_context.app_call(tr?, message).await })
                         .await,
                 ),
             }
|
@ -29,7 +29,7 @@ pub enum RoutingContextRequestOp {
|
|||
target: String,
|
||||
#[serde(with = "json_as_base64")]
|
||||
#[schemars(with = "String")]
|
||||
request: Vec<u8>,
|
||||
message: Vec<u8>,
|
||||
},
|
||||
AppMessage {
|
||||
target: String,
|
||||
|
|
|
@@ -137,14 +137,14 @@ impl RoutingContext {
     ////////////////////////////////////////////////////////////////
     // App-level Messaging
 
-    pub async fn app_call(&self, target: Target, request: Vec<u8>) -> VeilidAPIResult<Vec<u8>> {
+    pub async fn app_call(&self, target: Target, message: Vec<u8>) -> VeilidAPIResult<Vec<u8>> {
         let rpc_processor = self.api.rpc_processor()?;
 
         // Get destination
         let dest = self.get_destination(target).await?;
 
         // Send app message
-        let answer = match rpc_processor.rpc_call_app_call(dest, request).await {
+        let answer = match rpc_processor.rpc_call_app_call(dest, message).await {
             Ok(NetworkResult::Value(v)) => v,
             Ok(NetworkResult::Timeout) => apibail_timeout!(),
             Ok(NetworkResult::ServiceUnavailable(e)) => {