mirror of https://gitlab.com/veilid/veilid.git
synced 2025-04-20 07:46:11 -04:00

Merge branch 'main' into convert_cicd_to_python
This commit is contained in commit 09f7210979
460  Cargo.lock  (generated)
File diff suppressed because it is too large
@@ -27,12 +27,12 @@ rt-async-std = [
rt-tokio = ["tokio", "tokio-util", "veilid-tools/rt-tokio", "cursive/rt-tokio"]

[dependencies]
async-std = { version = "1.12.0", features = [
async-std = { version = "1.13.0", features = [
"unstable",
"attributes",
], optional = true }
tokio = { version = "1.38.1", features = ["full", "tracing"], optional = true }
tokio-util = { version = "0.7.11", features = ["compat"], optional = true }
tokio = { version = "1.43.0", features = ["full", "tracing"], optional = true }
tokio-util = { version = "0.7.13", features = ["compat"], optional = true }
async-tungstenite = { version = "^0.23" }
cursive = { git = "https://gitlab.com/veilid/cursive.git", default-features = false, features = [
"crossterm",
@@ -44,7 +44,7 @@ cursive_buffered_backend = { git = "https://gitlab.com/veilid/cursive-buffered-b
# cursive-multiplex = "0.6.0"
# cursive_tree_view = "0.6.0"
cursive_table_view = { git = "https://gitlab.com/veilid/cursive-table-view.git" }
arboard = { version = "3.4.0", default-features = false }
arboard = { version = "3.4.1", default-features = false }
# cursive-tabs = "0.5.0"
clap = { version = "4", features = ["derive"] }
directories = "^5"
@@ -68,12 +68,12 @@ flume = { version = "^0", features = ["async"] }
data-encoding = { version = "^2" }
indent = { version = "0.1.1" }

chrono = "0.4.38"
chrono = "0.4.40"
owning_ref = "0.4.1"
unicode-width = "0.1.13"
unicode-width = "0.1.14"
lru = "0.10.1"
rustyline-async = "0.4.2"
console = "0.15.8"
rustyline-async = "0.4.5"
console = "0.15.11"

[dev-dependencies]
serial_test = "^2"
@@ -71,8 +71,8 @@ veilid-tools = { version = "0.4.3", path = "../veilid-tools", features = [
"tracing",
], default-features = false }
paste = "1.0.15"
once_cell = "1.19.0"
backtrace = "0.3.71"
once_cell = "1.20.3"
backtrace = "^0.3.71"
num-traits = "0.2.19"
shell-words = "1.1.0"
static_assertions = "1.1.0"
@@ -82,14 +82,14 @@ lazy_static = "1.5.0"
directories = "5.0.1"

# Logging
tracing = { version = "0.1.40", features = ["log", "attributes"] }
tracing-subscriber = "0.3.18"
tracing-error = "0.2.0"
tracing = { version = "0.1.41", features = ["log", "attributes"] }
tracing-subscriber = "0.3.19"
tracing-error = "0.2.1"
eyre = "0.6.12"
thiserror = "1.0.63"
thiserror = "1.0.69"

# Data structures
enumset = { version = "1.1.3", features = ["serde"] }
enumset = { version = "1.1.5", features = ["serde"] }
keyvaluedb = "0.1.2"
range-set-blaze = "0.1.16"
weak-table = "0.3.2"
@@ -98,10 +98,10 @@ hashlink = { package = "veilid-hashlink", version = "0.1.1", features = [
] }

# System
futures-util = { version = "0.3.30", default-features = false, features = [
futures-util = { version = "0.3.31", default-features = false, features = [
"alloc",
] }
flume = { version = "0.11.0", features = ["async"] }
flume = { version = "0.11.1", features = ["async"] }
parking_lot = "0.12.3"
lock_api = "0.4.12"
stop-token = { version = "0.7.0", default-features = false }
@@ -124,23 +124,23 @@ curve25519-dalek = { version = "4.1.3", default-features = false, features = [
"zeroize",
"precomputed-tables",
] }
blake3 = { version = "1.5.3" }
blake3 = { version = "1.6.1" }
chacha20poly1305 = "0.10.1"
chacha20 = "0.9.1"
argon2 = "0.5.3"

# Network
async-std-resolver = { version = "0.24.1", optional = true }
hickory-resolver = { version = "0.24.1", optional = true }
async-std-resolver = { version = "0.24.4", optional = true }
hickory-resolver = { version = "0.24.4", optional = true }

# Serialization
capnp = { version = "0.19.6", default-features = false, features = ["alloc"] }
serde = { version = "1.0.214", features = ["derive", "rc"] }
serde_json = { version = "1.0.132" }
capnp = { version = "0.19.8", default-features = false, features = ["alloc"] }
serde = { version = "1.0.218", features = ["derive", "rc"] }
serde_json = { version = "1.0.140" }
serde-big-array = "0.5.1"
json = "0.12.4"
data-encoding = { version = "2.6.0" }
schemars = "0.8.21"
data-encoding = { version = "2.8.0" }
schemars = "0.8.22"
lz4_flex = { version = "0.11.3", default-features = false, features = [
"safe-encode",
"safe-decode",
@@ -155,18 +155,18 @@ sanitize-filename = "0.5.0"
# Tools
config = { version = "0.13.4", default-features = false, features = ["yaml"] }
bugsalot = { package = "veilid-bugsalot", version = "0.2.0" }
chrono = "0.4.38"
libc = "0.2.155"
chrono = "0.4.40"
libc = "0.2.170"
nix = "0.27.1"
maxminddb = { version = "0.24.0", optional = true }

# System
async-std = { version = "1.12.0", features = ["unstable"], optional = true }
async-std = { version = "1.13.0", features = ["unstable"], optional = true }
sysinfo = { version = "^0.30.13", default-features = false }
tokio = { version = "1.38.1", features = ["full"], optional = true }
tokio-util = { version = "0.7.11", features = ["compat"], optional = true }
tokio-stream = { version = "0.1.15", features = ["net"], optional = true }
futures-util = { version = "0.3.30", default-features = false, features = [
tokio = { version = "1.43.0", features = ["full"], optional = true }
tokio-util = { version = "0.7.13", features = ["compat"], optional = true }
tokio-stream = { version = "0.1.17", features = ["net"], optional = true }
futures-util = { version = "0.3.31", default-features = false, features = [
"async-await",
"sink",
"std",
@@ -201,9 +201,9 @@ async_executors = { version = "0.7.0", default-features = false, features = [
"bindgen",
"timer",
] }
wasm-bindgen = "0.2.92"
js-sys = "0.3.69"
wasm-bindgen-futures = "0.4.42"
wasm-bindgen = "0.2.100"
js-sys = "0.3.77"
wasm-bindgen-futures = "0.4.50"
send_wrapper = { version = "0.6.0", features = ["futures"] }
serde_bytes = { version = "0.11", default-features = false, features = [
"alloc",
@@ -223,7 +223,7 @@ keyvaluedb-web = "0.1.2"

### Configuration for WASM32 'web-sys' crate
[target.'cfg(all(target_arch = "wasm32", target_os = "unknown"))'.dependencies.web-sys]
version = "0.3.69"
version = "0.3.77"
features = [
'Document',
'HtmlDocument',
@@ -263,23 +263,22 @@ tracing-oslog = { version = "0.1.2", optional = true }
[target.'cfg(not(all(target_arch = "wasm32", target_os = "unknown")))'.dev-dependencies]
simplelog = { version = "0.12.2", features = ["test"] }
serial_test = "2.0.0"
tracing-subscriber = { version = "0.3.18", features = ["env-filter"] }
tracing-subscriber = { version = "0.3.19", features = ["env-filter"] }

[target.'cfg(all(target_arch = "wasm32", target_os = "unknown"))'.dev-dependencies]
serial_test = { version = "2.0.0", default-features = false, features = [
"async",
] }
wasm-bindgen-test = "0.3.42"
wasm-bindgen-test = "0.3.50"
console_error_panic_hook = "0.1.7"
wee_alloc = "0.4.5"
wasm-logger = "0.2.0"

### BUILD OPTIONS

[build-dependencies]
capnpc = "0.19.0"
glob = "0.3.1"
filetime = "0.2.23"
glob = "0.3.2"
filetime = "0.2.25"
sha2 = "0.10.8"
hex = "0.4.3"
reqwest = { version = "0.11", features = ["blocking"], optional = true }
@@ -50,6 +50,7 @@ mod logging;
mod network_manager;
mod routing_table;
mod rpc_processor;
mod stats_accounting;
mod storage_manager;
mod table_store;
mod veilid_api;
@@ -64,6 +65,7 @@ pub use self::logging::{
DEFAULT_LOG_FACILITIES_ENABLED_LIST, DEFAULT_LOG_FACILITIES_IGNORE_LIST,
DURATION_LOG_FACILITIES, FLAME_LOG_FACILITIES_IGNORE_LIST, VEILID_LOG_KEY_FIELD,
};
pub(crate) use self::stats_accounting::*;
pub use self::veilid_api::*;
pub use self::veilid_config::*;
pub use veilid_tools as tools;
@ -289,3 +289,91 @@ macro_rules! veilid_log {
|
||||
$($k).+ = $($fields)*
|
||||
)};
|
||||
}
|
||||
|
||||
#[macro_export]
|
||||
macro_rules! network_result_value_or_log {
|
||||
($self:ident $r:expr => $f:expr) => {
|
||||
network_result_value_or_log!($self target: self::__VEILID_LOG_FACILITY, $r => [ "" ] $f )
|
||||
};
|
||||
($self:ident $r:expr => [ $d:expr ] $f:expr) => {
|
||||
network_result_value_or_log!($self target: self::__VEILID_LOG_FACILITY, $r => [ $d ] $f )
|
||||
};
|
||||
($self:ident target: $target:expr, $r:expr => $f:expr) => {
|
||||
network_result_value_or_log!($self target: $target, $r => [ "" ] $f )
|
||||
};
|
||||
($self:ident target: $target:expr, $r:expr => [ $d:expr ] $f:expr) => { {
|
||||
let __extra_message = if debug_target_enabled!("network_result") {
|
||||
$d.to_string()
|
||||
} else {
|
||||
"".to_string()
|
||||
};
|
||||
match $r {
|
||||
NetworkResult::Timeout => {
|
||||
veilid_log!($self debug target: $target,
|
||||
"{} at {}@{}:{} in {}{}",
|
||||
"Timeout",
|
||||
file!(),
|
||||
line!(),
|
||||
column!(),
|
||||
fn_name::uninstantiated!(),
|
||||
__extra_message
|
||||
);
|
||||
$f
|
||||
}
|
||||
NetworkResult::ServiceUnavailable(ref s) => {
|
||||
veilid_log!($self debug target: $target,
|
||||
"{}({}) at {}@{}:{} in {}{}",
|
||||
"ServiceUnavailable",
|
||||
s,
|
||||
file!(),
|
||||
line!(),
|
||||
column!(),
|
||||
fn_name::uninstantiated!(),
|
||||
__extra_message
|
||||
);
|
||||
$f
|
||||
}
|
||||
NetworkResult::NoConnection(ref e) => {
|
||||
veilid_log!($self debug target: $target,
|
||||
"{}({}) at {}@{}:{} in {}{}",
|
||||
"No connection",
|
||||
e.to_string(),
|
||||
file!(),
|
||||
line!(),
|
||||
column!(),
|
||||
fn_name::uninstantiated!(),
|
||||
__extra_message
|
||||
);
|
||||
$f
|
||||
}
|
||||
NetworkResult::AlreadyExists(ref e) => {
|
||||
veilid_log!($self debug target: $target,
|
||||
"{}({}) at {}@{}:{} in {}{}",
|
||||
"Already exists",
|
||||
e.to_string(),
|
||||
file!(),
|
||||
line!(),
|
||||
column!(),
|
||||
fn_name::uninstantiated!(),
|
||||
__extra_message
|
||||
);
|
||||
$f
|
||||
}
|
||||
NetworkResult::InvalidMessage(ref s) => {
|
||||
veilid_log!($self debug target: $target,
|
||||
"{}({}) at {}@{}:{} in {}{}",
|
||||
"Invalid message",
|
||||
s,
|
||||
file!(),
|
||||
line!(),
|
||||
column!(),
|
||||
fn_name::uninstantiated!(),
|
||||
__extra_message
|
||||
);
|
||||
$f
|
||||
}
|
||||
NetworkResult::Value(v) => v,
|
||||
}
|
||||
} };
|
||||
|
||||
}
|
||||
|
@@ -8,7 +8,7 @@ impl_veilid_log_facility!("net");

const PROTECTED_CONNECTION_DROP_SPAN: TimestampDuration = TimestampDuration::new_secs(10);
const PROTECTED_CONNECTION_DROP_COUNT: usize = 3;
const NEW_CONNECTION_RETRY_COUNT: usize = 1;
const NEW_CONNECTION_RETRY_COUNT: usize = 0;
const NEW_CONNECTION_RETRY_DELAY_MS: u32 = 500;

///////////////////////////////////////////////////////////
@@ -415,7 +415,19 @@ impl ConnectionManager {
let best_port = preferred_local_address.map(|pla| pla.port());

// Async lock on the remote address for atomicity per remote
let _lock_guard = self.arc.address_lock_table.lock_tag(remote_addr).await;
// Use the initial connection timeout here because multiple calls to get_or_create_connection
// can be performed simultaneously and we want to wait for the first one to succeed or not
let Ok(_lock_guard) = timeout(
self.arc.connection_initial_timeout_ms,
self.arc.address_lock_table.lock_tag(remote_addr),
)
.await
else {
veilid_log!(self debug "== get_or_create_connection: connection busy, not connecting to dial_info={:?}", dial_info);
return Ok(NetworkResult::no_connection_other(
"connection endpoint busy",
));
};

veilid_log!(self trace "== get_or_create_connection dial_info={:?}", dial_info);

@@ -449,7 +461,8 @@ impl ConnectionManager {
let mut retry_count = NEW_CONNECTION_RETRY_COUNT;
let network_manager = self.network_manager();

let prot_conn = network_result_try!(loop {
let nres = loop {
veilid_log!(self trace "== get_or_create_connection connect({}) {:?} -> {}", retry_count, preferred_local_address, dial_info);
let result_net_res = ProtocolNetworkConnection::connect(
self.registry(),
preferred_local_address,
@@ -474,12 +487,16 @@ impl ConnectionManager {
}
}
};
veilid_log!(self debug "get_or_create_connection retries left: {}", retry_count);
retry_count -= 1;

// Release the preferred local address if things can't connect due to a low-level collision we dont have a record of
preferred_local_address = None;
// // XXX: This should not be necessary
// // Release the preferred local address if things can't connect due to a low-level collision we dont have a record of
// preferred_local_address = None;
sleep(NEW_CONNECTION_RETRY_DELAY_MS).await;
};

let prot_conn = network_result_value_or_log!(self target:"network_result", nres => [ format!("== get_or_create_connection failed {:?} -> {}", preferred_local_address, dial_info) ] {
network_result_raise!(nres);
});

// Add to the connection table
@@ -598,7 +615,7 @@ impl ConnectionManager {

// Callback from network connection receive loop when it exits
// cleans up the entry in the connection table
pub(super) async fn report_connection_finished(&self, connection_id: NetworkConnectionId) {
pub(super) fn report_connection_finished(&self, connection_id: NetworkConnectionId) {
// Get channel sender
let sender = {
let mut inner = self.arc.inner.lock();
@@ -668,7 +685,7 @@ impl ConnectionManager {
}
}
}
let _ = sender.send_async(ConnectionManagerEvent::Dead(conn)).await;
let _ = sender.send(ConnectionManagerEvent::Dead(conn));
}
}

37  veilid-core/src/network_manager/debug.rs  Normal file
@@ -0,0 +1,37 @@
use super::*;

impl NetworkManager {
pub fn debug_info_nodeinfo(&self) -> String {
let mut out = String::new();
let inner = self.inner.lock();
out += &format!(
"Relay Worker Dequeue Latency:\n{}",
indent_all_string(&inner.stats.relay_worker_dequeue_latency)
);
out += "\n";
out += &format!(
"Relay Worker Process Latency:\n{}",
indent_all_string(&inner.stats.relay_worker_process_latency)
);
out
}

pub fn debug(&self) -> String {
let stats = self.get_stats();

let mut out = String::new();
out += "Network Manager\n";
out += "---------------\n";
let mut out = format!(
"Transfer stats:\n{}\n",
indent_all_string(&stats.self_stats.transfer_stats)
);
out += &self.debug_info_nodeinfo();

out += "Node Contact Method Cache\n";
out += "-------------------------\n";
out += &self.inner.lock().node_contact_method_cache.debug();

out
}
}
@ -10,10 +10,12 @@ mod address_filter;
|
||||
mod connection_handle;
|
||||
mod connection_manager;
|
||||
mod connection_table;
|
||||
mod debug;
|
||||
mod direct_boot;
|
||||
mod network_connection;
|
||||
mod node_contact_method_cache;
|
||||
mod receipt_manager;
|
||||
mod relay_worker;
|
||||
mod send_data;
|
||||
mod stats;
|
||||
mod tasks;
|
||||
@ -26,9 +28,10 @@ pub mod tests;
|
||||
|
||||
pub use connection_manager::*;
|
||||
pub use network_connection::*;
|
||||
pub(crate) use node_contact_method_cache::*;
|
||||
pub use receipt_manager::*;
|
||||
pub use stats::*;
|
||||
|
||||
pub(crate) use node_contact_method_cache::*;
|
||||
pub(crate) use types::*;
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////////////////
|
||||
@ -42,6 +45,7 @@ use hashlink::LruCache;
|
||||
use native::*;
|
||||
#[cfg(not(all(target_arch = "wasm32", target_os = "unknown")))]
|
||||
pub use native::{MAX_CAPABILITIES, PUBLIC_INTERNET_CAPABILITIES};
|
||||
use relay_worker::*;
|
||||
use routing_table::*;
|
||||
use rpc_processor::*;
|
||||
#[cfg(all(target_arch = "wasm32", target_os = "unknown"))]
|
||||
@ -60,6 +64,7 @@ pub const IPADDR_MAX_INACTIVE_DURATION_US: TimestampDuration =
|
||||
pub const ADDRESS_FILTER_TASK_INTERVAL_SECS: u32 = 60;
|
||||
pub const BOOT_MAGIC: &[u8; 4] = b"BOOT";
|
||||
pub const HOLE_PUNCH_DELAY_MS: u32 = 100;
|
||||
pub const RELAY_WORKERS_PER_CORE: u32 = 16;
|
||||
|
||||
// Things we get when we start up and go away when we shut down
|
||||
// Routing table is not in here because we want it to survive a network shutdown/startup restart
|
||||
@ -171,7 +176,6 @@ impl Default for NetworkManagerStartupContext {
|
||||
Self::new()
|
||||
}
|
||||
}
|
||||
|
||||
// The mutable state of the network manager
|
||||
#[derive(Debug)]
|
||||
struct NetworkManagerInner {
|
||||
@ -181,6 +185,11 @@ struct NetworkManagerInner {
|
||||
address_check: Option<AddressCheck>,
|
||||
peer_info_change_subscription: Option<EventBusSubscription>,
|
||||
socket_address_change_subscription: Option<EventBusSubscription>,
|
||||
|
||||
// Relay workers
|
||||
relay_stop_source: Option<StopSource>,
|
||||
relay_send_channel: Option<flume::Sender<RelayWorkerRequest>>,
|
||||
relay_worker_join_handles: Vec<MustJoinHandle<()>>,
|
||||
}
|
||||
|
||||
pub(crate) struct NetworkManager {
|
||||
@ -202,6 +211,10 @@ pub(crate) struct NetworkManager {
|
||||
|
||||
// Startup context
|
||||
startup_context: NetworkManagerStartupContext,
|
||||
|
||||
// Relay workers config
|
||||
concurrency: u32,
|
||||
queue_size: u32,
|
||||
}
|
||||
|
||||
impl_veilid_component!(NetworkManager);
|
||||
@ -214,6 +227,8 @@ impl fmt::Debug for NetworkManager {
|
||||
.field("address_filter", &self.address_filter)
|
||||
.field("network_key", &self.network_key)
|
||||
.field("startup_context", &self.startup_context)
|
||||
.field("concurrency", &self.concurrency)
|
||||
.field("queue_size", &self.queue_size)
|
||||
.finish()
|
||||
}
|
||||
}
|
||||
@ -227,6 +242,10 @@ impl NetworkManager {
|
||||
address_check: None,
|
||||
peer_info_change_subscription: None,
|
||||
socket_address_change_subscription: None,
|
||||
//
|
||||
relay_send_channel: None,
|
||||
relay_stop_source: None,
|
||||
relay_worker_join_handles: Vec::new(),
|
||||
}
|
||||
}
|
||||
|
||||
@ -264,6 +283,26 @@ impl NetworkManager {
|
||||
network_key
|
||||
};
|
||||
|
||||
// make local copy of node id for easy access
|
||||
let (concurrency, queue_size) = {
|
||||
let config = registry.config();
|
||||
let c = config.get();
|
||||
|
||||
// set up channel
|
||||
let mut concurrency = c.network.rpc.concurrency;
|
||||
let queue_size = c.network.rpc.queue_size;
|
||||
if concurrency == 0 {
|
||||
concurrency = get_concurrency();
|
||||
if concurrency == 0 {
|
||||
concurrency = 1;
|
||||
}
|
||||
|
||||
// Default relay concurrency is the number of CPUs * 16 relay workers per core
|
||||
concurrency *= RELAY_WORKERS_PER_CORE;
|
||||
}
|
||||
(concurrency, queue_size)
|
||||
};
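// Worked example (illustrative numbers, an editor's assumption rather than part of this diff):
// with network.rpc.concurrency = 0 on an 8-core host, get_concurrency() yields 8 and the
// relay pool becomes 8 * RELAY_WORKERS_PER_CORE = 8 * 16 = 128 workers; a non-zero
// configured value is used as-is and is not multiplied.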
|
||||
|
||||
let inner = Self::new_inner();
|
||||
let address_filter = AddressFilter::new(registry.clone());
|
||||
|
||||
@ -282,6 +321,8 @@ impl NetworkManager {
|
||||
),
|
||||
network_key,
|
||||
startup_context,
|
||||
concurrency,
|
||||
queue_size,
|
||||
};
|
||||
|
||||
this.setup_tasks();
|
||||
@ -360,7 +401,8 @@ impl NetworkManager {
|
||||
receipt_manager: receipt_manager.clone(),
|
||||
});
|
||||
|
||||
let address_check = AddressCheck::new(net.clone());
|
||||
// Startup relay workers
|
||||
self.startup_relay_workers()?;
|
||||
|
||||
// Register event handlers
|
||||
let peer_info_change_subscription =
|
||||
@ -371,6 +413,7 @@ impl NetworkManager {
|
||||
|
||||
{
|
||||
let mut inner = self.inner.lock();
|
||||
let address_check = AddressCheck::new(net.clone());
|
||||
inner.address_check = Some(address_check);
|
||||
inner.peer_info_change_subscription = Some(peer_info_change_subscription);
|
||||
inner.socket_address_change_subscription = Some(socket_address_change_subscription);
|
||||
@ -426,6 +469,9 @@ impl NetworkManager {
|
||||
inner.address_check = None;
|
||||
}
|
||||
|
||||
// Shutdown relay workers
|
||||
self.shutdown_relay_workers().await;
|
||||
|
||||
// Shutdown network components if they started up
|
||||
veilid_log!(self debug "shutting down network components");
|
||||
|
||||
@ -1099,10 +1145,11 @@ impl NetworkManager {
|
||||
relay_nr.set_sequencing(Sequencing::EnsureOrdered);
|
||||
};
|
||||
|
||||
// Relay the packet to the desired destination
|
||||
veilid_log!(self trace "relaying {} bytes to {}", data.len(), relay_nr);
|
||||
if let Err(e) = pin_future!(self.send_data(relay_nr, data.to_vec())).await {
|
||||
veilid_log!(self debug "failed to relay envelope: {}" ,e);
|
||||
// Pass relay to RPC system
|
||||
if let Err(e) = self.enqueue_relay(relay_nr, data.to_vec()) {
|
||||
// Couldn't enqueue, but not the sender's fault
|
||||
veilid_log!(self debug "failed to enqueue relay: {}", e);
|
||||
return Ok(false);
|
||||
}
|
||||
}
|
||||
// Inform caller that we dealt with the envelope, but did not process it locally
|
||||
|
@ -662,11 +662,9 @@ impl Network {
|
||||
};
|
||||
} else {
|
||||
// Handle connection-oriented protocols
|
||||
let connmgr = self.network_manager().connection_manager();
|
||||
let conn = network_result_try!(
|
||||
self.network_manager()
|
||||
.connection_manager()
|
||||
.get_or_create_connection(dial_info.clone())
|
||||
.await?
|
||||
connmgr.get_or_create_connection(dial_info.clone()).await?
|
||||
);
|
||||
|
||||
if let ConnectionHandleSendResult::NotSent(_) = conn.send_async(data).await {
|
||||
|
@ -118,7 +118,8 @@ impl Network {
|
||||
let socket_arc = Arc::new(udp_socket);
|
||||
|
||||
// Create protocol handler
|
||||
let protocol_handler = RawUdpProtocolHandler::new(self.registry(), socket_arc);
|
||||
let protocol_handler =
|
||||
RawUdpProtocolHandler::new(self.registry(), socket_arc, addr.is_ipv6());
|
||||
|
||||
// Record protocol handler
|
||||
let mut inner = self.inner.lock();
|
||||
|
@ -7,16 +7,30 @@ pub struct RawUdpProtocolHandler {
|
||||
registry: VeilidComponentRegistry,
|
||||
socket: Arc<UdpSocket>,
|
||||
assembly_buffer: AssemblyBuffer,
|
||||
is_ipv6: bool,
|
||||
default_ttl: u32,
|
||||
current_ttl: Arc<AsyncMutex<u32>>,
|
||||
}
|
||||
|
||||
impl_veilid_component_registry_accessor!(RawUdpProtocolHandler);
|
||||
|
||||
impl RawUdpProtocolHandler {
|
||||
pub fn new(registry: VeilidComponentRegistry, socket: Arc<UdpSocket>) -> Self {
|
||||
pub fn new(registry: VeilidComponentRegistry, socket: Arc<UdpSocket>, is_ipv6: bool) -> Self {
|
||||
// Get original TTL
|
||||
let default_ttl = if is_ipv6 {
|
||||
socket2_operation(socket.as_ref(), |s| s.unicast_hops_v6())
|
||||
.expect("getting IPV6_UNICAST_HOPS should not fail")
|
||||
} else {
|
||||
socket2_operation(socket.as_ref(), |s| s.ttl()).expect("getting IP_TTL should not fail")
|
||||
};
|
||||
|
||||
Self {
|
||||
registry,
|
||||
socket,
|
||||
assembly_buffer: AssemblyBuffer::new(),
|
||||
is_ipv6,
|
||||
default_ttl,
|
||||
current_ttl: Arc::new(AsyncMutex::new(default_ttl)),
|
||||
}
|
||||
}
|
||||
|
||||
@ -104,24 +118,35 @@ impl RawUdpProtocolHandler {
|
||||
return Ok(NetworkResult::no_connection_other("punished"));
|
||||
}
|
||||
|
||||
// Fragment and send
|
||||
let sender = |framed_chunk: Vec<u8>, remote_addr: SocketAddr| async move {
|
||||
let len = network_result_try!(self
|
||||
.socket
|
||||
.send_to(&framed_chunk, remote_addr)
|
||||
.await
|
||||
.into_network_result()?);
|
||||
if len != framed_chunk.len() {
|
||||
bail_io_error_other!("UDP partial send")
|
||||
// Ensure the TTL for sent packets is the default,
|
||||
// then fragment and send the packets
|
||||
{
|
||||
let current_ttl = self.current_ttl.lock().await;
|
||||
if *current_ttl != self.default_ttl {
|
||||
veilid_log!(self error "Incorrect TTL on sent UDP packet ({} != {}): len={}, remote_addr={:?}", *current_ttl, self.default_ttl, data.len(), remote_addr);
|
||||
}
|
||||
Ok(NetworkResult::value(()))
|
||||
};
|
||||
|
||||
network_result_try!(
|
||||
self.assembly_buffer
|
||||
.split_message(data, remote_addr, sender)
|
||||
.await?
|
||||
);
|
||||
// Fragment and send
|
||||
let sender = |framed_chunk: Vec<u8>, remote_addr: SocketAddr| async move {
|
||||
let len = network_result_try!(self
|
||||
.socket
|
||||
.send_to(&framed_chunk, remote_addr)
|
||||
.await
|
||||
.into_network_result()?);
|
||||
if len != framed_chunk.len() {
|
||||
bail_io_error_other!("UDP partial send")
|
||||
}
|
||||
|
||||
veilid_log!(self trace "udp::send_message:chunk(len={}) {:?}", len, remote_addr);
|
||||
Ok(NetworkResult::value(()))
|
||||
};
|
||||
|
||||
network_result_try!(
|
||||
self.assembly_buffer
|
||||
.split_message(data, remote_addr, sender)
|
||||
.await?
|
||||
);
|
||||
}
|
||||
|
||||
// Return a flow for the sent message
|
||||
let peer_addr = PeerAddress::new(
|
||||
@ -157,22 +182,44 @@ impl RawUdpProtocolHandler {
|
||||
return Ok(NetworkResult::no_connection_other("punished"));
|
||||
}
|
||||
|
||||
// Get synchronous socket
|
||||
let res = socket2_operation(self.socket.as_ref(), |s| {
|
||||
// Get original TTL
|
||||
let original_ttl = s.ttl()?;
|
||||
// Ensure the TTL for sent packets is the default,
|
||||
// then fragment and send the packets
|
||||
let res = {
|
||||
let mut current_ttl = self.current_ttl.lock().await;
|
||||
if *current_ttl != self.default_ttl {
|
||||
veilid_log!(self error "Incorrect TTL before sending holepunch UDP packet ({} != {}): remote_addr={:?}", *current_ttl, self.default_ttl, remote_addr);
|
||||
}
|
||||
|
||||
// Set TTL
|
||||
s.set_ttl(ttl)?;
|
||||
// Get synchronous socket
|
||||
socket2_operation(self.socket.as_ref(), |s| {
|
||||
// Set TTL
|
||||
let ttl_res = if self.is_ipv6 {
|
||||
s.set_unicast_hops_v6(ttl)
|
||||
} else {
|
||||
s.set_ttl(ttl)
|
||||
};
|
||||
ttl_res.inspect_err(|e| {
|
||||
veilid_log!(self error "Failed to set TTL on holepunch UDP socket: {} remote_addr={:?}", e, remote_addr);
|
||||
})?;
|
||||
*current_ttl = ttl;
|
||||
|
||||
// Send zero length packet
|
||||
let res = s.send_to(&[], &remote_addr.into());
|
||||
// Send zero length packet
|
||||
let res = s.send_to(&[], &remote_addr.into());
|
||||
|
||||
// Restore TTL immediately
|
||||
s.set_ttl(original_ttl)?;
|
||||
// Restore TTL immediately
|
||||
let ttl_res = if self.is_ipv6 {
|
||||
s.set_unicast_hops_v6(self.default_ttl)
|
||||
} else {
|
||||
s.set_ttl(self.default_ttl)
|
||||
};
|
||||
ttl_res.inspect_err(|e| {
|
||||
veilid_log!(self error "Failed to reset TTL on holepunch UDP socket: {} remote_addr={:?}", e, remote_addr);
|
||||
})?;
|
||||
*current_ttl = self.default_ttl;
|
||||
|
||||
res
|
||||
});
|
||||
res
|
||||
})
|
||||
};
|
||||
|
||||
// Check for errors
|
||||
let len = network_result_try!(res.into_network_result()?);
|
||||
@ -208,6 +255,10 @@ impl RawUdpProtocolHandler {
|
||||
let local_socket_addr = compatible_unspecified_socket_addr(socket_addr);
|
||||
let socket = bind_async_udp_socket(local_socket_addr)?
|
||||
.ok_or(io::Error::from(io::ErrorKind::AddrInUse))?;
|
||||
Ok(RawUdpProtocolHandler::new(registry, Arc::new(socket)))
|
||||
Ok(RawUdpProtocolHandler::new(
|
||||
registry,
|
||||
Arc::new(socket),
|
||||
local_socket_addr.is_ipv6(),
|
||||
))
|
||||
}
|
||||
}
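The IPv4/IPv6 split above matters because the hop limit lives under a different socket option per address family. A minimal sketch of that dispatch, assuming the socket2 crate's Socket type (illustration only, not code from this diff):

use socket2::Socket;

// IPv6 sockets use IPV6_UNICAST_HOPS; IPv4 sockets use IP_TTL.
fn set_hop_limit(s: &Socket, is_ipv6: bool, ttl: u32) -> std::io::Result<()> {
    if is_ipv6 {
        s.set_unicast_hops_v6(ttl)
    } else {
        s.set_ttl(ttl)
    }
}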
|
||||
|
@ -6,7 +6,7 @@ use async_tungstenite::tungstenite::handshake::server::{
|
||||
Callback, ErrorResponse, Request, Response,
|
||||
};
|
||||
use async_tungstenite::tungstenite::http::StatusCode;
|
||||
use async_tungstenite::tungstenite::protocol::{frame::coding::CloseCode, CloseFrame, Message};
|
||||
use async_tungstenite::tungstenite::protocol::Message;
|
||||
use async_tungstenite::tungstenite::Error;
|
||||
use async_tungstenite::{accept_hdr_async, client_async, WebSocketStream};
|
||||
use futures_util::{AsyncRead, AsyncWrite, SinkExt};
|
||||
@ -98,45 +98,27 @@ where
|
||||
|
||||
#[instrument(level = "trace", target = "protocol", err, skip_all)]
|
||||
pub async fn close(&self) -> io::Result<NetworkResult<()>> {
|
||||
let timeout_ms = self
|
||||
.registry
|
||||
.config()
|
||||
.with(|c| c.network.connection_initial_timeout_ms);
|
||||
|
||||
// Make an attempt to close the stream normally
|
||||
let mut stream = self.stream.clone();
|
||||
let out = match stream
|
||||
.send(Message::Close(Some(CloseFrame {
|
||||
code: CloseCode::Normal,
|
||||
reason: "".into(),
|
||||
})))
|
||||
.await
|
||||
{
|
||||
Ok(v) => NetworkResult::value(v),
|
||||
Err(e) => err_to_network_result(e),
|
||||
};
|
||||
|
||||
// This close does not do a TCP shutdown so it is safe and will not cause TIME_WAIT
|
||||
let _ = stream.close().await;
|
||||
|
||||
Ok(out)
|
||||
|
||||
// Drive connection to close
|
||||
/*
|
||||
let cur_ts = get_timestamp();
|
||||
loop {
|
||||
match stream.flush().await {
|
||||
Ok(()) => {}
|
||||
Err(Error::Io(ioerr)) => {
|
||||
break Err(ioerr).into_network_result();
|
||||
}
|
||||
Err(Error::ConnectionClosed) => {
|
||||
break Ok(NetworkResult::value(()));
|
||||
}
|
||||
Err(e) => {
|
||||
break Err(to_io_error_other(e));
|
||||
}
|
||||
match timeout(timeout_ms, stream.close()).await {
|
||||
Ok(Ok(())) => {}
|
||||
Ok(Err(e)) => {
|
||||
return Ok(err_to_network_result(e));
|
||||
}
|
||||
if get_timestamp().saturating_sub(cur_ts) >= MAX_CONNECTION_CLOSE_WAIT_US {
|
||||
return Ok(NetworkResult::Timeout);
|
||||
Err(_) => {
|
||||
// Timed out
|
||||
return Ok(NetworkResult::timeout());
|
||||
}
|
||||
}
|
||||
*/
|
||||
};
|
||||
|
||||
Ok(NetworkResult::value(()))
|
||||
}
|
||||
|
||||
#[instrument(level = "trace", target="protocol", err, skip(self, message), fields(network_result, message.len = message.len()))]
|
||||
|
@ -480,20 +480,21 @@ impl NetworkConnection {
|
||||
}
|
||||
}
|
||||
|
||||
veilid_log!(registry trace
|
||||
"Connection loop finished flow={:?}",
|
||||
flow
|
||||
);
|
||||
|
||||
// Let the connection manager know the receive loop exited
|
||||
connection_manager
|
||||
.report_connection_finished(connection_id)
|
||||
.await;
|
||||
.report_connection_finished(connection_id);
|
||||
|
||||
// Close the low level socket
|
||||
if let Err(e) = protocol_connection.close().await {
|
||||
veilid_log!(registry debug "Protocol connection close error: {}", e);
|
||||
}
|
||||
|
||||
veilid_log!(registry trace
|
||||
"Connection loop exited flow={:?}",
|
||||
flow
|
||||
);
|
||||
|
||||
}.in_current_span())
|
||||
}
|
||||
|
||||
|
120  veilid-core/src/network_manager/relay_worker.rs  Normal file
@ -0,0 +1,120 @@
|
||||
use futures_util::StreamExt as _;
|
||||
use stop_token::future::FutureExt as _;
|
||||
|
||||
use super::*;
|
||||
|
||||
#[derive(Debug)]
|
||||
pub(super) enum RelayWorkerRequestKind {
|
||||
Relay {
|
||||
relay_nr: FilteredNodeRef,
|
||||
data: Vec<u8>,
|
||||
},
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
pub(super) struct RelayWorkerRequest {
|
||||
enqueued_ts: Timestamp,
|
||||
span: Span,
|
||||
kind: RelayWorkerRequestKind,
|
||||
}
|
||||
|
||||
impl NetworkManager {
|
||||
pub(super) fn startup_relay_workers(&self) -> EyreResult<()> {
|
||||
let mut inner = self.inner.lock();
|
||||
|
||||
// Relay workers
|
||||
let channel = flume::bounded(self.queue_size as usize);
|
||||
inner.relay_send_channel = Some(channel.0.clone());
|
||||
inner.relay_stop_source = Some(StopSource::new());
|
||||
|
||||
// spin up N workers
|
||||
veilid_log!(self debug "Starting {} relay workers", self.concurrency);
|
||||
for task_n in 0..self.concurrency {
|
||||
let registry = self.registry();
|
||||
let receiver = channel.1.clone();
|
||||
let stop_token = inner.relay_stop_source.as_ref().unwrap().token();
|
||||
let jh = spawn(&format!("relay worker {}", task_n), async move {
|
||||
let this = registry.network_manager();
|
||||
Box::pin(this.relay_worker(stop_token, receiver)).await
|
||||
});
|
||||
inner.relay_worker_join_handles.push(jh);
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub(super) async fn shutdown_relay_workers(&self) {
|
||||
// Stop the relay workers
|
||||
let mut unord = FuturesUnordered::new();
|
||||
{
|
||||
let mut inner = self.inner.lock();
|
||||
// take the join handles out
|
||||
for h in inner.relay_worker_join_handles.drain(..) {
|
||||
unord.push(h);
|
||||
}
|
||||
// drop the stop
|
||||
drop(inner.relay_stop_source.take());
|
||||
}
|
||||
veilid_log!(self debug "Stopping {} relay workers", unord.len());
|
||||
|
||||
// Wait for them to complete
|
||||
while unord.next().await.is_some() {}
|
||||
}
|
||||
|
||||
pub(super) async fn relay_worker(
|
||||
&self,
|
||||
stop_token: StopToken,
|
||||
receiver: flume::Receiver<RelayWorkerRequest>,
|
||||
) {
|
||||
while let Ok(Ok(request)) = receiver.recv_async().timeout_at(stop_token.clone()).await {
|
||||
let relay_request_span = tracing::trace_span!("relay request");
|
||||
relay_request_span.follows_from(request.span);
|
||||
|
||||
// Measure dequeue time
|
||||
let dequeue_ts = Timestamp::now();
|
||||
let dequeue_latency = dequeue_ts.saturating_sub(request.enqueued_ts);
|
||||
|
||||
// Process request kind
|
||||
match request.kind {
|
||||
RelayWorkerRequestKind::Relay { relay_nr, data } => {
|
||||
// Relay the packet to the desired destination
|
||||
veilid_log!(self trace "relaying {} bytes to {}", data.len(), relay_nr);
|
||||
if let Err(e) = pin_future!(self.send_data(relay_nr, data.to_vec())).await {
|
||||
veilid_log!(self debug "failed to relay envelope: {}" ,e);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Measure process time
|
||||
let process_ts = Timestamp::now();
|
||||
let process_latency = process_ts.saturating_sub(dequeue_ts);
|
||||
|
||||
// Accounting
|
||||
self.stats_relay_processed(dequeue_latency, process_latency)
|
||||
}
|
||||
}
|
||||
|
||||
#[instrument(level = "trace", target = "rpc", skip_all)]
|
||||
pub(super) fn enqueue_relay(&self, relay_nr: FilteredNodeRef, data: Vec<u8>) -> EyreResult<()> {
|
||||
let _guard = self
|
||||
.startup_context
|
||||
.startup_lock
|
||||
.enter()
|
||||
.wrap_err("not started up")?;
|
||||
|
||||
let send_channel = {
|
||||
let inner = self.inner.lock();
|
||||
let Some(send_channel) = inner.relay_send_channel.as_ref().cloned() else {
|
||||
bail!("send channel is closed");
|
||||
};
|
||||
send_channel
|
||||
};
|
||||
send_channel
|
||||
.try_send(RelayWorkerRequest {
|
||||
enqueued_ts: Timestamp::now(),
|
||||
span: Span::current(),
|
||||
kind: RelayWorkerRequestKind::Relay { relay_nr, data },
|
||||
})
|
||||
.map_err(|e| eyre!("failed to enqueue relay: {}", e))?;
|
||||
Ok(())
|
||||
}
|
||||
}
|
@ -152,7 +152,7 @@ impl NetworkManager {
|
||||
// If a node is unreachable it may still have an existing inbound connection
|
||||
// Try that, but don't cache anything
|
||||
network_result_try!(
|
||||
pin_future_closure!(self.send_data_ncm_existing(target_node_ref, data)).await?
|
||||
pin_future_closure!(self.send_data_unreachable(target_node_ref, data)).await?
|
||||
)
|
||||
}
|
||||
Some(NodeContactMethod {
|
||||
@ -239,6 +239,42 @@ impl NetworkManager {
|
||||
}))
|
||||
}
|
||||
|
||||
/// Send data to unreachable node
|
||||
#[instrument(level = "trace", target = "net", skip_all, err)]
|
||||
async fn send_data_unreachable(
|
||||
&self,
|
||||
target_node_ref: FilteredNodeRef,
|
||||
data: Vec<u8>,
|
||||
) -> EyreResult<NetworkResult<UniqueFlow>> {
|
||||
// First try to send data to the last connection we've seen this peer on
|
||||
let Some(flow) = target_node_ref.last_flow() else {
|
||||
return Ok(NetworkResult::no_connection_other(format!(
|
||||
"node was unreachable: {}",
|
||||
target_node_ref
|
||||
)));
|
||||
};
|
||||
|
||||
let net = self.net();
|
||||
let unique_flow = match pin_future!(debug_duration(
|
||||
|| { net.send_data_to_existing_flow(flow, data) },
|
||||
Some(1_000_000)
|
||||
))
|
||||
.await?
|
||||
{
|
||||
SendDataToExistingFlowResult::Sent(unique_flow) => unique_flow,
|
||||
SendDataToExistingFlowResult::NotSent(_) => {
|
||||
return Ok(NetworkResult::no_connection_other(
|
||||
"failed to send to existing flow",
|
||||
));
|
||||
}
|
||||
};
|
||||
|
||||
// Update timestamp for this last connection since we just sent to it
|
||||
self.set_last_flow(target_node_ref.unfiltered(), flow, Timestamp::now());
|
||||
|
||||
Ok(NetworkResult::value(unique_flow))
|
||||
}
|
||||
|
||||
/// Send data using NodeContactMethod::Existing
|
||||
#[instrument(level = "trace", target = "net", skip_all, err)]
|
||||
async fn send_data_ncm_existing(
|
||||
@ -255,7 +291,12 @@ impl NetworkManager {
|
||||
};
|
||||
|
||||
let net = self.net();
|
||||
let unique_flow = match pin_future!(net.send_data_to_existing_flow(flow, data)).await? {
|
||||
let unique_flow = match pin_future!(debug_duration(
|
||||
|| { net.send_data_to_existing_flow(flow, data) },
|
||||
Some(1_000_000)
|
||||
))
|
||||
.await?
|
||||
{
|
||||
SendDataToExistingFlowResult::Sent(unique_flow) => unique_flow,
|
||||
SendDataToExistingFlowResult::NotSent(_) => {
|
||||
return Ok(NetworkResult::no_connection_other(
|
||||
@ -297,7 +338,12 @@ impl NetworkManager {
|
||||
// First try to send data to the last flow we've seen this peer on
|
||||
let data = if let Some(flow) = seq_target_node_ref.last_flow() {
|
||||
let net = self.net();
|
||||
match pin_future!(net.send_data_to_existing_flow(flow, data)).await? {
|
||||
match pin_future!(debug_duration(
|
||||
|| { net.send_data_to_existing_flow(flow, data) },
|
||||
Some(1_000_000)
|
||||
))
|
||||
.await?
|
||||
{
|
||||
SendDataToExistingFlowResult::Sent(unique_flow) => {
|
||||
// Update timestamp for this last connection since we just sent to it
|
||||
self.set_last_flow(target_node_ref.unfiltered(), flow, Timestamp::now());
|
||||
@ -321,9 +367,16 @@ impl NetworkManager {
|
||||
data
|
||||
};
|
||||
|
||||
let connection_initial_timeout_us = self
|
||||
.config()
|
||||
.with(|c| c.network.connection_initial_timeout_ms as u64 * 1000);
|
||||
|
||||
let unique_flow = network_result_try!(
|
||||
pin_future!(self.do_reverse_connect(relay_nr.clone(), target_node_ref.clone(), data))
|
||||
.await?
|
||||
pin_future!(debug_duration(
|
||||
|| { self.do_reverse_connect(relay_nr.clone(), target_node_ref.clone(), data) },
|
||||
Some(connection_initial_timeout_us * 2)
|
||||
))
|
||||
.await?
|
||||
);
|
||||
Ok(NetworkResult::value(unique_flow))
|
||||
}
|
||||
@ -339,7 +392,12 @@ impl NetworkManager {
|
||||
// First try to send data to the last flow we've seen this peer on
|
||||
let data = if let Some(flow) = target_node_ref.last_flow() {
|
||||
let net = self.net();
|
||||
match pin_future!(net.send_data_to_existing_flow(flow, data)).await? {
|
||||
match pin_future!(debug_duration(
|
||||
|| { net.send_data_to_existing_flow(flow, data) },
|
||||
Some(1_000_000)
|
||||
))
|
||||
.await?
|
||||
{
|
||||
SendDataToExistingFlowResult::Sent(unique_flow) => {
|
||||
// Update timestamp for this last connection since we just sent to it
|
||||
self.set_last_flow(target_node_ref.unfiltered(), flow, Timestamp::now());
|
||||
@ -363,9 +421,16 @@ impl NetworkManager {
|
||||
data
|
||||
};
|
||||
|
||||
let hole_punch_receipt_time_us = self
|
||||
.config()
|
||||
.with(|c| c.network.hole_punch_receipt_time_ms as u64 * 1000);
|
||||
|
||||
let unique_flow = network_result_try!(
|
||||
pin_future!(self.do_hole_punch(relay_nr.clone(), target_node_ref.clone(), data))
|
||||
.await?
|
||||
pin_future!(debug_duration(
|
||||
|| { self.do_hole_punch(relay_nr.clone(), target_node_ref.clone(), data) },
|
||||
Some(hole_punch_receipt_time_us * 2)
|
||||
))
|
||||
.await?
|
||||
);
|
||||
|
||||
Ok(NetworkResult::value(unique_flow))
|
||||
@ -391,7 +456,12 @@ impl NetworkManager {
|
||||
);
|
||||
|
||||
let net = self.net();
|
||||
match pin_future!(net.send_data_to_existing_flow(flow, data)).await? {
|
||||
match pin_future!(debug_duration(
|
||||
|| { net.send_data_to_existing_flow(flow, data) },
|
||||
Some(1_000_000)
|
||||
))
|
||||
.await?
|
||||
{
|
||||
SendDataToExistingFlowResult::Sent(unique_flow) => {
|
||||
// Update timestamp for this last connection since we just sent to it
|
||||
self.set_last_flow(node_ref.unfiltered(), flow, Timestamp::now());
|
||||
|
@ -22,6 +22,10 @@ impl Default for PerAddressStatsKey {
|
||||
pub struct NetworkManagerStats {
|
||||
pub self_stats: PerAddressStats,
|
||||
pub per_address_stats: LruCache<PerAddressStatsKey, PerAddressStats>,
|
||||
pub relay_worker_dequeue_latency: LatencyStats,
|
||||
pub relay_worker_process_latency: LatencyStats,
|
||||
pub relay_worker_dequeue_latency_accounting: LatencyStatsAccounting,
|
||||
pub relay_worker_process_latency_accounting: LatencyStatsAccounting,
|
||||
}
|
||||
|
||||
impl Default for NetworkManagerStats {
|
||||
@ -29,6 +33,10 @@ impl Default for NetworkManagerStats {
|
||||
Self {
|
||||
self_stats: PerAddressStats::default(),
|
||||
per_address_stats: LruCache::new(IPADDR_TABLE_SIZE),
|
||||
relay_worker_dequeue_latency: LatencyStats::default(),
|
||||
relay_worker_process_latency: LatencyStats::default(),
|
||||
relay_worker_dequeue_latency_accounting: LatencyStatsAccounting::new(),
|
||||
relay_worker_process_latency_accounting: LatencyStatsAccounting::new(),
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -36,7 +44,7 @@ impl Default for NetworkManagerStats {
|
||||
impl NetworkManager {
|
||||
// Callbacks from low level network for statistics gathering
|
||||
pub fn stats_packet_sent(&self, addr: IpAddr, bytes: ByteCount) {
|
||||
let inner = &mut *self.inner.lock();
|
||||
let mut inner = self.inner.lock();
|
||||
inner
|
||||
.stats
|
||||
.self_stats
|
||||
@ -53,7 +61,7 @@ impl NetworkManager {
|
||||
}
|
||||
|
||||
pub fn stats_packet_rcvd(&self, addr: IpAddr, bytes: ByteCount) {
|
||||
let inner = &mut *self.inner.lock();
|
||||
let mut inner = self.inner.lock();
|
||||
inner
|
||||
.stats
|
||||
.self_stats
|
||||
@ -69,28 +77,27 @@ impl NetworkManager {
|
||||
.add_down(bytes);
|
||||
}
|
||||
|
||||
pub fn stats_relay_processed(
|
||||
&self,
|
||||
dequeue_latency: TimestampDuration,
|
||||
process_latency: TimestampDuration,
|
||||
) {
|
||||
let mut inner = self.inner.lock();
|
||||
inner.stats.relay_worker_dequeue_latency = inner
|
||||
.stats
|
||||
.relay_worker_dequeue_latency_accounting
|
||||
.record_latency(dequeue_latency);
|
||||
inner.stats.relay_worker_process_latency = inner
|
||||
.stats
|
||||
.relay_worker_process_latency_accounting
|
||||
.record_latency(process_latency);
|
||||
}
|
||||
|
||||
pub fn get_stats(&self) -> NetworkManagerStats {
|
||||
let inner = self.inner.lock();
|
||||
inner.stats.clone()
|
||||
}
|
||||
|
||||
pub fn debug(&self) -> String {
|
||||
let stats = self.get_stats();
|
||||
|
||||
let mut out = String::new();
|
||||
out += "Network Manager\n";
|
||||
out += "---------------\n";
|
||||
let mut out = format!(
|
||||
"Transfer stats:\n{}\n",
|
||||
indent_all_string(&stats.self_stats.transfer_stats)
|
||||
);
|
||||
out += "Node Contact Method Cache\n";
|
||||
out += "-------------------------\n";
|
||||
out += &self.inner.lock().node_contact_method_cache.debug();
|
||||
|
||||
out
|
||||
}
|
||||
|
||||
pub fn get_veilid_state(&self) -> Box<VeilidStateNetwork> {
|
||||
if !self.network_is_started() {
|
||||
return Box::new(VeilidStateNetwork {
|
||||
|
@ -52,8 +52,16 @@ impl WebsocketNetworkConnection {
|
||||
instrument(level = "trace", err, skip(self))
|
||||
)]
|
||||
pub async fn close(&self) -> io::Result<NetworkResult<()>> {
|
||||
let timeout_ms = self
|
||||
.registry
|
||||
.config()
|
||||
.with(|c| c.network.connection_initial_timeout_ms);
|
||||
|
||||
#[allow(unused_variables)]
|
||||
let x = self.inner.ws_meta.close().await.map_err(ws_err_to_io_error);
|
||||
let x = match timeout(timeout_ms, self.inner.ws_meta.close()).await {
|
||||
Ok(v) => v.map_err(ws_err_to_io_error),
|
||||
Err(_) => return Ok(NetworkResult::timeout()),
|
||||
};
|
||||
#[cfg(feature = "verbose-tracing")]
|
||||
veilid_log!(self debug "close result: {:?}", x);
|
||||
Ok(NetworkResult::value(()))
|
||||
|
@ -23,7 +23,7 @@ const UNRELIABLE_PING_SPAN_SECS: u32 = 60;
|
||||
const UNRELIABLE_PING_INTERVAL_SECS: u32 = 5;
|
||||
/// - Number of consecutive lost answers on an unordered protocol we will
|
||||
/// tolerate before we call something unreliable
|
||||
const UNRELIABLE_LOST_ANSWERS_UNORDERED: u32 = 1;
|
||||
const UNRELIABLE_LOST_ANSWERS_UNORDERED: u32 = 2;
|
||||
/// - Number of consecutive lost answers on an ordered protocol we will
|
||||
/// tolerate before we call something unreliable
|
||||
const UNRELIABLE_LOST_ANSWERS_ORDERED: u32 = 0;
|
||||
@ -1011,7 +1011,12 @@ impl BucketEntryInner {
|
||||
|
||||
match latest_contact_time {
|
||||
None => {
|
||||
error!("Peer is reliable, but not seen!");
|
||||
// Peer may be appear reliable from a previous attach/detach
|
||||
// But reliability uses last_seen_ts not the last_outbound_contact_time
|
||||
// Regardless, if we haven't pinged it, we need to ping it.
|
||||
// But it it was reliable before, and pings successfully then it can
|
||||
// stay reliable, so we don't make it unreliable just because we haven't
|
||||
// contacted it yet during this attachment.
|
||||
true
|
||||
}
|
||||
Some(latest_contact_time) => {
|
||||
@ -1068,11 +1073,14 @@ impl BucketEntryInner {
|
||||
}
|
||||
|
||||
pub(super) fn make_not_dead(&mut self, cur_ts: Timestamp) {
|
||||
self.peer_stats.rpc_stats.last_seen_ts = None;
|
||||
self.peer_stats.rpc_stats.failed_to_send = 0;
|
||||
self.peer_stats.rpc_stats.recent_lost_answers_unordered = 0;
|
||||
self.peer_stats.rpc_stats.recent_lost_answers_ordered = 0;
|
||||
assert!(self.check_dead(cur_ts).is_none());
|
||||
if self.check_dead(cur_ts).is_some() {
|
||||
self.peer_stats.rpc_stats.last_seen_ts = None;
|
||||
self.peer_stats.rpc_stats.first_consecutive_seen_ts = None;
|
||||
self.peer_stats.rpc_stats.failed_to_send = 0;
|
||||
self.peer_stats.rpc_stats.recent_lost_answers_unordered = 0;
|
||||
self.peer_stats.rpc_stats.recent_lost_answers_ordered = 0;
|
||||
assert!(self.check_dead(cur_ts).is_none());
|
||||
}
|
||||
}
|
||||
|
||||
pub(super) fn _state_debug_info(&self, cur_ts: Timestamp) -> String {
|
||||
|
@ -43,7 +43,9 @@ pub const RELAY_MANAGEMENT_INTERVAL_SECS: u32 = 1;
|
||||
/// How frequently we optimize relays
|
||||
pub const RELAY_OPTIMIZATION_INTERVAL_SECS: u32 = 10;
|
||||
/// What percentile to keep our relays optimized to
|
||||
pub const RELAY_OPTIMIZATION_PERCENTILE: f32 = 75.0;
|
||||
pub const RELAY_OPTIMIZATION_PERCENTILE: f32 = 66.0;
|
||||
/// What percentile to choose our relays from (must be greater than RELAY_OPTIMIZATION_PERCENTILE)
|
||||
pub const RELAY_SELECTION_PERCENTILE: f32 = 85.0;
|
||||
|
||||
/// How frequently we tick the private route management routine
|
||||
pub const PRIVATE_ROUTE_MANAGEMENT_INTERVAL_SECS: u32 = 1;
|
||||
@ -1039,7 +1041,7 @@ impl RoutingTable {
|
||||
#[instrument(level = "trace", skip(self), err)]
|
||||
pub async fn find_nodes_close_to_node_id(
|
||||
&self,
|
||||
node_ref: NodeRef,
|
||||
node_ref: FilteredNodeRef,
|
||||
node_id: TypedKey,
|
||||
capabilities: Vec<Capability>,
|
||||
) -> EyreResult<NetworkResult<Vec<NodeRef>>> {
|
||||
@ -1047,11 +1049,7 @@ impl RoutingTable {
|
||||
|
||||
let res = network_result_try!(
|
||||
rpc_processor
|
||||
.rpc_call_find_node(
|
||||
Destination::direct(node_ref.default_filtered()),
|
||||
node_id,
|
||||
capabilities
|
||||
)
|
||||
.rpc_call_find_node(Destination::direct(node_ref), node_id, capabilities)
|
||||
.await?
|
||||
);
|
||||
|
||||
@ -1067,7 +1065,7 @@ impl RoutingTable {
|
||||
pub async fn find_nodes_close_to_self(
|
||||
&self,
|
||||
crypto_kind: CryptoKind,
|
||||
node_ref: NodeRef,
|
||||
node_ref: FilteredNodeRef,
|
||||
capabilities: Vec<Capability>,
|
||||
) -> EyreResult<NetworkResult<Vec<NodeRef>>> {
|
||||
let self_node_id = self.node_id(crypto_kind);
|
||||
@ -1081,7 +1079,7 @@ impl RoutingTable {
|
||||
pub async fn find_nodes_close_to_node_ref(
|
||||
&self,
|
||||
crypto_kind: CryptoKind,
|
||||
node_ref: NodeRef,
|
||||
node_ref: FilteredNodeRef,
|
||||
capabilities: Vec<Capability>,
|
||||
) -> EyreResult<NetworkResult<Vec<NodeRef>>> {
|
||||
let Some(target_node_id) = node_ref.node_ids().get(crypto_kind) else {
|
||||
@ -1102,7 +1100,7 @@ impl RoutingTable {
|
||||
capabilities: Vec<Capability>,
|
||||
) {
|
||||
// Ask node for nodes closest to our own node
|
||||
let closest_nodes = network_result_value_or_log!(self match pin_future!(self.find_nodes_close_to_self(crypto_kind, node_ref.clone(), capabilities.clone())).await {
|
||||
let closest_nodes = network_result_value_or_log!(self match pin_future!(self.find_nodes_close_to_self(crypto_kind, node_ref.sequencing_filtered(Sequencing::PreferOrdered), capabilities.clone())).await {
|
||||
Err(e) => {
|
||||
veilid_log!(self error
|
||||
"find_self failed for {:?}: {:?}",
|
||||
@ -1118,7 +1116,7 @@ impl RoutingTable {
|
||||
// Ask each node near us to find us as well
|
||||
if wide {
|
||||
for closest_nr in closest_nodes {
|
||||
network_result_value_or_log!(self match pin_future!(self.find_nodes_close_to_self(crypto_kind, closest_nr.clone(), capabilities.clone())).await {
|
||||
network_result_value_or_log!(self match pin_future!(self.find_nodes_close_to_self(crypto_kind, closest_nr.sequencing_filtered(Sequencing::PreferOrdered), capabilities.clone())).await {
|
||||
Err(e) => {
|
||||
veilid_log!(self error
|
||||
"find_self failed for {:?}: {:?}",
|
||||
@ -1146,6 +1144,18 @@ impl RoutingTable {
|
||||
inner.find_fastest_node(cur_ts, filter, metric)
|
||||
}
|
||||
|
||||
#[instrument(level = "trace", skip(self, filter, metric), ret)]
|
||||
pub fn find_random_fast_node(
|
||||
&self,
|
||||
cur_ts: Timestamp,
|
||||
filter: impl Fn(&BucketEntryInner) -> bool,
|
||||
percentile: f32,
|
||||
metric: impl Fn(&LatencyStats) -> TimestampDuration,
|
||||
) -> Option<NodeRef> {
|
||||
let inner = self.inner.read();
|
||||
inner.find_random_fast_node(cur_ts, filter, percentile, metric)
|
||||
}
|
||||
|
||||
#[instrument(level = "trace", skip(self, filter, metric), ret)]
|
||||
pub fn get_node_speed_percentile(
|
||||
&self,
|
||||
|
@ -119,6 +119,24 @@ impl NodeRefOperateTrait for FilteredNodeRef {
|
||||
let inner = &mut *routing_table.inner.write();
|
||||
self.entry.with_mut(inner, f)
|
||||
}
|
||||
|
||||
fn with_inner<T, F>(&self, f: F) -> T
|
||||
where
|
||||
F: FnOnce(&RoutingTableInner) -> T,
|
||||
{
|
||||
let routing_table = self.registry.routing_table();
|
||||
let inner = &*routing_table.inner.read();
|
||||
f(inner)
|
||||
}
|
||||
|
||||
fn with_inner_mut<T, F>(&self, f: F) -> T
|
||||
where
|
||||
F: FnOnce(&mut RoutingTableInner) -> T,
|
||||
{
|
||||
let routing_table = self.registry.routing_table();
|
||||
let inner = &mut *routing_table.inner.write();
|
||||
f(inner)
|
||||
}
|
||||
}
|
||||
|
||||
impl NodeRefCommonTrait for FilteredNodeRef {}
|
||||
|
@ -139,6 +139,24 @@ impl NodeRefOperateTrait for NodeRef {
|
||||
let inner = &mut *routing_table.inner.write();
|
||||
self.entry.with_mut(inner, f)
|
||||
}
|
||||
|
||||
fn with_inner<T, F>(&self, f: F) -> T
|
||||
where
|
||||
F: FnOnce(&RoutingTableInner) -> T,
|
||||
{
|
||||
let routing_table = self.routing_table();
|
||||
let inner = &*routing_table.inner.read();
|
||||
f(inner)
|
||||
}
|
||||
|
||||
fn with_inner_mut<T, F>(&self, f: F) -> T
|
||||
where
|
||||
F: FnOnce(&mut RoutingTableInner) -> T,
|
||||
{
|
||||
let routing_table = self.routing_table();
|
||||
let inner = &mut *routing_table.inner.write();
|
||||
f(inner)
|
||||
}
|
||||
}
|
||||
|
||||
impl NodeRefCommonTrait for NodeRef {}
|
||||
|
@ -90,6 +90,21 @@ impl<'a, N: NodeRefAccessorsTrait + NodeRefOperateTrait + fmt::Debug + fmt::Disp
|
||||
{
|
||||
panic!("need to locked_mut() for this operation")
|
||||
}
|
||||
|
||||
fn with_inner<T, F>(&self, f: F) -> T
|
||||
where
|
||||
F: FnOnce(&RoutingTableInner) -> T,
|
||||
{
|
||||
let inner = &*self.inner.lock();
|
||||
f(inner)
|
||||
}
|
||||
|
||||
fn with_inner_mut<T, F>(&self, _f: F) -> T
|
||||
where
|
||||
F: FnOnce(&mut RoutingTableInner) -> T,
|
||||
{
|
||||
panic!("need to locked_mut() for this operation")
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a, N: NodeRefAccessorsTrait + NodeRefOperateTrait + fmt::Debug + fmt::Display + Clone>
|
||||
|
@ -92,6 +92,22 @@ impl<'a, N: NodeRefAccessorsTrait + NodeRefOperateTrait + fmt::Debug + fmt::Disp
|
||||
let inner = &mut *self.inner.lock();
|
||||
self.nr.entry().with_mut(inner, f)
|
||||
}
|
||||
|
||||
fn with_inner<T, F>(&self, f: F) -> T
|
||||
where
|
||||
F: FnOnce(&RoutingTableInner) -> T,
|
||||
{
|
||||
let inner = &*self.inner.lock();
|
||||
f(inner)
|
||||
}
|
||||
|
||||
fn with_inner_mut<T, F>(&self, f: F) -> T
|
||||
where
|
||||
F: FnOnce(&mut RoutingTableInner) -> T,
|
||||
{
|
||||
let inner = &mut *self.inner.lock();
|
||||
f(inner)
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a, N: NodeRefAccessorsTrait + NodeRefOperateTrait + fmt::Debug + fmt::Display + Clone>
|
||||
|
@ -20,6 +20,13 @@ pub(crate) trait NodeRefOperateTrait {
|
||||
fn operate_mut<T, F>(&self, f: F) -> T
|
||||
where
|
||||
F: FnOnce(&mut RoutingTableInner, &mut BucketEntryInner) -> T;
|
||||
#[expect(dead_code)]
|
||||
fn with_inner<T, F>(&self, f: F) -> T
|
||||
where
|
||||
F: FnOnce(&RoutingTableInner) -> T;
|
||||
fn with_inner_mut<T, F>(&self, f: F) -> T
|
||||
where
|
||||
F: FnOnce(&mut RoutingTableInner) -> T;
|
||||
}
|
||||
|
||||
// Common Operations
|
||||
@ -115,7 +122,7 @@ pub(crate) trait NodeRefCommonTrait: NodeRefAccessorsTrait + NodeRefOperateTrait
|
||||
// }
|
||||
|
||||
fn relay(&self, routing_domain: RoutingDomain) -> EyreResult<Option<FilteredNodeRef>> {
|
||||
self.operate_mut(|rti, e| {
|
||||
let Some(rpi) = self.operate(|rti, e| {
|
||||
let Some(sni) = e.signed_node_info(routing_domain) else {
|
||||
return Ok(None);
|
||||
};
|
||||
@ -127,8 +134,14 @@ pub(crate) trait NodeRefCommonTrait: NodeRefAccessorsTrait + NodeRefOperateTrait
|
||||
if rti.routing_table().matches_own_node_id(rpi.node_ids()) {
|
||||
bail!("Can't relay though ourselves");
|
||||
}
|
||||
Ok(Some(rpi))
|
||||
})?
|
||||
else {
|
||||
return Ok(None);
|
||||
};
|
||||
|
||||
// Register relay node and return noderef
|
||||
// Register relay node and return noderef
|
||||
self.with_inner_mut(|rti| {
|
||||
let nr = rti.register_node_with_peer_info(rpi, false)?;
|
||||
Ok(Some(nr))
|
||||
})
|
||||
|
@ -716,7 +716,7 @@ impl RouteSpecStore {
|
||||
};
|
||||
|
||||
let Some(rsid) = inner.content.get_id_by_key(&public_key.value) else {
|
||||
veilid_log!(self debug "route id does not exist: {:?}", public_key.value);
|
||||
veilid_log!(self debug target: "network_result", "route id does not exist: {:?}", public_key.value);
|
||||
return None;
|
||||
};
|
||||
let Some(rssd) = inner.content.get_detail(&rsid) else {
|
||||
@ -753,7 +753,7 @@ impl RouteSpecStore {
|
||||
return None;
|
||||
}
|
||||
Err(e) => {
|
||||
veilid_log!(self debug "errir verifying signature for hop {} at {} on private route {}: {}", hop_n, hop_public_key, public_key, e);
|
||||
veilid_log!(self debug "error verifying signature for hop {} at {} on private route {}: {}", hop_n, hop_public_key, public_key, e);
|
||||
return None;
|
||||
}
|
||||
}
|
||||
|
@ -1445,6 +1445,54 @@ impl RoutingTableInner {
|
||||
fastest_node.map(|e| NodeRef::new(self.registry(), e))
|
||||
}
|
||||
|
||||
#[instrument(level = "trace", skip(self, filter, metric), ret)]
|
||||
pub fn find_random_fast_node(
|
||||
&self,
|
||||
cur_ts: Timestamp,
|
||||
filter: impl Fn(&BucketEntryInner) -> bool,
|
||||
percentile: f32,
|
||||
metric: impl Fn(&LatencyStats) -> TimestampDuration,
|
||||
) -> Option<NodeRef> {
|
||||
// Go through all entries and find all entries that match the filter function
|
||||
let mut all_filtered_nodes: Vec<Arc<BucketEntry>> = Vec::new();
|
||||
|
||||
// Iterate all known nodes for candidates
|
||||
self.with_entries(cur_ts, BucketEntryState::Unreliable, |rti, entry| {
|
||||
let entry2 = entry.clone();
|
||||
entry.with(rti, |_rti, e| {
|
||||
// Filter this node
|
||||
if filter(e) {
|
||||
all_filtered_nodes.push(entry2);
|
||||
}
|
||||
});
|
||||
// Don't end early, iterate through all entries
|
||||
Option::<()>::None
|
||||
});
|
||||
|
||||
// Sort by fastest tm90 reliable
|
||||
all_filtered_nodes.sort_by(|a, b| {
|
||||
a.with(self, |rti, ea| {
|
||||
b.with(rti, |_rti, eb| {
|
||||
BucketEntryInner::cmp_fastest_reliable(cur_ts, ea, eb, &metric)
|
||||
})
|
||||
})
|
||||
});
|
||||
|
||||
if all_filtered_nodes.is_empty() {
|
||||
return None;
|
||||
}
|
||||
|
||||
let max_index =
|
||||
(((all_filtered_nodes.len() - 1) as f32) * (100.0 - percentile) / 100.0) as u32;
|
||||
let chosen_index = (get_random_u32() % (max_index + 1)) as usize;
|
||||
|
||||
// Return the chosen node
|
||||
Some(NodeRef::new(
|
||||
self.registry(),
|
||||
all_filtered_nodes[chosen_index].clone(),
|
||||
))
|
||||
}
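The `percentile` parameter above restricts the random pick to the fastest slice of the candidate list (which is sorted fastest-first before selection). A hedged sketch of just the index math, with illustrative names rather than the real API, assuming a non-empty candidate list:

```rust
// Given `len` candidates sorted fastest-first, a selection percentile, and a
// random value, return the chosen index. Only the top (100 - percentile)% of
// the list is eligible.
fn pick_index(len: usize, percentile: f32, random: u32) -> usize {
    let max_index = (((len - 1) as f32) * (100.0 - percentile) / 100.0) as u32;
    (random % (max_index + 1)) as usize
}

fn main() {
    // 20 candidates at the 85th percentile: only indices 0..=2, the three
    // fastest entries, are eligible, so any random value maps into 0..3.
    assert_eq!(pick_index(20, 85.0, 7), 1);
}
```

This keeps relay selection biased toward fast nodes while spreading load across the fastest slice instead of always picking the single fastest one.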
|
||||
|
||||
#[instrument(level = "trace", skip(self, filter, metric), ret)]
|
||||
pub fn get_node_relative_performance(
|
||||
&self,
|
||||
|
@ -1,15 +1,5 @@
|
||||
use super::*;
|
||||
|
||||
// Latency entry is per round-trip packet (ping or data)
|
||||
// - Size is number of entries
|
||||
const ROLLING_LATENCIES_SIZE: usize = 50;
|
||||
|
||||
// Transfers entries are in bytes total for the interval
|
||||
// - Size is number of entries
|
||||
// - Interval is number of seconds in each entry
|
||||
const ROLLING_TRANSFERS_SIZE: usize = 10;
|
||||
pub const ROLLING_TRANSFERS_INTERVAL_SECS: u32 = 1;
|
||||
|
||||
// State entry is per state reason change
|
||||
// - Size is number of entries
|
||||
const ROLLING_STATE_REASON_SPAN_SIZE: usize = 32;
|
||||
@ -20,149 +10,6 @@ pub const UPDATE_STATE_STATS_INTERVAL_SECS: u32 = 1;
|
||||
// - Interval is number of seconds in each entry
|
||||
const ROLLING_ANSWERS_SIZE: usize = 10;
|
||||
pub const ROLLING_ANSWER_INTERVAL_SECS: u32 = 60;
|
||||
|
||||
#[derive(Debug, Clone, Copy, PartialEq, Eq, Default)]
|
||||
pub struct TransferCount {
|
||||
down: ByteCount,
|
||||
up: ByteCount,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Default)]
|
||||
pub struct TransferStatsAccounting {
|
||||
rolling_transfers: VecDeque<TransferCount>,
|
||||
current_transfer: TransferCount,
|
||||
}
|
||||
|
||||
impl TransferStatsAccounting {
|
||||
pub fn new() -> Self {
|
||||
Self {
|
||||
rolling_transfers: VecDeque::new(),
|
||||
current_transfer: TransferCount::default(),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn add_down(&mut self, bytes: ByteCount) {
|
||||
self.current_transfer.down += bytes;
|
||||
}
|
||||
|
||||
pub fn add_up(&mut self, bytes: ByteCount) {
|
||||
self.current_transfer.up += bytes;
|
||||
}
|
||||
|
||||
pub fn roll_transfers(
|
||||
&mut self,
|
||||
last_ts: Timestamp,
|
||||
cur_ts: Timestamp,
|
||||
transfer_stats: &mut TransferStatsDownUp,
|
||||
) {
|
||||
let dur_ms = cur_ts.saturating_sub(last_ts) / 1000u64;
|
||||
while self.rolling_transfers.len() >= ROLLING_TRANSFERS_SIZE {
|
||||
self.rolling_transfers.pop_front();
|
||||
}
|
||||
self.rolling_transfers.push_back(self.current_transfer);
|
||||
|
||||
transfer_stats.down.total += self.current_transfer.down;
|
||||
transfer_stats.up.total += self.current_transfer.up;
|
||||
|
||||
self.current_transfer = TransferCount::default();
|
||||
|
||||
transfer_stats.down.maximum = 0.into();
|
||||
transfer_stats.up.maximum = 0.into();
|
||||
transfer_stats.down.minimum = u64::MAX.into();
|
||||
transfer_stats.up.minimum = u64::MAX.into();
|
||||
transfer_stats.down.average = 0.into();
|
||||
transfer_stats.up.average = 0.into();
|
||||
for xfer in &self.rolling_transfers {
|
||||
let bpsd = xfer.down * 1000u64 / dur_ms;
|
||||
let bpsu = xfer.up * 1000u64 / dur_ms;
|
||||
transfer_stats.down.maximum.max_assign(bpsd);
|
||||
transfer_stats.up.maximum.max_assign(bpsu);
|
||||
transfer_stats.down.minimum.min_assign(bpsd);
|
||||
transfer_stats.up.minimum.min_assign(bpsu);
|
||||
transfer_stats.down.average += bpsd;
|
||||
transfer_stats.up.average += bpsu;
|
||||
}
|
||||
let len = self.rolling_transfers.len() as u64;
|
||||
if len > 0 {
|
||||
transfer_stats.down.average /= len;
|
||||
transfer_stats.up.average /= len;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Default)]
|
||||
pub struct LatencyStatsAccounting {
|
||||
rolling_latencies: VecDeque<TimestampDuration>,
|
||||
}
|
||||
|
||||
impl LatencyStatsAccounting {
|
||||
pub fn new() -> Self {
|
||||
Self {
|
||||
rolling_latencies: VecDeque::new(),
|
||||
}
|
||||
}
|
||||
|
||||
fn get_tm_n(sorted_latencies: &[TimestampDuration], n: usize) -> Option<TimestampDuration> {
|
||||
let tmcount = sorted_latencies.len() * n / 100;
|
||||
if tmcount == 0 {
|
||||
None
|
||||
} else {
|
||||
let mut tm = TimestampDuration::new(0);
|
||||
for l in &sorted_latencies[..tmcount] {
|
||||
tm += *l;
|
||||
}
|
||||
tm /= tmcount as u64;
|
||||
Some(tm)
|
||||
}
|
||||
}
|
||||
|
||||
fn get_p_n(sorted_latencies: &[TimestampDuration], n: usize) -> TimestampDuration {
|
||||
let pindex = (sorted_latencies.len() * n / 100).saturating_sub(1);
|
||||
sorted_latencies[pindex]
|
||||
}
|
||||
|
||||
pub fn record_latency(&mut self, latency: TimestampDuration) -> LatencyStats {
|
||||
while self.rolling_latencies.len() >= ROLLING_LATENCIES_SIZE {
|
||||
self.rolling_latencies.pop_front();
|
||||
}
|
||||
self.rolling_latencies.push_back(latency);
|
||||
|
||||
// Calculate latency stats
|
||||
|
||||
let mut fastest = TimestampDuration::new(u64::MAX);
|
||||
let mut slowest = TimestampDuration::new(0u64);
|
||||
let mut average = TimestampDuration::new(0u64);
|
||||
|
||||
for rl in &self.rolling_latencies {
|
||||
fastest.min_assign(*rl);
|
||||
slowest.max_assign(*rl);
|
||||
average += *rl;
|
||||
}
|
||||
let len = self.rolling_latencies.len() as u64;
|
||||
if len > 0 {
|
||||
average /= len;
|
||||
}
|
||||
|
||||
let mut sorted_latencies: Vec<_> = self.rolling_latencies.iter().copied().collect();
|
||||
sorted_latencies.sort();
|
||||
|
||||
let tm90 = Self::get_tm_n(&sorted_latencies, 90).unwrap_or(average);
|
||||
let tm75 = Self::get_tm_n(&sorted_latencies, 75).unwrap_or(average);
|
||||
let p90 = Self::get_p_n(&sorted_latencies, 90);
|
||||
let p75 = Self::get_p_n(&sorted_latencies, 75);
|
||||
|
||||
LatencyStats {
|
||||
fastest,
|
||||
average,
|
||||
slowest,
|
||||
tm90,
|
||||
tm75,
|
||||
p90,
|
||||
p75,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
|
||||
pub struct StateReasonSpan {
|
||||
state_reason: BucketEntryStateReason,
|
||||
|
@ -289,7 +289,7 @@ impl RoutingTable {
|
||||
|
||||
// Get what contact method would be used for contacting the bootstrap
|
||||
let bsdi = match network_manager
|
||||
.get_node_contact_method(nr.default_filtered())
|
||||
.get_node_contact_method(nr.sequencing_filtered(Sequencing::PreferOrdered))
|
||||
{
|
||||
Ok(Some(ncm)) if ncm.is_direct() => ncm.direct_dial_info().unwrap(),
|
||||
Ok(v) => {
|
||||
@ -307,7 +307,7 @@ impl RoutingTable {
|
||||
|
||||
// Need VALID signed peer info, so ask bootstrap to find_node of itself
|
||||
// which will ensure it has the bootstrap's signed peer info as part of the response
|
||||
let _ = routing_table.find_nodes_close_to_node_ref(crypto_kind, nr.clone(), vec![]).await;
|
||||
let _ = routing_table.find_nodes_close_to_node_ref(crypto_kind, nr.sequencing_filtered(Sequencing::PreferOrdered), vec![]).await;
|
||||
|
||||
// Ensure we got the signed peer info
|
||||
if !nr.signed_node_info_has_valid_signature(routing_domain) {
|
||||
|
@ -95,39 +95,17 @@ impl RoutingTable {
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
// Ping the relay to keep it alive, over every protocol it is relaying for us
|
||||
#[instrument(level = "trace", skip(self, futurequeue), err)]
|
||||
async fn relay_keepalive_public_internet(
|
||||
// Get protocol-specific noderefs for a relay to determine its liveness
|
||||
// Relays get pinged over more protocols than non-relay nodes because we need to ensure
|
||||
// that they can reliably forward packets with 'all' sequencing, not just over 'any' sequencing
|
||||
fn get_relay_specific_noderefs(
|
||||
&self,
|
||||
cur_ts: Timestamp,
|
||||
futurequeue: &mut VecDeque<PingValidatorFuture>,
|
||||
) -> EyreResult<()> {
|
||||
// Get the PublicInternet relay if we are using one
|
||||
let Some(relay_nr) = self.relay_node(RoutingDomain::PublicInternet) else {
|
||||
return Ok(());
|
||||
};
|
||||
|
||||
relay_nr: FilteredNodeRef,
|
||||
routing_domain: RoutingDomain,
|
||||
) -> Vec<FilteredNodeRef> {
|
||||
// Get our publicinternet dial info
|
||||
let dids = self.all_filtered_dial_info_details(
|
||||
RoutingDomain::PublicInternet.into(),
|
||||
&DialInfoFilter::all(),
|
||||
);
|
||||
|
||||
let opt_relay_keepalive_ts = self.relay_node_last_keepalive(RoutingDomain::PublicInternet);
|
||||
let relay_needs_keepalive = opt_relay_keepalive_ts
|
||||
.map(|kts| {
|
||||
cur_ts.saturating_sub(kts).as_u64()
|
||||
>= (RELAY_KEEPALIVE_PING_INTERVAL_SECS as u64 * 1_000_000u64)
|
||||
})
|
||||
.unwrap_or(true);
|
||||
|
||||
if !relay_needs_keepalive {
|
||||
return Ok(());
|
||||
}
|
||||
// Say we're doing this keepalive now
|
||||
self.inner
|
||||
.write()
|
||||
.set_relay_node_last_keepalive(RoutingDomain::PublicInternet, cur_ts);
|
||||
let dids =
|
||||
self.all_filtered_dial_info_details(routing_domain.into(), &DialInfoFilter::all());
|
||||
|
||||
// We need to keep-alive at one connection per ordering for relays
|
||||
// but also one per NAT mapping that we need to keep open for our inbound dial info
|
||||
@ -180,6 +158,41 @@ impl RoutingTable {
|
||||
relay_noderefs.push(relay_nr);
|
||||
}
|
||||
|
||||
relay_noderefs
|
||||
}
|
||||
|
||||
// Ping the relay to keep it alive, over every protocol it is relaying for us
|
||||
#[instrument(level = "trace", skip(self, futurequeue), err)]
|
||||
async fn relay_keepalive_public_internet(
|
||||
&self,
|
||||
cur_ts: Timestamp,
|
||||
futurequeue: &mut VecDeque<PingValidatorFuture>,
|
||||
) -> EyreResult<()> {
|
||||
// Get the PublicInternet relay if we are using one
|
||||
let Some(relay_nr) = self.relay_node(RoutingDomain::PublicInternet) else {
|
||||
return Ok(());
|
||||
};
|
||||
|
||||
let opt_relay_keepalive_ts = self.relay_node_last_keepalive(RoutingDomain::PublicInternet);
|
||||
let relay_needs_keepalive = opt_relay_keepalive_ts
|
||||
.map(|kts| {
|
||||
cur_ts.saturating_sub(kts).as_u64()
|
||||
>= (RELAY_KEEPALIVE_PING_INTERVAL_SECS as u64 * 1_000_000u64)
|
||||
})
|
||||
.unwrap_or(true);
|
||||
|
||||
if !relay_needs_keepalive {
|
||||
return Ok(());
|
||||
}
|
||||
// Say we're doing this keepalive now
|
||||
self.inner
|
||||
.write()
|
||||
.set_relay_node_last_keepalive(RoutingDomain::PublicInternet, cur_ts);
|
||||
|
||||
// Get the sequencing-specific relay noderefs for this relay
|
||||
let relay_noderefs =
|
||||
self.get_relay_specific_noderefs(relay_nr, RoutingDomain::PublicInternet);
|
||||
|
||||
for relay_nr_filtered in relay_noderefs {
|
||||
futurequeue.push_back(
|
||||
async move {
|
||||
@ -249,24 +262,36 @@ impl RoutingTable {
|
||||
futurequeue: &mut VecDeque<PingValidatorFuture>,
|
||||
) -> EyreResult<()> {
|
||||
// Get all nodes needing pings in the PublicInternet routing domain
|
||||
let relay_node_filter = self.make_public_internet_relay_node_filter();
|
||||
let node_refs = self.get_nodes_needing_ping(RoutingDomain::PublicInternet, cur_ts);
|
||||
|
||||
// Just do a single ping with the best protocol for all the other nodes to check for liveness
|
||||
for nr in node_refs {
|
||||
let nr = nr.sequencing_clone(Sequencing::PreferOrdered);
|
||||
// If the node is relay-capable, we should ping it over ALL sequencing types
|
||||
// instead of just a simple liveness check on ANY best contact method
|
||||
|
||||
futurequeue.push_back(
|
||||
async move {
|
||||
#[cfg(feature = "verbose-tracing")]
|
||||
veilid_log!(nr debug "--> PublicInternet Validator ping to {:?}", nr);
|
||||
let rpc_processor = nr.rpc_processor();
|
||||
let _ = rpc_processor
|
||||
.rpc_call_status(Destination::direct(nr))
|
||||
.await?;
|
||||
Ok(())
|
||||
}
|
||||
.boxed(),
|
||||
);
|
||||
let all_noderefs = if nr.operate(|_rti, e| !relay_node_filter(e)) {
|
||||
// If this is a relay capable node, get all the sequencing specific noderefs
|
||||
self.get_relay_specific_noderefs(nr, RoutingDomain::PublicInternet)
|
||||
} else {
|
||||
// If a non-relay node, ping with the normal ping type
|
||||
vec![nr.sequencing_clone(Sequencing::PreferOrdered)]
|
||||
};
|
||||
|
||||
for nr in all_noderefs {
|
||||
futurequeue.push_back(
|
||||
async move {
|
||||
#[cfg(feature = "verbose-tracing")]
|
||||
veilid_log!(nr debug "--> PublicInternet Validator ping to {:?}", nr);
|
||||
let rpc_processor = nr.rpc_processor();
|
||||
let _ = rpc_processor
|
||||
.rpc_call_status(Destination::direct(nr))
|
||||
.await?;
|
||||
Ok(())
|
||||
}
|
||||
.boxed(),
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
|
@@ -202,7 +202,12 @@ impl RoutingTable
            }
            if !got_outbound_relay {
                // Find a node in our routing table that is an acceptable inbound relay
                if let Some(nr) = self.find_fastest_node(cur_ts, &relay_node_filter, |ls| ls.tm90) {
                if let Some(nr) = self.find_random_fast_node(
                    cur_ts,
                    &relay_node_filter,
                    RELAY_SELECTION_PERCENTILE,
                    |ls| ls.tm90,
                ) {
                    veilid_log!(self debug "Inbound relay node selected: {}", nr);
                    editor.set_relay_node(Some(nr));
                }
18 veilid-core/src/rpc_processor/debug.rs Normal file
@@ -0,0 +1,18 @@
|
||||
use super::*;
|
||||
|
||||
impl RPCProcessor {
|
||||
pub fn debug_info_nodeinfo(&self) -> String {
|
||||
let mut out = String::new();
|
||||
let inner = self.inner.lock();
|
||||
out += &format!(
|
||||
"RPC Worker Dequeue Latency:\n{}",
|
||||
indent_all_string(&inner.rpc_worker_dequeue_latency)
|
||||
);
|
||||
out += "\n";
|
||||
out += &format!(
|
||||
"RPC Worker Process Latency:\n{}",
|
||||
indent_all_string(&inner.rpc_worker_process_latency)
|
||||
);
|
||||
out
|
||||
}
|
||||
}
|
@@ -206,7 +206,7 @@ impl Destination
            }
            if opt_routing_domain.is_none() {
                // In the case of an unexpected relay, log it and don't pass any sender peer info into an unexpected relay
                veilid_log!(node warn "No routing domain for relay: relay={}, node={}", relay, node);
                veilid_log!(node debug "Unexpected relay: relay={}, node={}", relay, node);
            };

            (
|
||||
|
@ -2,6 +2,7 @@ use super::*;
|
||||
|
||||
mod answer;
|
||||
mod coders;
|
||||
mod debug;
|
||||
mod destination;
|
||||
mod error;
|
||||
mod fanout;
|
||||
@ -22,6 +23,7 @@ mod rpc_status;
|
||||
mod rpc_validate_dial_info;
|
||||
mod rpc_value_changed;
|
||||
mod rpc_watch_value;
|
||||
mod rpc_worker;
|
||||
mod sender_info;
|
||||
mod sender_peer_info;
|
||||
|
||||
@ -48,7 +50,7 @@ pub(crate) use error::*;
|
||||
pub(crate) use fanout::*;
|
||||
pub(crate) use sender_info::*;
|
||||
|
||||
use futures_util::StreamExt;
|
||||
use futures_util::StreamExt as _;
|
||||
use stop_token::future::FutureExt as _;
|
||||
|
||||
use coders::*;
|
||||
@ -56,6 +58,7 @@ use message::*;
|
||||
use message_header::*;
|
||||
use operation_waiter::*;
|
||||
use rendered_operation::*;
|
||||
use rpc_worker::*;
|
||||
use sender_peer_info::*;
|
||||
|
||||
use crypto::*;
|
||||
@ -67,6 +70,10 @@ impl_veilid_log_facility!("rpc");
|
||||
|
||||
/////////////////////////////////////////////////////////////////////
|
||||
|
||||
const RPC_WORKERS_PER_CORE: u32 = 16;
|
||||
|
||||
/////////////////////////////////////////////////////////////////////
|
||||
|
||||
#[derive(Debug)]
|
||||
#[must_use]
|
||||
struct WaitableReplyContext {
|
||||
@ -122,9 +129,13 @@ impl Default for RPCProcessorStartupContext {
|
||||
#[derive(Debug)]
|
||||
#[must_use]
|
||||
struct RPCProcessorInner {
|
||||
send_channel: Option<flume::Sender<(Span, MessageEncoded)>>,
|
||||
stop_source: Option<StopSource>,
|
||||
worker_join_handles: Vec<MustJoinHandle<()>>,
|
||||
rpc_send_channel: Option<flume::Sender<RPCWorkerRequest>>,
|
||||
rpc_stop_source: Option<StopSource>,
|
||||
rpc_worker_join_handles: Vec<MustJoinHandle<()>>,
|
||||
rpc_worker_dequeue_latency: LatencyStats,
|
||||
rpc_worker_process_latency: LatencyStats,
|
||||
rpc_worker_dequeue_latency_accounting: LatencyStatsAccounting,
|
||||
rpc_worker_process_latency_accounting: LatencyStatsAccounting,
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
@ -146,9 +157,13 @@ impl_veilid_component!(RPCProcessor);
|
||||
impl RPCProcessor {
|
||||
fn new_inner() -> RPCProcessorInner {
|
||||
RPCProcessorInner {
|
||||
send_channel: None,
|
||||
stop_source: None,
|
||||
worker_join_handles: Vec::new(),
|
||||
rpc_send_channel: None,
|
||||
rpc_stop_source: None,
|
||||
rpc_worker_join_handles: Vec::new(),
|
||||
rpc_worker_dequeue_latency: LatencyStats::default(),
|
||||
rpc_worker_process_latency: LatencyStats::default(),
|
||||
rpc_worker_dequeue_latency_accounting: LatencyStatsAccounting::new(),
|
||||
rpc_worker_process_latency_accounting: LatencyStatsAccounting::new(),
|
||||
}
|
||||
}
|
||||
|
||||
@ -173,7 +188,7 @@ impl RPCProcessor {
|
||||
}
|
||||
|
||||
// Default RPC concurrency is the number of CPUs * 16 rpc workers per core, as a single worker takes about 1% CPU when relaying and 16% is reasonable for baseline plus relay
|
||||
concurrency *= 16;
|
||||
concurrency *= RPC_WORKERS_PER_CORE;
|
||||
}
|
||||
(concurrency, queue_size, max_route_hop_count, timeout_us)
|
||||
};
|
||||
@ -227,22 +242,12 @@ impl RPCProcessor {
|
||||
let mut inner = self.inner.lock();
|
||||
|
||||
let channel = flume::bounded(self.queue_size as usize);
|
||||
inner.send_channel = Some(channel.0.clone());
|
||||
inner.stop_source = Some(StopSource::new());
|
||||
|
||||
// spin up N workers
|
||||
veilid_log!(self trace "Spinning up {} RPC workers", self.concurrency);
|
||||
for task_n in 0..self.concurrency {
|
||||
let registry = self.registry();
|
||||
let receiver = channel.1.clone();
|
||||
let stop_token = inner.stop_source.as_ref().unwrap().token();
|
||||
let jh = spawn(&format!("rpc worker {}", task_n), async move {
|
||||
let this = registry.rpc_processor();
|
||||
Box::pin(this.rpc_worker(stop_token, receiver)).await
|
||||
});
|
||||
inner.worker_join_handles.push(jh);
|
||||
}
|
||||
inner.rpc_send_channel = Some(channel.0.clone());
|
||||
inner.rpc_stop_source = Some(StopSource::new());
|
||||
}
|
||||
|
||||
self.startup_rpc_workers()?;
|
||||
|
||||
guard.success();
|
||||
|
||||
veilid_log!(self debug "finished rpc processor startup");
|
||||
@ -260,21 +265,7 @@ impl RPCProcessor {
|
||||
.await
|
||||
.expect("should be started up");
|
||||
|
||||
// Stop the rpc workers
|
||||
let mut unord = FuturesUnordered::new();
|
||||
{
|
||||
let mut inner = self.inner.lock();
|
||||
// take the join handles out
|
||||
for h in inner.worker_join_handles.drain(..) {
|
||||
unord.push(h);
|
||||
}
|
||||
// drop the stop
|
||||
drop(inner.stop_source.take());
|
||||
}
|
||||
veilid_log!(self debug "stopping {} rpc worker tasks", unord.len());
|
||||
|
||||
// Wait for them to complete
|
||||
while unord.next().await.is_some() {}
|
||||
self.shutdown_rpc_workers().await;
|
||||
|
||||
veilid_log!(self debug "resetting rpc processor state");
|
||||
|
||||
@ -817,8 +808,10 @@ impl RPCProcessor {
|
||||
return SenderPeerInfo::default();
|
||||
};
|
||||
let Some(routing_domain) = opt_routing_domain else {
|
||||
// No routing domain for target, no node info
|
||||
// Only a stale connection or no connection exists
|
||||
// No routing domain for target, no node info is safe to send here
|
||||
// Only a stale connection or no connection exists, or an unexpected
|
||||
// relay was used, possibly due to the destination switching relays
|
||||
// in a race condition with our send
|
||||
return SenderPeerInfo::default();
|
||||
};
|
||||
|
||||
@ -1478,11 +1471,24 @@ impl RPCProcessor {
|
||||
let operation = match self.decode_rpc_operation(&encoded_msg) {
|
||||
Ok(v) => v,
|
||||
Err(e) => {
|
||||
// Debug on error
|
||||
veilid_log!(self debug "Dropping routed RPC: {}", e);
|
||||
match e {
|
||||
// Invalid messages that should be punished
|
||||
RPCError::Protocol(_) | RPCError::InvalidFormat(_) => {
|
||||
veilid_log!(self debug "Invalid routed RPC Operation: {}", e);
|
||||
|
||||
// XXX: Punish routes that send routed undecodable crap
|
||||
// self.network_manager().address_filter().punish_route_id(xxx, PunishmentReason::FailedToDecodeRoutedMessage);
|
||||
}
|
||||
// Ignored messages that should be dropped
|
||||
RPCError::Ignore(_) | RPCError::Network(_) | RPCError::TryAgain(_) => {
|
||||
veilid_log!(self trace "Dropping routed RPC Operation: {}", e);
|
||||
}
|
||||
// Internal errors that deserve louder logging
|
||||
RPCError::Unimplemented(_) | RPCError::Internal(_) => {
|
||||
veilid_log!(self error "Error decoding routed RPC operation: {}", e);
|
||||
}
|
||||
};
|
||||
|
||||
// XXX: Punish routes that send routed undecodable crap
|
||||
// self.network_manager().address_filter().punish_route_id(xxx, PunishmentReason::FailedToDecodeRoutedMessage);
|
||||
return Ok(NetworkResult::invalid_message(e));
|
||||
}
|
||||
};
|
||||
@ -1600,16 +1606,16 @@ impl RPCProcessor {
|
||||
if let Err(e) = self.waiting_rpc_table.complete_op_waiter(op_id, msg) {
|
||||
match e {
|
||||
RPCError::Unimplemented(_) | RPCError::Internal(_) => {
|
||||
veilid_log!(self error "Could not complete rpc operation: id = {}: {}", op_id, e);
|
||||
veilid_log!(self error "Error in RPC operation: id = {}: {}", op_id, e);
|
||||
}
|
||||
RPCError::InvalidFormat(_)
|
||||
| RPCError::Protocol(_)
|
||||
| RPCError::Network(_)
|
||||
| RPCError::TryAgain(_) => {
|
||||
veilid_log!(self debug "Could not complete rpc operation: id = {}: {}", op_id, e);
|
||||
veilid_log!(self debug "Could not complete RPC operation: id = {}: {}", op_id, e);
|
||||
}
|
||||
RPCError::Ignore(_) => {
|
||||
veilid_log!(self debug "Answer late: id = {}", op_id);
|
||||
RPCError::Ignore(e) => {
|
||||
veilid_log!(self debug "RPC operation ignored: id = {}: {}", op_id, e);
|
||||
}
|
||||
};
|
||||
// Don't throw an error here because it's okay if the original operation timed out
|
||||
@ -1618,164 +1624,4 @@ impl RPCProcessor {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
async fn rpc_worker(
|
||||
&self,
|
||||
stop_token: StopToken,
|
||||
receiver: flume::Receiver<(Span, MessageEncoded)>,
|
||||
) {
|
||||
while let Ok(Ok((prev_span, msg))) =
|
||||
receiver.recv_async().timeout_at(stop_token.clone()).await
|
||||
{
|
||||
let rpc_message_span = tracing::trace_span!("rpc message");
|
||||
rpc_message_span.follows_from(prev_span);
|
||||
|
||||
network_result_value_or_log!(self match self
|
||||
.process_rpc_message(msg).instrument(rpc_message_span)
|
||||
.await
|
||||
{
|
||||
Err(e) => {
|
||||
veilid_log!(self error "couldn't process rpc message: {}", e);
|
||||
continue;
|
||||
}
|
||||
|
||||
Ok(v) => {
|
||||
v
|
||||
}
|
||||
} => [ format!(": msg.header={:?}", msg.header) ] {});
|
||||
}
|
||||
}
|
||||
|
||||
#[instrument(level = "trace", target = "rpc", skip_all)]
|
||||
pub fn enqueue_direct_message(
|
||||
&self,
|
||||
envelope: Envelope,
|
||||
sender_noderef: FilteredNodeRef,
|
||||
flow: Flow,
|
||||
routing_domain: RoutingDomain,
|
||||
body: Vec<u8>,
|
||||
) -> EyreResult<()> {
|
||||
let _guard = self
|
||||
.startup_context
|
||||
.startup_lock
|
||||
.enter()
|
||||
.wrap_err("not started up")?;
|
||||
|
||||
if sender_noderef.routing_domain_set() != routing_domain {
|
||||
bail!("routing domain should match peer noderef filter");
|
||||
}
|
||||
|
||||
let header = MessageHeader {
|
||||
detail: RPCMessageHeaderDetail::Direct(RPCMessageHeaderDetailDirect {
|
||||
envelope,
|
||||
sender_noderef,
|
||||
flow,
|
||||
routing_domain,
|
||||
}),
|
||||
timestamp: Timestamp::now(),
|
||||
body_len: ByteCount::new(body.len() as u64),
|
||||
};
|
||||
|
||||
let msg = MessageEncoded {
|
||||
header,
|
||||
data: MessageData { contents: body },
|
||||
};
|
||||
|
||||
let send_channel = {
|
||||
let inner = self.inner.lock();
|
||||
let Some(send_channel) = inner.send_channel.as_ref().cloned() else {
|
||||
bail!("send channel is closed");
|
||||
};
|
||||
send_channel
|
||||
};
|
||||
send_channel
|
||||
.try_send((Span::current(), msg))
|
||||
.map_err(|e| eyre!("failed to enqueue direct RPC message: {}", e))?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[instrument(level = "trace", target = "rpc", skip_all)]
|
||||
fn enqueue_safety_routed_message(
|
||||
&self,
|
||||
direct: RPCMessageHeaderDetailDirect,
|
||||
remote_safety_route: PublicKey,
|
||||
sequencing: Sequencing,
|
||||
body: Vec<u8>,
|
||||
) -> EyreResult<()> {
|
||||
let _guard = self
|
||||
.startup_context
|
||||
.startup_lock
|
||||
.enter()
|
||||
.wrap_err("not started up")?;
|
||||
|
||||
let header = MessageHeader {
|
||||
detail: RPCMessageHeaderDetail::SafetyRouted(RPCMessageHeaderDetailSafetyRouted {
|
||||
direct,
|
||||
remote_safety_route,
|
||||
sequencing,
|
||||
}),
|
||||
timestamp: Timestamp::now(),
|
||||
body_len: (body.len() as u64).into(),
|
||||
};
|
||||
|
||||
let msg = MessageEncoded {
|
||||
header,
|
||||
data: MessageData { contents: body },
|
||||
};
|
||||
let send_channel = {
|
||||
let inner = self.inner.lock();
|
||||
let Some(send_channel) = inner.send_channel.as_ref().cloned() else {
|
||||
bail!("send channel is closed");
|
||||
};
|
||||
send_channel
|
||||
};
|
||||
send_channel
|
||||
.try_send((Span::current(), msg))
|
||||
.map_err(|e| eyre!("failed to enqueue safety routed RPC message: {}", e))?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[instrument(level = "trace", target = "rpc", skip_all)]
|
||||
fn enqueue_private_routed_message(
|
||||
&self,
|
||||
direct: RPCMessageHeaderDetailDirect,
|
||||
remote_safety_route: PublicKey,
|
||||
private_route: PublicKey,
|
||||
safety_spec: SafetySpec,
|
||||
body: Vec<u8>,
|
||||
) -> EyreResult<()> {
|
||||
let _guard = self
|
||||
.startup_context
|
||||
.startup_lock
|
||||
.enter()
|
||||
.wrap_err("not started up")?;
|
||||
|
||||
let header = MessageHeader {
|
||||
detail: RPCMessageHeaderDetail::PrivateRouted(RPCMessageHeaderDetailPrivateRouted {
|
||||
direct,
|
||||
remote_safety_route,
|
||||
private_route,
|
||||
safety_spec,
|
||||
}),
|
||||
timestamp: Timestamp::now(),
|
||||
body_len: (body.len() as u64).into(),
|
||||
};
|
||||
|
||||
let msg = MessageEncoded {
|
||||
header,
|
||||
data: MessageData { contents: body },
|
||||
};
|
||||
|
||||
let send_channel = {
|
||||
let inner = self.inner.lock();
|
||||
let Some(send_channel) = inner.send_channel.as_ref().cloned() else {
|
||||
bail!("send channel is closed");
|
||||
};
|
||||
send_channel
|
||||
};
|
||||
send_channel
|
||||
.try_send((Span::current(), msg))
|
||||
.map_err(|e| eyre!("failed to enqueue private routed RPC message: {}", e))?;
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
@ -8,7 +8,7 @@ where
|
||||
{
|
||||
waiter: OperationWaiter<T, C>,
|
||||
op_id: OperationId,
|
||||
result_receiver: Option<flume::Receiver<(Span, T)>>,
|
||||
result_receiver: flume::Receiver<(Span, T)>,
|
||||
}
|
||||
|
||||
impl<T, C> OperationWaitHandle<T, C>
|
||||
@ -27,9 +27,7 @@ where
|
||||
C: Unpin + Clone,
|
||||
{
|
||||
fn drop(&mut self) {
|
||||
if self.result_receiver.is_some() {
|
||||
self.waiter.cancel_op_waiter(self.op_id);
|
||||
}
|
||||
self.waiter.cancel_op_waiter(self.op_id);
|
||||
}
|
||||
}
|
||||
|
||||
@ -106,7 +104,7 @@ where
|
||||
OperationWaitHandle {
|
||||
waiter: self.clone(),
|
||||
op_id,
|
||||
result_receiver: Some(result_receiver),
|
||||
result_receiver,
|
||||
}
|
||||
}
|
||||
|
||||
@ -125,65 +123,69 @@ where
|
||||
/// Get operation context
|
||||
pub fn get_op_context(&self, op_id: OperationId) -> Result<C, RPCError> {
|
||||
let inner = self.inner.lock();
|
||||
let Some(waiting_op) = inner.waiting_op_table.get(&op_id) else {
|
||||
return Err(RPCError::ignore(format!(
|
||||
"Missing operation id getting op context: id={}",
|
||||
op_id
|
||||
)));
|
||||
let res = {
|
||||
let Some(waiting_op) = inner.waiting_op_table.get(&op_id) else {
|
||||
return Err(RPCError::ignore(format!(
|
||||
"Missing operation id getting op context: id={}",
|
||||
op_id
|
||||
)));
|
||||
};
|
||||
Ok(waiting_op.context.clone())
|
||||
};
|
||||
Ok(waiting_op.context.clone())
|
||||
drop(inner);
|
||||
res
|
||||
}
|
||||
|
||||
/// Remove wait for op
|
||||
#[instrument(level = "trace", target = "rpc", skip_all)]
|
||||
fn cancel_op_waiter(&self, op_id: OperationId) {
|
||||
let mut inner = self.inner.lock();
|
||||
inner.waiting_op_table.remove(&op_id);
|
||||
{
|
||||
let waiting_op = inner.waiting_op_table.remove(&op_id);
|
||||
drop(waiting_op);
|
||||
}
|
||||
drop(inner);
|
||||
}
|
||||
|
||||
/// Complete the waiting op
|
||||
#[instrument(level = "trace", target = "rpc", skip_all)]
|
||||
pub fn complete_op_waiter(&self, op_id: OperationId, message: T) -> Result<(), RPCError> {
|
||||
let waiting_op = {
|
||||
let mut inner = self.inner.lock();
|
||||
inner
|
||||
.waiting_op_table
|
||||
.remove(&op_id)
|
||||
.ok_or_else(RPCError::else_ignore(format!(
|
||||
"Unmatched operation id: {}",
|
||||
op_id
|
||||
)))?
|
||||
let mut inner = self.inner.lock();
|
||||
let res = {
|
||||
let waiting_op =
|
||||
inner
|
||||
.waiting_op_table
|
||||
.remove(&op_id)
|
||||
.ok_or_else(RPCError::else_ignore(format!(
|
||||
"Unmatched operation id: {}",
|
||||
op_id
|
||||
)))?;
|
||||
waiting_op
|
||||
.result_sender
|
||||
.send((Span::current(), message))
|
||||
.map_err(RPCError::ignore)
|
||||
};
|
||||
waiting_op
|
||||
.result_sender
|
||||
.send((Span::current(), message))
|
||||
.map_err(RPCError::ignore)
|
||||
drop(inner);
|
||||
res
|
||||
}
|
||||
|
||||
/// Wait for operation to complete
|
||||
#[instrument(level = "trace", target = "rpc", skip_all)]
|
||||
pub async fn wait_for_op(
|
||||
&self,
|
||||
mut handle: OperationWaitHandle<T, C>,
|
||||
handle: OperationWaitHandle<T, C>,
|
||||
timeout_us: TimestampDuration,
|
||||
) -> Result<TimeoutOr<(T, TimestampDuration)>, RPCError> {
|
||||
let timeout_ms = us_to_ms(timeout_us.as_u64()).map_err(RPCError::internal)?;
|
||||
|
||||
// Take the receiver
|
||||
// After this, we must manually cancel since the cancel on handle drop is disabled
|
||||
let result_receiver = handle.result_receiver.take().unwrap();
|
||||
|
||||
let result_fut = result_receiver.recv_async().in_current_span();
|
||||
let result_fut = handle.result_receiver.recv_async().in_current_span();
|
||||
|
||||
// wait for eventualvalue
|
||||
let start_ts = Timestamp::now();
|
||||
let res = timeout(timeout_ms, result_fut).await.into_timeout_or();
|
||||
|
||||
match res {
|
||||
TimeoutOr::Timeout => {
|
||||
self.cancel_op_waiter(handle.op_id);
|
||||
Ok(TimeoutOr::Timeout)
|
||||
}
|
||||
TimeoutOr::Timeout => Ok(TimeoutOr::Timeout),
|
||||
TimeoutOr::Value(Ok((_span_id, ret))) => {
|
||||
let end_ts = Timestamp::now();
|
||||
|
||||
@ -192,7 +194,10 @@ where
|
||||
|
||||
Ok(TimeoutOr::Value((ret, end_ts.saturating_sub(start_ts))))
|
||||
}
|
||||
TimeoutOr::Value(Err(e)) => Err(RPCError::ignore(e)),
|
||||
TimeoutOr::Value(Err(e)) => {
|
||||
//
|
||||
Err(RPCError::ignore(e))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
247 veilid-core/src/rpc_processor/rpc_worker.rs Normal file
@@ -0,0 +1,247 @@
|
||||
use futures_util::StreamExt as _;
|
||||
use stop_token::future::FutureExt as _;
|
||||
|
||||
use super::*;
|
||||
|
||||
#[derive(Debug)]
|
||||
pub(super) enum RPCWorkerRequestKind {
|
||||
Message { message_encoded: MessageEncoded },
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
pub(super) struct RPCWorkerRequest {
|
||||
enqueued_ts: Timestamp,
|
||||
span: Span,
|
||||
kind: RPCWorkerRequestKind,
|
||||
}
|
||||
|
||||
impl RPCProcessor {
|
||||
pub(super) fn startup_rpc_workers(&self) -> EyreResult<()> {
|
||||
let mut inner = self.inner.lock();
|
||||
|
||||
// Relay workers
|
||||
let channel = flume::bounded(self.queue_size as usize);
|
||||
inner.rpc_send_channel = Some(channel.0.clone());
|
||||
inner.rpc_stop_source = Some(StopSource::new());
|
||||
|
||||
// spin up N workers
|
||||
veilid_log!(self debug "Starting {} RPC workers", self.concurrency);
|
||||
for task_n in 0..self.concurrency {
|
||||
let registry = self.registry();
|
||||
let receiver = channel.1.clone();
|
||||
let stop_token = inner.rpc_stop_source.as_ref().unwrap().token();
|
||||
let jh = spawn(&format!("relay worker {}", task_n), async move {
|
||||
let this = registry.rpc_processor();
|
||||
Box::pin(this.rpc_worker(stop_token, receiver)).await
|
||||
});
|
||||
inner.rpc_worker_join_handles.push(jh);
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub(super) async fn shutdown_rpc_workers(&self) {
|
||||
// Stop the rpc workers
|
||||
let mut unord = FuturesUnordered::new();
|
||||
{
|
||||
let mut inner = self.inner.lock();
|
||||
// take the join handles out
|
||||
for h in inner.rpc_worker_join_handles.drain(..) {
|
||||
unord.push(h);
|
||||
}
|
||||
// drop the stop
|
||||
drop(inner.rpc_stop_source.take());
|
||||
}
|
||||
veilid_log!(self debug "Stopping {} RPC workers", unord.len());
|
||||
|
||||
// Wait for them to complete
|
||||
while unord.next().await.is_some() {}
|
||||
}
|
||||
|
||||
async fn rpc_worker(&self, stop_token: StopToken, receiver: flume::Receiver<RPCWorkerRequest>) {
|
||||
while let Ok(Ok(request)) = receiver.recv_async().timeout_at(stop_token.clone()).await {
|
||||
let rpc_request_span = tracing::trace_span!("rpc request");
|
||||
rpc_request_span.follows_from(request.span);
|
||||
|
||||
// Measure dequeue time
|
||||
let dequeue_ts = Timestamp::now();
|
||||
let dequeue_latency = dequeue_ts.saturating_sub(request.enqueued_ts);
|
||||
|
||||
// Process request kind
|
||||
match request.kind {
|
||||
// Process RPC Message
|
||||
RPCWorkerRequestKind::Message { message_encoded } => {
|
||||
network_result_value_or_log!(self target:"network_result", match self
|
||||
.process_rpc_message(message_encoded).instrument(rpc_request_span)
|
||||
.await
|
||||
{
|
||||
Err(e) => {
|
||||
veilid_log!(self error "couldn't process rpc message: {}", e);
|
||||
continue;
|
||||
}
|
||||
Ok(v) => {
|
||||
v
|
||||
}
|
||||
} => [ format!(": msg.header={:?}", message_encoded.header) ] {});
|
||||
}
|
||||
}
|
||||
|
||||
// Measure process time
|
||||
let process_ts = Timestamp::now();
|
||||
let process_latency = process_ts.saturating_sub(dequeue_ts);
|
||||
|
||||
// Accounting
|
||||
let mut inner = self.inner.lock();
|
||||
inner.rpc_worker_dequeue_latency = inner
|
||||
.rpc_worker_dequeue_latency_accounting
|
||||
.record_latency(dequeue_latency);
|
||||
inner.rpc_worker_process_latency = inner
|
||||
.rpc_worker_process_latency_accounting
|
||||
.record_latency(process_latency);
|
||||
}
|
||||
}
|
||||
|
||||
#[instrument(level = "trace", target = "rpc", skip_all)]
|
||||
pub fn enqueue_direct_message(
|
||||
&self,
|
||||
envelope: Envelope,
|
||||
sender_noderef: FilteredNodeRef,
|
||||
flow: Flow,
|
||||
routing_domain: RoutingDomain,
|
||||
body: Vec<u8>,
|
||||
) -> EyreResult<()> {
|
||||
let _guard = self
|
||||
.startup_context
|
||||
.startup_lock
|
||||
.enter()
|
||||
.wrap_err("not started up")?;
|
||||
|
||||
if sender_noderef.routing_domain_set() != routing_domain {
|
||||
bail!("routing domain should match peer noderef filter");
|
||||
}
|
||||
|
||||
let header = MessageHeader {
|
||||
detail: RPCMessageHeaderDetail::Direct(RPCMessageHeaderDetailDirect {
|
||||
envelope,
|
||||
sender_noderef,
|
||||
flow,
|
||||
routing_domain,
|
||||
}),
|
||||
timestamp: Timestamp::now(),
|
||||
body_len: ByteCount::new(body.len() as u64),
|
||||
};
|
||||
|
||||
let message_encoded = MessageEncoded {
|
||||
header,
|
||||
data: MessageData { contents: body },
|
||||
};
|
||||
|
||||
let send_channel = {
|
||||
let inner = self.inner.lock();
|
||||
let Some(send_channel) = inner.rpc_send_channel.as_ref().cloned() else {
|
||||
bail!("send channel is closed");
|
||||
};
|
||||
send_channel
|
||||
};
|
||||
send_channel
|
||||
.try_send(RPCWorkerRequest {
|
||||
enqueued_ts: Timestamp::now(),
|
||||
span: Span::current(),
|
||||
kind: RPCWorkerRequestKind::Message { message_encoded },
|
||||
})
|
||||
.map_err(|e| eyre!("failed to enqueue direct RPC message: {}", e))?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[instrument(level = "trace", target = "rpc", skip_all)]
|
||||
pub(super) fn enqueue_safety_routed_message(
|
||||
&self,
|
||||
direct: RPCMessageHeaderDetailDirect,
|
||||
remote_safety_route: PublicKey,
|
||||
sequencing: Sequencing,
|
||||
body: Vec<u8>,
|
||||
) -> EyreResult<()> {
|
||||
let _guard = self
|
||||
.startup_context
|
||||
.startup_lock
|
||||
.enter()
|
||||
.wrap_err("not started up")?;
|
||||
|
||||
let header = MessageHeader {
|
||||
detail: RPCMessageHeaderDetail::SafetyRouted(RPCMessageHeaderDetailSafetyRouted {
|
||||
direct,
|
||||
remote_safety_route,
|
||||
sequencing,
|
||||
}),
|
||||
timestamp: Timestamp::now(),
|
||||
body_len: (body.len() as u64).into(),
|
||||
};
|
||||
|
||||
let message_encoded = MessageEncoded {
|
||||
header,
|
||||
data: MessageData { contents: body },
|
||||
};
|
||||
let send_channel = {
|
||||
let inner = self.inner.lock();
|
||||
let Some(send_channel) = inner.rpc_send_channel.as_ref().cloned() else {
|
||||
bail!("send channel is closed");
|
||||
};
|
||||
send_channel
|
||||
};
|
||||
send_channel
|
||||
.try_send(RPCWorkerRequest {
|
||||
enqueued_ts: Timestamp::now(),
|
||||
span: Span::current(),
|
||||
kind: RPCWorkerRequestKind::Message { message_encoded },
|
||||
})
|
||||
.map_err(|e| eyre!("failed to enqueue safety routed RPC message: {}", e))?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[instrument(level = "trace", target = "rpc", skip_all)]
|
||||
pub(super) fn enqueue_private_routed_message(
|
||||
&self,
|
||||
direct: RPCMessageHeaderDetailDirect,
|
||||
remote_safety_route: PublicKey,
|
||||
private_route: PublicKey,
|
||||
safety_spec: SafetySpec,
|
||||
body: Vec<u8>,
|
||||
) -> EyreResult<()> {
|
||||
let _guard = self
|
||||
.startup_context
|
||||
.startup_lock
|
||||
.enter()
|
||||
.wrap_err("not started up")?;
|
||||
|
||||
let header = MessageHeader {
|
||||
detail: RPCMessageHeaderDetail::PrivateRouted(RPCMessageHeaderDetailPrivateRouted {
|
||||
direct,
|
||||
remote_safety_route,
|
||||
private_route,
|
||||
safety_spec,
|
||||
}),
|
||||
timestamp: Timestamp::now(),
|
||||
body_len: (body.len() as u64).into(),
|
||||
};
|
||||
|
||||
let message_encoded = MessageEncoded {
|
||||
header,
|
||||
data: MessageData { contents: body },
|
||||
};
|
||||
|
||||
let send_channel = {
|
||||
let inner = self.inner.lock();
|
||||
let Some(send_channel) = inner.rpc_send_channel.as_ref().cloned() else {
|
||||
bail!("send channel is closed");
|
||||
};
|
||||
send_channel
|
||||
};
|
||||
send_channel
|
||||
.try_send(RPCWorkerRequest {
|
||||
enqueued_ts: Timestamp::now(),
|
||||
span: Span::current(),
|
||||
kind: RPCWorkerRequestKind::Message { message_encoded },
|
||||
})
|
||||
.map_err(|e| eyre!("failed to enqueue private routed RPC message: {}", e))?;
|
||||
Ok(())
|
||||
}
|
||||
}
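For orientation, a heavily simplified, hedged sketch of the queue-plus-worker shape this file implements: a bounded channel carries requests stamped with their enqueue time so a worker can measure dequeue latency before processing. It deliberately uses std threads, a std channel, and a single worker for illustration only; the real code uses flume, async tasks, a StopToken, and the latency accounting shown above.

```rust
use std::sync::mpsc::{sync_channel, Receiver, SyncSender};
use std::thread;
use std::time::Instant;

// Illustrative stand-in for RPCWorkerRequest: payload plus enqueue timestamp.
struct Request {
    enqueued_at: Instant,
    body: String,
}

// Worker loop: runs until the sender side is dropped (the simplified "stop").
fn worker(rx: Receiver<Request>) {
    while let Ok(req) = rx.recv() {
        let dequeue_latency = req.enqueued_at.elapsed();
        println!("processing {:?} after {:?} in queue", req.body, dequeue_latency);
    }
}

fn main() {
    // Bounded queue, analogous to flume::bounded(queue_size).
    let (tx, rx): (SyncSender<Request>, Receiver<Request>) = sync_channel(16);
    let handle = thread::spawn(move || worker(rx));

    tx.send(Request { enqueued_at: Instant::now(), body: "ping".into() }).unwrap();

    drop(tx); // closing the channel stops the worker
    handle.join().unwrap();
}
```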
|
153 veilid-core/src/stats_accounting.rs Normal file
@@ -0,0 +1,153 @@
|
||||
use super::*;
|
||||
|
||||
// Latency entry is per round-trip packet (ping or data)
|
||||
// - Size is number of entries
|
||||
const ROLLING_LATENCIES_SIZE: usize = 50;
|
||||
|
||||
// Transfers entries are in bytes total for the interval
|
||||
// - Size is number of entries
|
||||
// - Interval is number of seconds in each entry
|
||||
const ROLLING_TRANSFERS_SIZE: usize = 10;
|
||||
pub const ROLLING_TRANSFERS_INTERVAL_SECS: u32 = 1;
|
||||
|
||||
#[derive(Debug, Clone, Copy, PartialEq, Eq, Default)]
|
||||
pub struct TransferCount {
|
||||
down: ByteCount,
|
||||
up: ByteCount,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Default)]
|
||||
pub struct TransferStatsAccounting {
|
||||
rolling_transfers: VecDeque<TransferCount>,
|
||||
current_transfer: TransferCount,
|
||||
}
|
||||
|
||||
impl TransferStatsAccounting {
|
||||
pub fn new() -> Self {
|
||||
Self {
|
||||
rolling_transfers: VecDeque::new(),
|
||||
current_transfer: TransferCount::default(),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn add_down(&mut self, bytes: ByteCount) {
|
||||
self.current_transfer.down += bytes;
|
||||
}
|
||||
|
||||
pub fn add_up(&mut self, bytes: ByteCount) {
|
||||
self.current_transfer.up += bytes;
|
||||
}
|
||||
|
||||
pub fn roll_transfers(
|
||||
&mut self,
|
||||
last_ts: Timestamp,
|
||||
cur_ts: Timestamp,
|
||||
transfer_stats: &mut TransferStatsDownUp,
|
||||
) {
|
||||
let dur_ms = cur_ts.saturating_sub(last_ts) / 1000u64;
|
||||
while self.rolling_transfers.len() >= ROLLING_TRANSFERS_SIZE {
|
||||
self.rolling_transfers.pop_front();
|
||||
}
|
||||
self.rolling_transfers.push_back(self.current_transfer);
|
||||
|
||||
transfer_stats.down.total += self.current_transfer.down;
|
||||
transfer_stats.up.total += self.current_transfer.up;
|
||||
|
||||
self.current_transfer = TransferCount::default();
|
||||
|
||||
transfer_stats.down.maximum = 0.into();
|
||||
transfer_stats.up.maximum = 0.into();
|
||||
transfer_stats.down.minimum = u64::MAX.into();
|
||||
transfer_stats.up.minimum = u64::MAX.into();
|
||||
transfer_stats.down.average = 0.into();
|
||||
transfer_stats.up.average = 0.into();
|
||||
for xfer in &self.rolling_transfers {
|
||||
let bpsd = xfer.down * 1000u64 / dur_ms;
|
||||
let bpsu = xfer.up * 1000u64 / dur_ms;
|
||||
transfer_stats.down.maximum.max_assign(bpsd);
|
||||
transfer_stats.up.maximum.max_assign(bpsu);
|
||||
transfer_stats.down.minimum.min_assign(bpsd);
|
||||
transfer_stats.up.minimum.min_assign(bpsu);
|
||||
transfer_stats.down.average += bpsd;
|
||||
transfer_stats.up.average += bpsu;
|
||||
}
|
||||
let len = self.rolling_transfers.len() as u64;
|
||||
if len > 0 {
|
||||
transfer_stats.down.average /= len;
|
||||
transfer_stats.up.average /= len;
|
||||
}
|
||||
}
|
||||
}
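A hedged worked example of the bytes-per-second arithmetic `roll_transfers` applies to each rolling entry (the numbers are made up; the real code then folds each per-entry rate into the minimum, maximum, and average fields):

```rust
fn main() {
    // One rolling entry: 2_500_000 bytes received over a 1_000 ms interval.
    let dur_ms: u64 = 1_000;
    let interval_down_bytes: u64 = 2_500_000;

    // Same scaling as the loop above: bytes * 1000 / elapsed milliseconds.
    let bps_down = interval_down_bytes * 1000 / dur_ms;
    assert_eq!(bps_down, 2_500_000); // 2.5 MB/s for this entry
}
```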
|
||||
|
||||
#[derive(Debug, Clone, Default)]
|
||||
pub struct LatencyStatsAccounting {
|
||||
rolling_latencies: VecDeque<TimestampDuration>,
|
||||
}
|
||||
|
||||
impl LatencyStatsAccounting {
|
||||
pub fn new() -> Self {
|
||||
Self {
|
||||
rolling_latencies: VecDeque::new(),
|
||||
}
|
||||
}
|
||||
|
||||
fn get_tm_n(sorted_latencies: &[TimestampDuration], n: usize) -> Option<TimestampDuration> {
|
||||
let tmcount = sorted_latencies.len() * n / 100;
|
||||
if tmcount == 0 {
|
||||
None
|
||||
} else {
|
||||
let mut tm = TimestampDuration::new(0);
|
||||
for l in &sorted_latencies[..tmcount] {
|
||||
tm += *l;
|
||||
}
|
||||
tm /= tmcount as u64;
|
||||
Some(tm)
|
||||
}
|
||||
}
|
||||
|
||||
fn get_p_n(sorted_latencies: &[TimestampDuration], n: usize) -> TimestampDuration {
|
||||
let pindex = (sorted_latencies.len() * n / 100).saturating_sub(1);
|
||||
sorted_latencies[pindex]
|
||||
}
|
||||
|
||||
pub fn record_latency(&mut self, latency: TimestampDuration) -> LatencyStats {
|
||||
while self.rolling_latencies.len() >= ROLLING_LATENCIES_SIZE {
|
||||
self.rolling_latencies.pop_front();
|
||||
}
|
||||
self.rolling_latencies.push_back(latency);
|
||||
|
||||
// Calculate latency stats
|
||||
|
||||
let mut fastest = TimestampDuration::new(u64::MAX);
|
||||
let mut slowest = TimestampDuration::new(0u64);
|
||||
let mut average = TimestampDuration::new(0u64);
|
||||
|
||||
for rl in &self.rolling_latencies {
|
||||
fastest.min_assign(*rl);
|
||||
slowest.max_assign(*rl);
|
||||
average += *rl;
|
||||
}
|
||||
let len = self.rolling_latencies.len() as u64;
|
||||
if len > 0 {
|
||||
average /= len;
|
||||
}
|
||||
|
||||
let mut sorted_latencies: Vec<_> = self.rolling_latencies.iter().copied().collect();
|
||||
sorted_latencies.sort();
|
||||
|
||||
let tm90 = Self::get_tm_n(&sorted_latencies, 90).unwrap_or(average);
|
||||
let tm75 = Self::get_tm_n(&sorted_latencies, 75).unwrap_or(average);
|
||||
let p90 = Self::get_p_n(&sorted_latencies, 90);
|
||||
let p75 = Self::get_p_n(&sorted_latencies, 75);
|
||||
|
||||
LatencyStats {
|
||||
fastest,
|
||||
average,
|
||||
slowest,
|
||||
tm90,
|
||||
tm75,
|
||||
p90,
|
||||
p75,
|
||||
}
|
||||
}
|
||||
}
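The tm90/tm75 figures computed above are trimmed means (the average of the fastest 90% or 75% of samples), while p90/p75 are plain percentile samples. A hedged re-implementation over bare `u64` latencies, just to show the arithmetic on an already-sorted, non-empty list:

```rust
// Trimmed mean of the fastest n% of samples; None if fewer than 100/n samples.
fn tm_n(sorted: &[u64], n: usize) -> Option<u64> {
    let count = sorted.len() * n / 100;
    if count == 0 {
        return None;
    }
    Some(sorted[..count].iter().sum::<u64>() / count as u64)
}

// n-th percentile sample from an ascending-sorted, non-empty list.
fn p_n(sorted: &[u64], n: usize) -> u64 {
    sorted[(sorted.len() * n / 100).saturating_sub(1)]
}

fn main() {
    let sorted = [10u64, 20, 30, 40, 50, 60, 70, 80, 90, 100];
    assert_eq!(tm_n(&sorted, 90), Some(50)); // mean of the fastest 9 samples
    assert_eq!(p_n(&sorted, 90), 90);        // 90th-percentile sample
}
```

The trimmed mean is what the relay selection and node ranking code key on (`ls.tm90`), since it discounts the slowest outliers without ignoring them entirely.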
|
@@ -1120,7 +1120,7 @@ impl StorageManager
        let dest = rpc_processor
            .resolve_target_to_destination(
                vc.target,
                SafetySelection::Unsafe(Sequencing::NoPreference),
                SafetySelection::Unsafe(Sequencing::PreferOrdered),
            )
            .await
            .map_err(VeilidAPIError::from)?;
|
||||
|
@@ -761,7 +761,9 @@ impl VeilidAPI
    async fn debug_nodeinfo(&self, _args: String) -> VeilidAPIResult<String> {
        // Dump routing table entry
        let registry = self.core_context()?.registry();
        let nodeinfo = registry.routing_table().debug_info_nodeinfo();
        let nodeinfo_rtab = registry.routing_table().debug_info_nodeinfo();
        let nodeinfo_net = registry.network_manager().debug_info_nodeinfo();
        let nodeinfo_rpc = registry.rpc_processor().debug_info_nodeinfo();

        // Dump core state
        let state = self.get_state().await?;
@@ -790,7 +792,10 @@ impl VeilidAPI
            "Connection manager unavailable when detached".to_owned()
        };

        Ok(format!("{}\n{}\n{}\n", nodeinfo, peertable, connman))
        Ok(format!(
            "{}\n{}\n{}\n{}\n{}\n",
            nodeinfo_rtab, nodeinfo_net, nodeinfo_rpc, peertable, connman
        ))
    }

    fn debug_nodeid(&self, _args: String) -> VeilidAPIResult<String> {
|
||||
|
@@ -10,10 +10,6 @@ use wasm_bindgen_test::*;

wasm_bindgen_test_configure!(run_in_browser);

extern crate wee_alloc;
#[global_allocator]
static ALLOC: wee_alloc::WeeAlloc = wee_alloc::WeeAlloc::INIT;

static SETUP_ONCE: Once = Once::new();
pub fn setup() -> () {
    SETUP_ONCE.call_once(|| {
|
||||
|
@ -35,17 +35,17 @@ debug-load = ["dep:ctor", "dep:libc-print", "dep:android_log-sys", "dep:oslog"]
|
||||
|
||||
[dependencies]
|
||||
veilid-core = { path = "../../veilid-core", default-features = false }
|
||||
tracing = { version = "0.1.40", features = ["log", "attributes"] }
|
||||
tracing-subscriber = "0.3.18"
|
||||
tracing = { version = "0.1.41", features = ["log", "attributes"] }
|
||||
tracing-subscriber = "0.3.19"
|
||||
parking_lot = "0.12.3"
|
||||
backtrace = "0.3.71"
|
||||
serde_json = "1.0.120"
|
||||
serde = "1.0.204"
|
||||
futures-util = { version = "0.3.30", default-features = false, features = [
|
||||
backtrace = "^0.3.71"
|
||||
serde_json = "1.0.140"
|
||||
serde = "1.0.218"
|
||||
futures-util = { version = "0.3.31", default-features = false, features = [
|
||||
"alloc",
|
||||
] }
|
||||
cfg-if = "1.0.0"
|
||||
data-encoding = { version = "2.6.0" }
|
||||
data-encoding = { version = "2.8.0" }
|
||||
tracing-flame = "0.2.0"
|
||||
|
||||
# Dependencies for native builds only
|
||||
@ -55,15 +55,15 @@ tracing-opentelemetry = "0.21"
|
||||
opentelemetry = { version = "0.20" }
|
||||
opentelemetry-otlp = { version = "0.13" }
|
||||
opentelemetry-semantic-conventions = "0.12"
|
||||
async-std = { version = "1.12.0", features = ["unstable"], optional = true }
|
||||
tokio = { version = "1.38.1", features = ["full"], optional = true }
|
||||
tokio-stream = { version = "0.1.15", features = ["net"], optional = true }
|
||||
tokio-util = { version = "0.7.11", features = ["compat"], optional = true }
|
||||
allo-isolate = "0.1.25"
|
||||
async-std = { version = "1.13.0", features = ["unstable"], optional = true }
|
||||
tokio = { version = "1.43.0", features = ["full"], optional = true }
|
||||
tokio-stream = { version = "0.1.17", features = ["net"], optional = true }
|
||||
tokio-util = { version = "0.7.13", features = ["compat"], optional = true }
|
||||
allo-isolate = "0.1.26"
|
||||
ffi-support = "0.4.4"
|
||||
lazy_static = "1.5.0"
|
||||
hostname = "0.3.1"
|
||||
ctor = { version = "0.2.8", optional = true }
|
||||
ctor = { version = "0.2.9", optional = true }
|
||||
libc-print = { version = "0.1.23", optional = true }
|
||||
|
||||
|
||||
@ -74,7 +74,7 @@ libc-print = { version = "0.1.23", optional = true }
|
||||
[target.'cfg(target_os = "android")'.dependencies]
|
||||
jni = "0.21.1"
|
||||
paranoid-android = "0.2.2"
|
||||
android_log-sys = { version = "0.3.1", optional = true }
|
||||
android_log-sys = { version = "0.3.2", optional = true }
|
||||
|
||||
# Dependencies for iOS builds only
|
||||
[target.'cfg(target_os = "ios")'.dependencies]
|
||||
|
@ -6,11 +6,20 @@ from .state import VeilidState
|
||||
|
||||
|
||||
class RoutingContext(ABC):
|
||||
ref_count: int
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
):
|
||||
self.ref_count = 0
|
||||
|
||||
async def __aenter__(self) -> Self:
|
||||
self.ref_count += 1
|
||||
return self
|
||||
|
||||
async def __aexit__(self, *excinfo):
|
||||
if not self.is_done():
|
||||
self.ref_count -= 1
|
||||
if self.ref_count == 0 and not self.is_done():
|
||||
await self.release()
|
||||
|
||||
@abstractmethod
|
||||
@ -109,13 +118,22 @@ class RoutingContext(ABC):
|
||||
|
||||
|
||||
class TableDbTransaction(ABC):
|
||||
ref_count: int
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
):
|
||||
self.ref_count = 0
|
||||
|
||||
async def __aenter__(self) -> Self:
|
||||
self.ref_count += 1
|
||||
return self
|
||||
|
||||
async def __aexit__(self, *excinfo):
|
||||
if not self.is_done():
|
||||
await self.rollback()
|
||||
|
||||
self.ref_count -= 1
|
||||
if self.ref_count == 0 and not self.is_done():
|
||||
await self.release()
|
||||
|
||||
@abstractmethod
|
||||
def is_done(self) -> bool:
|
||||
pass
|
||||
@ -138,11 +156,20 @@ class TableDbTransaction(ABC):
|
||||
|
||||
|
||||
class TableDb(ABC):
|
||||
ref_count: int
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
):
|
||||
self.ref_count = 0
|
||||
|
||||
async def __aenter__(self) -> Self:
|
||||
self.ref_count += 1
|
||||
return self
|
||||
|
||||
async def __aexit__(self, *excinfo):
|
||||
if not self.is_done():
|
||||
self.ref_count -= 1
|
||||
if self.ref_count == 0 and not self.is_done():
|
||||
await self.release()
|
||||
|
||||
@abstractmethod
|
||||
@ -179,11 +206,20 @@ class TableDb(ABC):
|
||||
|
||||
|
||||
class CryptoSystem(ABC):
|
||||
ref_count: int
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
):
|
||||
self.ref_count = 0
|
||||
|
||||
async def __aenter__(self) -> Self:
|
||||
self.ref_count += 1
|
||||
return self
|
||||
|
||||
async def __aexit__(self, *excinfo):
|
||||
if not self.is_done():
|
||||
self.ref_count -= 1
|
||||
if self.ref_count == 0 and not self.is_done():
|
||||
await self.release()
|
||||
|
||||
@abstractmethod
|
||||
@ -306,11 +342,20 @@ class CryptoSystem(ABC):
|
||||
|
||||
|
||||
class VeilidAPI(ABC):
|
||||
ref_count: int
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
):
|
||||
self.ref_count = 0
|
||||
|
||||
async def __aenter__(self) -> Self:
|
||||
self.ref_count += 1
|
||||
return self
|
||||
|
||||
async def __aexit__(self, *excinfo):
|
||||
if not self.is_done():
|
||||
self.ref_count -= 1
|
||||
if self.ref_count == 0 and not self.is_done():
|
||||
await self.release()
|
||||
|
||||
@abstractmethod
|
||||
|
@ -99,6 +99,8 @@ class _JsonVeilidAPI(VeilidAPI):
|
||||
update_callback: Callable[[VeilidUpdate], Awaitable],
|
||||
validate_schema: bool = True,
|
||||
):
|
||||
super().__init__()
|
||||
|
||||
self.reader = reader
|
||||
self.writer = writer
|
||||
self.update_callback = update_callback
|
||||
@ -308,7 +310,7 @@ class _JsonVeilidAPI(VeilidAPI):
|
||||
|
||||
# Validate if we have a validator
|
||||
if response["op"] != req["op"]:
|
||||
raise ValueError("Response op does not match request op")
|
||||
raise ValueError(f"Response op does not match request op: {response['op']} != {req['op']}")
|
||||
if validate is not None:
|
||||
validate(req, response)
|
||||
|
||||
@ -459,7 +461,7 @@ class _JsonVeilidAPI(VeilidAPI):
|
||||
|
||||
def validate_rc_op(request: dict, response: dict):
|
||||
if response["rc_op"] != request["rc_op"]:
|
||||
raise ValueError("Response rc_op does not match request rc_op")
|
||||
raise ValueError(f"Response rc_op does not match request rc_op: {response["rc_op"]} != {request["rc_op"]}")
|
||||
|
||||
|
||||
class _JsonRoutingContext(RoutingContext):
|
||||
@ -468,6 +470,8 @@ class _JsonRoutingContext(RoutingContext):
|
||||
done: bool
|
||||
|
||||
def __init__(self, api: _JsonVeilidAPI, rc_id: int):
|
||||
super().__init__()
|
||||
|
||||
self.api = api
|
||||
self.rc_id = rc_id
|
||||
self.done = False
|
||||
@ -728,7 +732,7 @@ class _JsonRoutingContext(RoutingContext):
|
||||
|
||||
def validate_tx_op(request: dict, response: dict):
|
||||
if response["tx_op"] != request["tx_op"]:
|
||||
raise ValueError("Response tx_op does not match request tx_op")
|
||||
raise ValueError(f"Response tx_op does not match request tx_op: {response['tx_op']} != {request['tx_op']}")
|
||||
|
||||
|
||||
class _JsonTableDbTransaction(TableDbTransaction):
|
||||
@ -737,6 +741,8 @@ class _JsonTableDbTransaction(TableDbTransaction):
|
||||
done: bool
|
||||
|
||||
def __init__(self, api: _JsonVeilidAPI, tx_id: int):
|
||||
super().__init__()
|
||||
|
||||
self.api = api
|
||||
self.tx_id = tx_id
|
||||
self.done = False
|
||||
@ -810,7 +816,7 @@ class _JsonTableDbTransaction(TableDbTransaction):
|
||||
|
||||
def validate_db_op(request: dict, response: dict):
|
||||
if response["db_op"] != request["db_op"]:
|
||||
raise ValueError("Response db_op does not match request db_op")
|
||||
raise ValueError(f"Response db_op does not match request db_op: {response['db_op']} != {request['db_op']}")
|
||||
|
||||
|
||||
class _JsonTableDb(TableDb):
|
||||
@ -819,6 +825,8 @@ class _JsonTableDb(TableDb):
|
||||
done: bool
|
||||
|
||||
def __init__(self, api: _JsonVeilidAPI, db_id: int):
|
||||
super().__init__()
|
||||
|
||||
self.api = api
|
||||
self.db_id = db_id
|
||||
self.done = False
|
||||
@ -929,7 +937,7 @@ class _JsonTableDb(TableDb):
|
||||
|
||||
def validate_cs_op(request: dict, response: dict):
|
||||
if response["cs_op"] != request["cs_op"]:
|
||||
raise ValueError("Response cs_op does not match request cs_op")
|
||||
raise ValueError(f"Response cs_op does not match request cs_op: {response['cs_op']} != {request['cs_op']}")
|
||||
|
||||
|
||||
class _JsonCryptoSystem(CryptoSystem):
|
||||
@ -938,6 +946,8 @@ class _JsonCryptoSystem(CryptoSystem):
|
||||
done: bool
|
||||
|
||||
def __init__(self, api: _JsonVeilidAPI, cs_id: int):
|
||||
super().__init__()
|
||||
|
||||
self.api = api
|
||||
self.cs_id = cs_id
|
||||
self.done = False
|
||||
|
@@ -53,8 +53,8 @@ geolocation = ["veilid-core/geolocation"]

[dependencies]
veilid-core = { path = "../veilid-core", default-features = false }
tracing = { version = "^0.1.40", features = ["log", "attributes"] }
tracing-subscriber = { version = "^0.3.18", features = ["env-filter", "time"] }
tracing = { version = "^0.1.41", features = ["log", "attributes"] }
tracing-subscriber = { version = "^0.3.19", features = ["env-filter", "time"] }
tracing-appender = "^0.2.3"
tracing-opentelemetry = "^0.24.0"
# Buggy: tracing-error = "^0"
@@ -62,21 +62,21 @@ opentelemetry = { version = "^0.23" }
opentelemetry-otlp = { version = "^0.16.0", default-features = false, optional = true }
opentelemetry_sdk = "0.23.0"
opentelemetry-semantic-conventions = "^0.16.0"
async-std = { version = "^1.12.0", features = ["unstable"], optional = true }
tokio = { version = "^1.38.1", features = ["full", "tracing"], optional = true }
tokio-stream = { version = "^0.1.15", features = ["net"], optional = true }
tokio-util = { version = "^0.7.11", features = ["compat"], optional = true }
async-std = { version = "^1.13.0", features = ["unstable"], optional = true }
tokio = { version = "^1.43.0", features = ["full", "tracing"], optional = true }
tokio-stream = { version = "^0.1.17", features = ["net"], optional = true }
tokio-util = { version = "^0.7.13", features = ["compat"], optional = true }
console-subscriber = { version = "^0.3.0", optional = true }
async-tungstenite = { version = "^0.27.0", features = ["async-tls"] }
color-eyre = { version = "^0.6.3", default-features = false }
backtrace = "^0.3.71"
clap = { version = "^4.5.9", features = ["derive", "string", "wrap_help"] }
clap = { version = "^4.5.31", features = ["derive", "string", "wrap_help"] }
directories = "^5.0.1"
parking_lot = "^0.12.3"
config = { version = "^0.14.0", default-features = false, features = ["yaml"] }
config = { version = "^0.14.1", default-features = false, features = ["yaml"] }
cfg-if = "^1.0.0"
serde = "^1.0.204"
serde_derive = "^1.0.204"
serde = "^1.0.218"
serde_derive = "^1.0.218"
serde_yaml = { package = "serde_yaml_ng", version = "^0.10.0" }
json = "^0"
futures-util = { version = "^0", default-features = false, features = [
@@ -91,10 +91,10 @@ rpassword = "^7"
hostname = "^0"
stop-token = { version = "^0", default-features = false }
sysinfo = { version = "^0.30.13", default-features = false }
wg = { version = "^0.9.1", features = ["future"] }
wg = { version = "^0.9.2", features = ["future"] }
tracing-flame = { version = "0.2.0", optional = true }
time = { version = "0.3.36", features = ["local-offset"] }
chrono = "0.4.38"
time = { version = "0.3.38", features = ["local-offset"] }
chrono = "0.4.40"

[target.'cfg(windows)'.dependencies]
windows-service = "^0"
@@ -108,10 +108,10 @@ nix = "^0.29.0"
tracing-perfetto = { version = "0.1.5", optional = true }

[target.'cfg(target_os = "linux")'.dependencies]
tracing-journald = "^0.3.0"
tracing-journald = "^0.3.1"

[dev-dependencies]
serial_test = "^3.1.1"
serial_test = "^3.2.0"

[lints]
workspace = true

@@ -47,6 +47,7 @@ veilid_tools_android_tests = ["dep:paranoid-android"]
veilid_tools_ios_tests = ["dep:tracing", "dep:oslog", "dep:tracing-oslog"]
tracing = ["dep:tracing", "dep:tracing-subscriber", "tokio/tracing"]
debug-locks = []
debug-duration-timeout = []

virtual-network = []
virtual-network-server = [
@@ -67,66 +68,65 @@ virtual-router-bin = [
]

[dependencies]
tracing = { version = "0.1.40", features = [
tracing = { version = "0.1.41", features = [
    "log",
    "attributes",
], optional = true }
tracing-subscriber = { version = "0.3.18", features = [
tracing-subscriber = { version = "0.3.19", features = [
    "env-filter",
    "time",
], optional = true }
log = { version = "0.4.22" }
log = { version = "0.4.26" }
eyre = "0.6.12"
static_assertions = "1.1.0"
serde = { version = "1.0.214", features = ["derive", "rc"] }
postcard = { version = "1.0.10", features = ["use-std"] }
serde = { version = "1.0.218", features = ["derive", "rc"] }
postcard = { version = "1.1.1", features = ["use-std"] }
cfg-if = "1.0.0"
thiserror = "1.0.63"
futures-util = { version = "0.3.30", default-features = false, features = [
thiserror = "1.0.69"
futures-util = { version = "0.3.31", default-features = false, features = [
    "alloc",
] }
futures_codec = "0.4.1"
parking_lot = "0.12.3"
async-lock = "3.4.0"
once_cell = "1.19.0"
once_cell = "1.20.3"
stop-token = { version = "0.7.0", default-features = false }
rand = "0.8.5"
rand_core = "0.6.4"
backtrace = "0.3.71"
backtrace = "^0.3.71"
fn_name = "0.1.0"
range-set-blaze = "0.1.16"
flume = { version = "0.11.0", features = ["async"] }
flume = { version = "0.11.1", features = ["async"] }
imbl = { version = "3.0.0", features = ["serde"] }


# Dependencies for native builds only
# Linux, Windows, Mac, iOS, Android
[target.'cfg(not(all(target_arch = "wasm32", target_os = "unknown")))'.dependencies]
async-io = { version = "1.13.0" }
async-std = { version = "1.12.0", features = ["unstable"], optional = true }
async-std = { version = "1.13.0", features = ["unstable"], optional = true }
bugsalot = { package = "veilid-bugsalot", version = "0.2.0", optional = true }
time = { version = "0.3.36", features = [
time = { version = "0.3.38", features = [
    "local-offset",
    "formatting",
], optional = true }
chrono = "0.4.38"
chrono = "0.4.40"
ctrlc = "^3"
futures-util = { version = "0.3.30", default-features = false, features = [
futures-util = { version = "0.3.31", default-features = false, features = [
    "async-await",
    "sink",
    "std",
    "io",
] }
indent = { version = "0.1.1", optional = true }
libc = "0.2.155"
libc = "0.2.170"
nix = { version = "0.27.1", features = ["user"] }
socket2 = { version = "0.5.7", features = ["all"] }
tokio = { version = "1.38.1", features = ["full"], optional = true }
tokio-util = { version = "0.7.11", features = ["compat"], optional = true }
tokio-stream = { version = "0.1.15", features = ["net"], optional = true }
socket2 = { version = "0.5.8", features = ["all"] }
tokio = { version = "1.43.0", features = ["full"], optional = true }
tokio-util = { version = "0.7.13", features = ["compat"], optional = true }
tokio-stream = { version = "0.1.17", features = ["net"], optional = true }

ws_stream_tungstenite = { version = "0.14.0", optional = true }
async-tungstenite = { version = "0.28.0", optional = true }
async-tungstenite = { version = "0.28.2", optional = true }
clap = { version = "4", features = ["derive"], optional = true }
ipnet = { version = "2", features = ["serde"], optional = true }
serde_yaml = { package = "serde_yaml_ng", version = "^0.10.0", optional = true }
@@ -135,9 +135,9 @@ rand_chacha = { version = "0.3.1", optional = true }

# Dependencies for WASM builds only
[target.'cfg(all(target_arch = "wasm32", target_os = "unknown"))'.dependencies]
wasm-bindgen = "0.2.92"
js-sys = "0.3.70"
wasm-bindgen-futures = "0.4.42"
wasm-bindgen = "0.2.100"
js-sys = "0.3.77"
wasm-bindgen-futures = "0.4.50"
async_executors = { version = "0.7.0", default-features = false }
getrandom = { version = "0.2", features = ["js"] }
ws_stream_wasm = { version = "0.7.4", optional = true }
@@ -183,8 +183,7 @@ serial_test = { version = "2.0.0", default-features = false, features = [
    "async",
] }
console_error_panic_hook = "0.1.7"
wasm-bindgen-test = "0.3.42"
wee_alloc = "0.4.5"
wasm-bindgen-test = "0.3.50"
wasm-logger = "0.2.0"
tracing-wasm = { version = "0.2.1" }

@@ -14,9 +14,6 @@ type SequenceType = u16;
const HEADER_LEN: usize = 8;
const MAX_LEN: usize = LengthType::MAX as usize;

// XXX: keep statistics on all drops and why we dropped them
// XXX: move to config eventually?

/// The hard-coded maximum fragment size used by AssemblyBuffer
///
/// Eventually this should be parameterized and made configurable.
@@ -119,7 +116,7 @@ impl PeerMessages {
        let mut assembly = MessageAssembly {
            timestamp,
            seq,
            data: vec![0u8; len as usize],
            data: unsafe { unaligned_u8_vec_uninit(len as usize) },
            parts: RangeSetBlaze::from_iter([part_start..=part_end]),
        };
        assembly.data[part_start as usize..=part_end as usize].copy_from_slice(chunk);
@@ -229,6 +226,7 @@ struct AssemblyBufferUnlockedInner {
/// * No sequencing of packets. Packets may still be delivered to the application out of order, but this guarantees that only whole packets will be delivered if all of their fragments are received.

#[derive(Clone)]
#[must_use]
pub struct AssemblyBuffer {
    inner: Arc<Mutex<AssemblyBufferInner>>,
    unlocked_inner: Arc<AssemblyBufferUnlockedInner>,
@@ -247,7 +245,6 @@ impl AssemblyBuffer {
        }
    }

    #[must_use]
    pub fn new() -> Self {
        Self {
            inner: Arc::new(Mutex::new(Self::new_inner())),

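The doc comment above promises whole-packet delivery: an assembly is only handed to the application once every fragment range has arrived, which is also what makes the switch to an uninitialized buffer safe, since tracked ranges are overwritten before they are read. A minimal self-contained sketch of that completeness check, using range-set-blaze as the diff does; the `is_complete` helper and its names are illustrative, not taken from the repository:

use range_set_blaze::RangeSetBlaze;

fn is_complete(parts: &RangeSetBlaze<u32>, len: u32) -> bool {
    // Complete once the received fragment ranges cover 0..len exactly.
    *parts == RangeSetBlaze::from_iter([0..=len - 1])
}

fn main() {
    let len = 8u32;
    let mut parts = RangeSetBlaze::from_iter([0..=3u32]);
    assert!(!is_complete(&parts, len)); // later fragments still missing
    parts.ranges_insert(4..=7);
    assert!(is_complete(&parts, len)); // whole packet may now be delivered
}
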
@@ -10,7 +10,7 @@ where
{
    table: AsyncTagLockTable<T>,
    tag: T,
    _guard: AsyncMutexGuardArc<()>,
    guard: Option<AsyncMutexGuardArc<()>>,
}

impl<T> AsyncTagLockGuard<T>
@@ -21,7 +21,7 @@ where
        Self {
            table,
            tag,
            _guard: guard,
            guard: Some(guard),
        }
    }
}
@@ -45,7 +45,8 @@ where
        if guards == 0 {
            inner.table.remove(&self.tag).unwrap();
        }
        // Proceed with releasing _guard, which may cause some concurrent tag lock to acquire
        // Proceed with releasing guard, which may cause some concurrent tag lock to acquire
        drop(self.guard.take());
    }
}

@@ -153,7 +154,7 @@ where
            }
            std::collections::hash_map::Entry::Vacant(v) => {
                let mutex = Arc::new(AsyncMutex::new(()));
                let guard = asyncmutex_try_lock_arc!(mutex)?;
                let guard = asyncmutex_try_lock_arc!(mutex).unwrap();
                v.insert(AsyncTagLockTableEntry { mutex, guards: 1 });
                guard
            }

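Moving the guard into an `Option` lets the `Drop` impl above finish its table bookkeeping first and then release the lock at an explicit point with `drop(self.guard.take())`, rather than relying on implicit field drop order. A self-contained sketch of that idiom; the types below are stand-ins, not the veilid-tools ones:

// Stand-in for AsyncMutexGuardArc<()>: announces when it is released.
struct FakeGuard(&'static str);
impl Drop for FakeGuard {
    fn drop(&mut self) {
        println!("lock for {} released", self.0);
    }
}

struct TagGuard {
    tag: &'static str,
    guard: Option<FakeGuard>,
}

impl Drop for TagGuard {
    fn drop(&mut self) {
        // Bookkeeping happens while the lock is still held
        // (like removing the tag's table entry in the diff above)...
        println!("removing table entry for {}", self.tag);
        // ...then the lock is released explicitly, waking any waiter.
        drop(self.guard.take());
    }
}

fn main() {
    let _guard = TagGuard {
        tag: "peer-1",
        guard: Some(FakeGuard("peer-1")),
    };
}
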
@@ -278,85 +278,3 @@ macro_rules! network_result_try {
        }
    };
}

#[macro_export]
macro_rules! network_result_value_or_log {
    ($self:ident $r:expr => $f:expr) => {
        network_result_value_or_log!($self $r => [ "" ] $f )
    };
    ($self:ident $r:expr => [ $d:expr ] $f:expr) => { {
        let __extra_message = if debug_target_enabled!("network_result") {
            $d.to_string()
        } else {
            "".to_string()
        };
        match $r {
            NetworkResult::Timeout => {
                veilid_log!($self debug
                    "{} at {}@{}:{} in {}{}",
                    "Timeout",
                    file!(),
                    line!(),
                    column!(),
                    fn_name::uninstantiated!(),
                    __extra_message
                );
                $f
            }
            NetworkResult::ServiceUnavailable(ref s) => {
                veilid_log!($self debug
                    "{}({}) at {}@{}:{} in {}{}",
                    "ServiceUnavailable",
                    s,
                    file!(),
                    line!(),
                    column!(),
                    fn_name::uninstantiated!(),
                    __extra_message
                );
                $f
            }
            NetworkResult::NoConnection(ref e) => {
                veilid_log!($self debug
                    "{}({}) at {}@{}:{} in {}{}",
                    "No connection",
                    e.to_string(),
                    file!(),
                    line!(),
                    column!(),
                    fn_name::uninstantiated!(),
                    __extra_message
                );
                $f
            }
            NetworkResult::AlreadyExists(ref e) => {
                veilid_log!($self debug
                    "{}({}) at {}@{}:{} in {}{}",
                    "Already exists",
                    e.to_string(),
                    file!(),
                    line!(),
                    column!(),
                    fn_name::uninstantiated!(),
                    __extra_message
                );
                $f
            }
            NetworkResult::InvalidMessage(ref s) => {
                veilid_log!($self debug
                    "{}({}) at {}@{}:{} in {}{}",
                    "Invalid message",
                    s,
                    file!(),
                    line!(),
                    column!(),
                    fn_name::uninstantiated!(),
                    __extra_message
                );
                $f
            }
            NetworkResult::Value(v) => v,
        }
    } };

}

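For reference, the macro removed above implemented a log-and-fall-back pattern: unwrap `NetworkResult::Value`, and for every other variant emit a debug log with the call site and then evaluate a caller-supplied fallback expression. A trimmed, self-contained sketch of that pattern with local names, not the repository's macro:

enum NetworkResult<T> {
    Timeout,
    NoConnection(String),
    Value(T),
}

macro_rules! value_or_log {
    ($r:expr => $f:expr) => {
        match $r {
            NetworkResult::Timeout => {
                eprintln!("Timeout at {}:{}", file!(), line!());
                $f
            }
            NetworkResult::NoConnection(ref e) => {
                eprintln!("No connection ({}) at {}:{}", e, file!(), line!());
                $f
            }
            NetworkResult::Value(v) => v,
        }
    };
}

fn main() {
    let r: NetworkResult<u32> = NetworkResult::NoConnection("refused".into());
    // On any non-Value variant the fallback expression runs; here it returns early.
    let v = value_or_log!(r => return);
    println!("got {}", v);
}
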
@@ -125,27 +125,33 @@ pub fn display_duration(dur: u64) -> String {
    let secs = dur / SEC;
    let dur = dur % SEC;
    let msecs = dur / MSEC;
    let dur = dur % MSEC;

    format!(
        "{}{}{}{}.{:03}s",
        if days != 0 {
            format!("{}d", days)
        } else {
            "".to_owned()
        },
        if hours != 0 {
            format!("{}h", hours)
        } else {
            "".to_owned()
        },
        if mins != 0 {
            format!("{}m", mins)
        } else {
            "".to_owned()
        },
        secs,
        msecs
    )
    // microseconds format
    if days == 0 && hours == 0 && mins == 0 && secs == 0 {
        format!("{}.{:03}ms", msecs, dur)
    } else {
        format!(
            "{}{}{}{}.{:03}s",
            if days != 0 {
                format!("{}d", days)
            } else {
                "".to_owned()
            },
            if hours != 0 {
                format!("{}h", hours)
            } else {
                "".to_owned()
            },
            if mins != 0 {
                format!("{}m", mins)
            } else {
                "".to_owned()
            },
            secs,
            msecs
        )
    }
}

#[must_use]

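The new branch in display_duration gives sub-second durations a millisecond rendering instead of collapsing them to a "0.000s"-style string. A tiny sketch of just that branch, assuming durations are in microseconds (SEC = 1_000_000, MSEC = 1_000), which matches the `duration_us` usage in debug_duration below; `display_short` is a local illustration, not the library function:

fn display_short(dur_us: u64) -> String {
    const MSEC: u64 = 1_000;
    // Mirrors the new "{}.{:03}ms" arm above: whole milliseconds, then the
    // leftover microseconds as the fractional part.
    let msecs = dur_us / MSEC;
    let usecs = dur_us % MSEC;
    format!("{}.{:03}ms", msecs, usecs)
}

fn main() {
    assert_eq!(display_short(1_500), "1.500ms");
    assert_eq!(display_short(250), "0.250ms");
}
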
@@ -522,13 +522,33 @@ pub fn is_debug_backtrace_enabled() -> bool {
}

#[track_caller]
pub fn debug_duration<R, F: Future<Output = R>, T: FnOnce() -> F>(f: T) -> impl Future<Output = R> {
    let location = std::panic::Location::caller();
pub fn debug_duration<R, F: Future<Output = R>, T: FnOnce() -> F>(
    f: T,
    opt_timeout_us: Option<u64>,
) -> impl Future<Output = R> {
    let location = core::panic::Location::caller();
    async move {
        let t1 = get_timestamp();
        let out = f().await;
        let t2 = get_timestamp();
        debug!("duration@{}: {}", location, display_duration(t2 - t1));
        let duration_us = t2 - t1;
        if let Some(timeout_us) = opt_timeout_us {
            if duration_us > timeout_us {
                #[cfg(not(feature = "debug-duration-timeout"))]
                debug!(
                    "Excessive duration: {}\n{:?}",
                    display_duration(duration_us),
                    backtrace::Backtrace::new()
                );
                #[cfg(feature = "debug-duration-timeout")]
                panic!(format!(
                    "Duration panic timeout exceeded: {}",
                    display_duration(duration_us)
                ));
            }
        } else {
            debug!("Duration: {} = {}", location, display_duration(duration_us),);
        }
        out
    }
}

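Callers of debug_duration now pass an optional timeout in microseconds; exceeding it produces the "Excessive duration" report, or a panic when the new debug-duration-timeout feature is enabled. A self-contained sketch of the calling convention with a local stand-in for debug_duration (the `timed` helper and the use of the futures crate to drive the example are assumptions, not the veilid-tools implementation):

use std::future::Future;
use std::time::Instant;

// Local stand-in for debug_duration: time the wrapped future and report
// if an optional microsecond budget is exceeded.
async fn timed<R, F: Future<Output = R>, T: FnOnce() -> F>(
    f: T,
    opt_timeout_us: Option<u64>,
) -> R {
    let start = Instant::now();
    let out = f().await;
    let duration_us = start.elapsed().as_micros() as u64;
    match opt_timeout_us {
        Some(timeout_us) if duration_us > timeout_us => {
            eprintln!("excessive duration: {}us (budget {}us)", duration_us, timeout_us);
        }
        Some(_) => {}
        None => eprintln!("duration: {}us", duration_us),
    }
    out
}

fn main() {
    futures::executor::block_on(async {
        // Flag the wrapped work if it takes longer than 5ms.
        let value = timed(|| async { 40 + 2 }, Some(5_000)).await;
        assert_eq!(value, 42);
    });
}
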
@@ -9,10 +9,6 @@ use wasm_bindgen_test::*;

wasm_bindgen_test_configure!(run_in_browser);

extern crate wee_alloc;
#[global_allocator]
static ALLOC: wee_alloc::WeeAlloc = wee_alloc::WeeAlloc::INIT;

static SETUP_ONCE: Once = Once::new();
pub fn setup() -> () {
    SETUP_ONCE.call_once(|| {

@@ -28,7 +28,6 @@ tracing-subscriber = "^0"

wasm-bindgen = { version = "^0", features = ["serde-serialize"] }
console_error_panic_hook = "^0"
wee_alloc = "^0"
cfg-if = "^1"
wasm-bindgen-futures = "^0"
js-sys = "^0"

@@ -35,11 +35,6 @@ pub mod veilid_table_db_js;
mod wasm_helpers;
use wasm_helpers::*;

// Allocator
extern crate wee_alloc;
#[global_allocator]
static ALLOC: wee_alloc::WeeAlloc = wee_alloc::WeeAlloc::INIT;

// API Singleton
lazy_static! {
    static ref VEILID_API: SendWrapper<RefCell<Option<veilid_core::VeilidAPI>>> =