Mirror of https://gitlab.com/veilid/veilid.git (synced 2025-02-21 06:59:55 -05:00)

Commit d34801ad42 ("Log split")
Parent: fe3fec34bf
@@ -1,6 +1,8 @@
use crate::{network_manager::StartupDisposition, *};
use routing_table::RoutingTableHealth;

impl_veilid_log_facility!("attach");

#[derive(Debug, Clone)]
pub struct AttachmentManagerStartupContext {
pub startup_lock: Arc<StartupLock>,
@@ -216,7 +218,7 @@ impl AttachmentManager {
guard.success();

// Inform api clients that things have changed
log_net!(debug "sending network state update to api clients");
veilid_log!(self trace "sending network state update to api clients");
network_manager.send_network_update();

Ok(StartupDisposition::Success)
@@ -247,7 +249,7 @@ impl AttachmentManager {
guard.success();

// send update
log_net!(debug "sending network state update to api clients");
veilid_log!(self debug "sending network state update to api clients");
network_manager.send_network_update();
}

@@ -265,7 +267,7 @@ impl AttachmentManager {

#[instrument(parent = None, level = "debug", skip_all)]
async fn attachment_maintainer(&self) {
log_net!(debug "attachment starting");
veilid_log!(self debug "attachment starting");
self.update_attaching_detaching_state(AttachmentState::Attaching);

let network_manager = self.network_manager();
@@ -282,12 +284,12 @@ impl AttachmentManager {
restart = true;
}
Ok(StartupDisposition::BindRetry) => {
info!("waiting for network to bind...");
veilid_log!(self info "waiting for network to bind...");
restart = true;
restart_delay = 10;
}
Ok(StartupDisposition::Success) => {
log_net!(debug "started maintaining peers");
veilid_log!(self debug "started maintaining peers");

while self.inner.lock().maintain_peers {
// tick network manager
@@ -301,7 +303,7 @@ impl AttachmentManager {

// see if we need to restart the network
if network_manager.network_needs_restart() {
info!("Restarting network");
veilid_log!(self info "Restarting network");
restart = true;
break;
}
@@ -316,14 +318,14 @@ impl AttachmentManager {
.clamp(0, 1_000_000u64);
sleep((wait_duration / 1_000) as u32).await;
}
log_net!(debug "stopped maintaining peers");
veilid_log!(self debug "stopped maintaining peers");

if !restart {
self.update_attaching_detaching_state(AttachmentState::Detaching);
log_net!(debug "attachment stopping");
veilid_log!(self debug "attachment stopping");
}

log_net!(debug "shutting down attachment");
veilid_log!(self debug "shutting down attachment");
self.shutdown().await;
}
}
@@ -332,7 +334,7 @@ impl AttachmentManager {
break;
}

log_net!(debug "completely restarting attachment");
veilid_log!(self debug "completely restarting attachment");

// chill out for a second first, give network stack time to settle out
for _ in 0..restart_delay {
@@ -344,7 +346,7 @@ impl AttachmentManager {
}

self.update_attaching_detaching_state(AttachmentState::Detached);
log_net!(debug "attachment stopped");
veilid_log!(self debug "attachment stopped");
}

#[instrument(level = "debug", skip_all, err)]
@@ -1,7 +1,7 @@
use std::marker::PhantomData;

use super::*;

impl_veilid_log_facility!("registry");

pub trait AsAnyArcSendSync {
fn as_any_arc_send_sync(self: Arc<Self>) -> Arc<dyn core::any::Any + Send + Sync>;
}
@@ -15,6 +15,7 @@ impl<T: Send + Sync + 'static> AsAnyArcSendSync for T {
pub trait VeilidComponent:
AsAnyArcSendSync + VeilidComponentRegistryAccessor + core::fmt::Debug
{
fn name(&self) -> &'static str;
fn init(&self) -> SendPinBoxFutureLifetime<'_, EyreResult<()>>;
fn post_init(&self) -> SendPinBoxFutureLifetime<'_, EyreResult<()>>;
fn pre_terminate(&self) -> SendPinBoxFutureLifetime<'_, ()>;
@@ -33,6 +34,15 @@ pub trait VeilidComponentRegistryAccessor {
fn event_bus(&self) -> EventBus {
self.registry().event_bus.clone()
}
fn namespace(&self) -> &'static str {
self.registry().namespace()
}
fn program_name(&self) -> &'static str {
self.registry().program_name()
}
fn log_key(&self) -> &'static str {
self.registry().log_key()
}
}

pub struct VeilidComponentGuard<'a, T: VeilidComponent + Send + Sync + 'static> {
@@ -62,12 +72,20 @@ struct VeilidComponentRegistryInner {
pub struct VeilidComponentRegistry {
inner: Arc<Mutex<VeilidComponentRegistryInner>>,
config: VeilidConfig,
namespace: &'static str,
program_name: &'static str,
log_key: &'static str,
event_bus: EventBus,
init_lock: Arc<AsyncMutex<bool>>,
}

impl VeilidComponentRegistry {
pub fn new(config: VeilidConfig) -> Self {
let (namespace, program_name) =
config.with(|c| (c.namespace.to_static_str(), c.program_name.to_static_str()));

let log_key = VeilidLayerFilter::make_veilid_log_key(program_name, namespace);

Self {
inner: Arc::new(Mutex::new(VeilidComponentRegistryInner {
type_map: HashMap::new(),
@@ -75,6 +93,9 @@ impl VeilidComponentRegistry {
mock: false,
})),
config,
namespace,
program_name,
log_key,
event_bus: EventBus::new(),
init_lock: Arc::new(AsyncMutex::new(false)),
}
@@ -85,6 +106,18 @@ impl VeilidComponentRegistry {
inner.mock = true;
}

pub fn namespace(&self) -> &'static str {
self.namespace
}

pub fn program_name(&self) -> &'static str {
self.program_name
}

pub fn log_key(&self) -> &'static str {
self.log_key
}

pub fn register<
T: VeilidComponent + Send + Sync + 'static,
F: FnOnce(VeilidComponentRegistry) -> T,
@@ -216,6 +249,14 @@ impl VeilidComponentRegistry {
}
async fn terminate_inner(&self, initialized: Vec<Arc<dyn VeilidComponent + Send + Sync>>) {
for component in initialized.iter().rev() {
let refs = Arc::strong_count(component);
if refs > 2 {
veilid_log!(self warn
"Terminating component '{}' while still referenced ({} extra references)",
component.name(),
refs - 2
);
}
component.terminate().await;
}
}
@@ -243,7 +284,7 @@ impl VeilidComponentRegistry {
.unwrap();
Some(VeilidComponentGuard {
component,
_phantom: PhantomData {},
_phantom: core::marker::PhantomData {},
})
}
}
@@ -275,6 +316,10 @@ macro_rules! impl_veilid_component {
impl_veilid_component_registry_accessor!($component_name);

impl VeilidComponent for $component_name {
fn name(&self) -> &'static str {
stringify!($component_name)
}

fn init(&self) -> SendPinBoxFutureLifetime<'_, EyreResult<()>> {
Box::pin(async { self.init_async().await })
}
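For orientation, a minimal sketch (function name hypothetical) of the accessors this hunk adds to VeilidComponentRegistryAccessor; log_key() is the value the new veilid_log! macro attaches to every event:

fn describe_component<T: VeilidComponentRegistryAccessor>(c: &T) {
    // log_key() is "program_name" when the namespace is empty,
    // or "program_name|namespace" otherwise (see make_veilid_log_key below).
    let _namespace = c.namespace();
    let _program = c.program_name();
    let _log_key = c.log_key();
}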
@@ -9,6 +9,8 @@ use crate::veilid_api::*;
use crate::veilid_config::*;
use crate::*;

impl_veilid_log_facility!("corectx");

pub type UpdateCallback = Arc<dyn Fn(VeilidUpdate) + Send + Sync>;

type InitKey = (String, String);
@@ -53,8 +55,6 @@ impl VeilidCoreContext {
}
}

info!("Veilid API starting up");

let (program_name, namespace, update_callback) = {
let cfginner = config.get();
(
@@ -64,11 +64,14 @@ impl VeilidCoreContext {
)
};

ApiTracingLayer::add_callback(program_name, namespace, update_callback.clone()).await?;
let log_key = VeilidLayerFilter::make_veilid_log_key(&program_name, &namespace).to_string();
ApiTracingLayer::add_callback(log_key, update_callback.clone()).await?;

// Create component registry
let registry = VeilidComponentRegistry::new(config);

veilid_log!(registry info "Veilid API starting up");

// Register all components
registry.register(ProtectedStore::new);
registry.register(Crypto::new);
@@ -99,14 +102,14 @@ impl VeilidCoreContext {
return Err(VeilidAPIError::internal(e));
}

info!("Veilid API startup complete");
veilid_log!(registry info "Veilid API startup complete");

Ok(Self { registry })
}

#[instrument(level = "trace", target = "core_context", skip_all)]
async fn shutdown(self) {
info!("Veilid API shutdown complete");
veilid_log!(self info "Veilid API shutting down");

let (program_name, namespace, update_callback) = {
let config = self.registry.config();
@@ -127,7 +130,10 @@ impl VeilidCoreContext {
// This should finish any shutdown operations for the subsystems
self.registry.terminate().await;

if let Err(e) = ApiTracingLayer::remove_callback(program_name, namespace).await {
veilid_log!(self info "Veilid API shutdown complete");

let log_key = VeilidLayerFilter::make_veilid_log_key(&program_name, &namespace).to_string();
if let Err(e) = ApiTracingLayer::remove_callback(log_key).await {
error!("Error removing callback from ApiTracingLayer: {}", e);
}
@@ -33,6 +33,8 @@ use hashlink::linked_hash_map::Entry;
use hashlink::LruCache;
use std::marker::PhantomData;

impl_veilid_log_facility!("crypto");

cfg_if! {
if #[cfg(all(feature = "enable-crypto-none", feature = "enable-crypto-vld0"))] {
/// Crypto kinds in order of preference, best cryptosystem is the first one, worst is the last one
@@ -183,7 +185,7 @@ impl Crypto {
let crypto = registry.crypto();
async move {
if let Err(e) = crypto.flush().await {
warn!("flush failed: {}", e);
veilid_log!(crypto warn "flush failed: {}", e);
}
}
});
@@ -208,10 +210,10 @@ impl Crypto {
if let Some(f) = flush_future {
f.await;
}
log_crypto!("starting termination flush");
veilid_log!(self trace "starting termination flush");
match self.flush().await {
Ok(_) => {
log_crypto!("finished termination flush");
veilid_log!(self trace "finished termination flush");
}
Err(e) => {
error!("failed termination flush: {}", e);
@@ -364,29 +366,29 @@ impl Crypto {
let table_key_node_id_secret = format!("node_id_secret_{}", ck);

if node_id.is_none() {
log_crypto!(debug "pulling {} from storage", table_key_node_id);
veilid_log!(self debug "pulling {} from storage", table_key_node_id);
if let Ok(Some(stored_node_id)) = config_table
.load_json::<TypedKey>(0, table_key_node_id.as_bytes())
.await
{
log_crypto!(debug "{} found in storage", table_key_node_id);
veilid_log!(self debug "{} found in storage", table_key_node_id);
node_id = Some(stored_node_id);
} else {
log_crypto!(debug "{} not found in storage", table_key_node_id);
veilid_log!(self debug "{} not found in storage", table_key_node_id);
}
}

// See if node id secret was previously stored in the protected store
if node_id_secret.is_none() {
log_crypto!(debug "pulling {} from storage", table_key_node_id_secret);
veilid_log!(self debug "pulling {} from storage", table_key_node_id_secret);
if let Ok(Some(stored_node_id_secret)) = config_table
.load_json::<TypedSecret>(0, table_key_node_id_secret.as_bytes())
.await
{
log_crypto!(debug "{} found in storage", table_key_node_id_secret);
veilid_log!(self debug "{} found in storage", table_key_node_id_secret);
node_id_secret = Some(stored_node_id_secret);
} else {
log_crypto!(debug "{} not found in storage", table_key_node_id_secret);
veilid_log!(self debug "{} not found in storage", table_key_node_id_secret);
}
}

@@ -406,11 +408,11 @@ impl Crypto {
(node_id, node_id_secret)
} else {
// If we still don't have a valid node id, generate one
log_crypto!(debug "generating new node_id_{}", ck);
veilid_log!(self debug "generating new node_id_{}", ck);
let kp = vcrypto.generate_keypair().await;
(TypedKey::new(ck, kp.key), TypedSecret::new(ck, kp.secret))
};
info!("Node Id: {}", node_id);
veilid_log!(self info "Node Id: {}", node_id);

// Save the node id / secret in storage
config_table
@@ -1,5 +1,7 @@
use crate::*;

impl_veilid_log_facility!("bstore");

struct BlockStoreInner {
//
}
@@ -3,6 +3,8 @@ use data_encoding::BASE64URL_NOPAD;
use keyring_manager::*;
use std::path::Path;

impl_veilid_log_facility!("pstore");

pub struct ProtectedStoreInner {
keyring_manager: Option<KeyringManager>,
}
@@ -40,7 +42,7 @@ impl ProtectedStore {
if let Err(e) = self.remove_user_secret(kpsk).await {
error!("failed to delete '{}': {}", kpsk, e);
} else {
log_pstore!(debug "deleted table '{}'", kpsk);
veilid_log!(self debug "deleted table '{}'", kpsk);
}
}
Ok(())
@@ -65,7 +67,7 @@ impl ProtectedStore {
inner.keyring_manager = match maybe_km {
Ok(v) => Some(v),
Err(e) => {
info!("Secure key storage service unavailable, falling back to direct disk-based storage: {}", e);
veilid_log!(self info "Secure key storage service unavailable, falling back to direct disk-based storage: {}", e);
None
}
};
@@ -94,7 +96,7 @@ impl ProtectedStore {
);
}
if inner.keyring_manager.is_none() {
log_pstore!(error "QWERQWER");
veilid_log!(self error "QWERQWER");
bail!("Could not initialize the protected store.");
}
c.protected_store.delete
@@ -1,5 +1,7 @@
use crate::*;

impl_veilid_log_facility!("bstore");

struct BlockStoreInner {
//
}
@@ -3,6 +3,8 @@ use data_encoding::BASE64URL_NOPAD;

use web_sys::*;

impl_veilid_log_facility!("pstore");

#[derive(Debug)]
pub struct ProtectedStore {
registry: VeilidComponentRegistry,
@@ -21,7 +23,7 @@ impl ProtectedStore {
if let Err(e) = self.remove_user_secret(kpsk).await {
error!("failed to delete '{}': {}", kpsk, e);
} else {
log_pstore!(debug "deleted table '{}'", kpsk);
veilid_log!(self debug "deleted table '{}'", kpsk);
}
}
Ok(())
@@ -63,8 +63,9 @@ pub(crate) use self::component::*;
pub(crate) use self::core_context::RegisteredComponents;
pub use self::core_context::{api_startup, api_startup_config, api_startup_json, UpdateCallback};
pub use self::logging::{
ApiTracingLayer, VeilidLayerFilter, DEFAULT_LOG_FACILITIES_ENABLED_LIST,
DEFAULT_LOG_FACILITIES_IGNORE_LIST, DURATION_LOG_FACILITIES, FLAME_LOG_FACILITIES_IGNORE_LIST,
ApiTracingLayer, FmtStripFields, VeilidLayerFilter, VeilidLayerLogKeyFilter,
DEFAULT_LOG_FACILITIES_ENABLED_LIST, DEFAULT_LOG_FACILITIES_IGNORE_LIST,
DURATION_LOG_FACILITIES, FLAME_LOG_FACILITIES_IGNORE_LIST, VEILID_LOG_KEY_FIELD,
};
pub use self::veilid_api::*;
pub use self::veilid_config::*;
@@ -5,7 +5,7 @@ use core::fmt::Write;
use tracing_subscriber::*;

struct ApiTracingLayerInner {
update_callbacks: HashMap<(String, String), UpdateCallback>,
update_callbacks: HashMap<String, UpdateCallback>,
}

/// API Tracing layer for 'tracing' subscribers
@@ -42,23 +42,26 @@ impl ApiTracingLayer {

#[instrument(level = "debug", skip(update_callback))]
pub(crate) async fn add_callback(
program_name: String,
namespace: String,
log_key: String,
update_callback: UpdateCallback,
) -> VeilidAPIResult<()> {
let mut inner = API_LOGGER_INNER.lock();
if inner.is_none() {
*inner = Some(Self::new_inner());
}
let key = (program_name, namespace);
if inner.as_ref().unwrap().update_callbacks.contains_key(&key) {
if inner
.as_ref()
.unwrap()
.update_callbacks
.contains_key(&log_key)
{
apibail_already_initialized!();
}
inner
.as_mut()
.unwrap()
.update_callbacks
.insert(key, update_callback);
.insert(log_key, update_callback);

API_LOGGER_ENABLED.store(true, Ordering::Release);

@@ -66,12 +69,7 @@ impl ApiTracingLayer {
}

#[instrument(level = "debug")]
pub(crate) async fn remove_callback(
program_name: String,
namespace: String,
) -> VeilidAPIResult<()> {
let key = (program_name, namespace);

pub(crate) async fn remove_callback(log_key: String) -> VeilidAPIResult<()> {
let mut inner = API_LOGGER_INNER.lock();
if inner.is_none() {
apibail_not_initialized!();
@@ -80,7 +78,7 @@ impl ApiTracingLayer {
.as_mut()
.unwrap()
.update_callbacks
.remove(&key)
.remove(&log_key)
.is_none()
{
apibail_not_initialized!();
@@ -93,7 +91,16 @@ impl ApiTracingLayer {
Ok(())
}

fn emit_log(&self, meta: &'static Metadata<'static>, message: String) {
fn emit_log(&self, meta: &'static Metadata<'static>, log_key: &str, message: &str) {
let opt_update_cb = if let Some(inner) = &mut *API_LOGGER_INNER.lock() {
inner.update_callbacks.get(log_key).cloned()
} else {
None
};
let Some(update_cb) = opt_update_cb else {
return;
};

let level = *meta.level();
let target = meta.target();
let log_level = VeilidLogLevel::from_tracing_level(level);
@@ -116,7 +123,7 @@ impl ApiTracingLayer {
if target.is_empty() {
"".to_owned()
} else {
format!("[{}]", target)
format!("[{}] ", target)
},
simplify_file(file),
ln
@@ -126,7 +133,7 @@ impl ApiTracingLayer {
.unwrap_or_default(),
};

let message = format!("{}{}", origin, message).trim().to_owned();
let message = format!("{} {}", origin, message).trim().to_owned();

let backtrace = if log_level <= VeilidLogLevel::Error {
let bt = backtrace::Backtrace::new();
@@ -141,11 +148,7 @@ impl ApiTracingLayer {
backtrace,
}));

if let Some(inner) = &mut *API_LOGGER_INNER.lock() {
for cb in inner.update_callbacks.values() {
(cb)(log_update.clone());
}
}
(update_cb)(log_update.clone());
}
}

@@ -185,13 +188,13 @@ impl<S: Subscriber + for<'a> registry::LookupSpan<'a>> Layer<S> for ApiTracingLa
return;
}

let mut new_debug_record = StringRecorder::new();
let mut new_debug_record = VeilidKeyedStringRecorder::new();
attrs.record(&mut new_debug_record);

if let Some(span_ref) = ctx.span(id) {
span_ref
.extensions_mut()
.insert::<StringRecorder>(new_debug_record);
.insert::<VeilidKeyedStringRecorder>(new_debug_record);
if crate::DURATION_LOG_FACILITIES.contains(&attrs.metadata().target()) {
span_ref
.extensions_mut()
@@ -213,9 +216,19 @@ impl<S: Subscriber + for<'a> registry::LookupSpan<'a>> Layer<S> for ApiTracingLa
span_duration.end = Timestamp::now();
let duration = span_duration.end.saturating_sub(span_duration.start);
let meta = span_ref.metadata();

let mut extensions = span_ref.extensions_mut();
let log_key =
if let Some(span_ksr) = extensions.get_mut::<VeilidKeyedStringRecorder>() {
span_ksr.log_key()
} else {
""
};

self.emit_log(
meta,
format!(
log_key,
&format!(
" {}{}: duration={}",
span_ref
.parent()
@@ -240,7 +253,10 @@ impl<S: Subscriber + for<'a> registry::LookupSpan<'a>> Layer<S> for ApiTracingLa
return;
}
if let Some(span_ref) = ctx.span(id) {
if let Some(debug_record) = span_ref.extensions_mut().get_mut::<StringRecorder>() {
if let Some(debug_record) = span_ref
.extensions_mut()
.get_mut::<VeilidKeyedStringRecorder>()
{
values.record(debug_record);
}
}
@@ -251,27 +267,38 @@ impl<S: Subscriber + for<'a> registry::LookupSpan<'a>> Layer<S> for ApiTracingLa
// Optimization if api logger has no callbacks
return;
}
let mut recorder = StringRecorder::new();
let mut recorder = VeilidKeyedStringRecorder::new();
event.record(&mut recorder);
let meta = event.metadata();
self.emit_log(meta, recorder.to_string());
self.emit_log(meta, recorder.log_key(), recorder.display());
}
}

struct StringRecorder {
struct VeilidKeyedStringRecorder {
log_key: String,
display: String,
//is_following_args: bool,
}
impl StringRecorder {
impl VeilidKeyedStringRecorder {
fn new() -> Self {
StringRecorder {
VeilidKeyedStringRecorder {
log_key: String::new(),
display: String::new(),
// is_following_args: false,
}
}
fn display(&self) -> &str {
&self.display
}
fn log_key(&self) -> &str {
&self.log_key
}
}

impl tracing::field::Visit for StringRecorder {
impl tracing::field::Visit for VeilidKeyedStringRecorder {
fn record_str(&mut self, field: &tracing::field::Field, value: &str) {
if field.name() == VEILID_LOG_KEY_FIELD {
self.log_key = value.to_owned();
}
}
fn record_debug(&mut self, field: &tracing::field::Field, value: &dyn core::fmt::Debug) {
if field.name() == "message" {
if !self.display.is_empty() {
@@ -285,19 +312,3 @@ impl tracing::field::Visit for StringRecorder {
}
}
}

impl core::fmt::Display for StringRecorder {
fn fmt(&self, mut f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
if !self.display.is_empty() {
write!(&mut f, " {}", self.display)
} else {
Ok(())
}
}
}

impl core::default::Default for StringRecorder {
fn default() -> Self {
StringRecorder::new()
}
}
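A short sketch of the new keying shown above: callbacks are now stored under a single log-key string built from program name and namespace, mirroring the core_context.rs hunk earlier (the literal values here are hypothetical):

let log_key = VeilidLayerFilter::make_veilid_log_key("myprog", "subnode1").to_string();
ApiTracingLayer::add_callback(log_key.clone(), update_callback.clone()).await?;
// ... later, during shutdown:
ApiTracingLayer::remove_callback(log_key).await?;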
@@ -68,10 +68,18 @@ pub static DEFAULT_LOG_FACILITIES_ENABLED_LIST: &[&str] = &[
"pstore",
"tstore",
"crypto",
"veilid_debug",
];

pub static DURATION_LOG_FACILITIES: &[&str] = &["veilid_api"];

#[macro_export]
macro_rules! impl_veilid_log_facility {
($facility:literal) => {
const __VEILID_LOG_FACILITY: &'static str = $facility;
};
}

#[macro_export]
macro_rules! fn_string {
($text:expr) => {
@@ -80,353 +88,204 @@ macro_rules! fn_string {
}

#[macro_export]
macro_rules! log_net {
(error $text:expr) => {error!(
target: "net",
macro_rules! veilid_log {
// ERROR //////////////////////////////////////////////////////////////////////////
// veilid_log!(self error "message")
($self_expr:ident error $text:expr) => {error!(
target: self::__VEILID_LOG_FACILITY,
__VEILID_LOG_KEY = $self_expr.log_key(),
"{}",
$text,
)};
(error $fmt:literal, $($arg:expr),+) => {
error!(target:"net", $fmt, $($arg),+);
// veilid_log!(self error target: "facility", "message")
($self_expr:ident error target: $target:expr, $text:expr) => {error!(
target: $target,
__VEILID_LOG_KEY = $self_expr.log_key(),
"{}",
$text,
)};
// veilid_log!(self error "data: {}", data)
($self_expr:ident error $fmt:literal, $($arg:expr),+) => {error!(
target: self::__VEILID_LOG_FACILITY,
__VEILID_LOG_KEY = $self_expr.log_key(),
$fmt, $($arg),+);
};
(warn $text:expr) => {warn!(
target: "net",
"{}",
$text,
)};
(warn $fmt:literal, $($arg:expr),+) => {
warn!(target:"net", $fmt, $($arg),+);
// veilid_log!(self error target: "facility", "data: {}", data)
($self_expr:ident error target: $target:expr, $fmt:literal, $($arg:expr),+) => {error!(
target: $target,
__VEILID_LOG_KEY = $self_expr.log_key(),
$fmt, $($arg),+);
};
(debug $text:expr) => {debug!(
target: "net",
"{}",
$text,
// veilid_log!(self error field=value, ?other_field)
($self_expr:ident error $($k:ident).+ = $($fields:tt)*) => {error!(
target: self::__VEILID_LOG_FACILITY,
__VEILID_LOG_KEY = $self_expr.log_key(),
$($k).+ = $($fields)*
)};
(debug $fmt:literal, $($arg:expr),+) => {
debug!(target:"net", $fmt, $($arg),+);
};
($text:expr) => {trace!(
target: "net",
"{}",
$text,
// veilid_log!(self error target: "facility", field=value, ?other_field)
($self_expr:ident error target: $target:expr, $($k:ident).+ = $($fields:tt)*) => {error!(
target: $target,
__VEILID_LOG_KEY = $self_expr.log_key(),
$($k).+ = $($fields)*
)};
($fmt:literal, $($arg:expr),+) => {
trace!(target:"net", $fmt, $($arg),+);
}
}

#[macro_export]
macro_rules! log_client_api {
(error $text:expr) => {error!(
target: "client_api",
// WARN //////////////////////////////////////////////////////////////////////////
// veilid_log!(self warn "message")
($self_expr:ident warn $text:expr) => {warn!(
target: self::__VEILID_LOG_FACILITY,
__VEILID_LOG_KEY = $self_expr.log_key(),
"{}",
$text,
)};
(error $fmt:literal, $($arg:expr),+) => {
error!(target:"client_api", $fmt, $($arg),+);
// veilid_log!(self warn target: "facility", "message")
($self_expr:ident warn target: $target:expr, $text:expr) => {warn!(
target: $target,
__VEILID_LOG_KEY = $self_expr.log_key(),
"{}",
$text,
)};
// veilid_log!(self warn "data: {}", data)
($self_expr:ident warn $fmt:literal, $($arg:expr),+) => {warn!(
target: self::__VEILID_LOG_FACILITY,
__VEILID_LOG_KEY = $self_expr.log_key(),
$fmt, $($arg),+);
};
(warn $text:expr) => {warn!(
target: "client_api",
"{}",
$text,
)};
(warn $fmt:literal, $($arg:expr),+) => {
warn!(target:"client_api", $fmt, $($arg),+);
// veilid_log!(self warn target: "facility", "data: {}", data)
($self_expr:ident warn target: $target:expr, $fmt:literal, $($arg:expr),+) => {warn!(
target: $target,
__VEILID_LOG_KEY = $self_expr.log_key(),
$fmt, $($arg),+);
};
(debug $text:expr) => {debug!(
target: "client_api",
"{}",
$text,
// veilid_log!(self warn field=value, ?other_field)
($self_expr:ident warn $($k:ident).+ = $($fields:tt)*) => {warn!(
target: self::__VEILID_LOG_FACILITY,
__VEILID_LOG_KEY = $self_expr.log_key(),
$($k).+ = $($fields)*
)};
(debug $fmt:literal, $($arg:expr),+) => {
debug!(target:"client_api", $fmt, $($arg),+);
};
($text:expr) => {trace!(
target: "client_api",
"{}",
$text,
// veilid_log!(self warn target: "facility", field=value, ?other_field)
($self_expr:ident warn target: $target:expr, $($k:ident).+ = $($fields:tt)*) => {warn!(
target: $target,
__VEILID_LOG_KEY = $self_expr.log_key(),
$($k).+ = $($fields)*
)};
($fmt:literal, $($arg:expr),+) => {
trace!(target:"client_api", $fmt, $($arg),+);
}
}

#[macro_export]
macro_rules! log_rpc {
(error $text:expr) => { error!(
target: "rpc",
// INFO //////////////////////////////////////////////////////////////////////////
// veilid_log!(self info "message")
($self_expr:ident info $text:expr) => {info!(
target: self::__VEILID_LOG_FACILITY,
__VEILID_LOG_KEY = $self_expr.log_key(),
"{}",
$text,
)};
(error $fmt:literal, $($arg:expr),+) => {
error!(target:"rpc", $fmt, $($arg),+);
// veilid_log!(self info target: "facility", "message")
($self_expr:ident info target: $target:expr, $text:expr) => {info!(
target: $target,
__VEILID_LOG_KEY = $self_expr.log_key(),
"{}",
$text,
)};
// veilid_log!(self info "data: {}", data)
($self_expr:ident info $fmt:literal, $($arg:expr),+) => {info!(
target: self::__VEILID_LOG_FACILITY,
__VEILID_LOG_KEY = $self_expr.log_key(),
$fmt, $($arg),+);
};
(warn $text:expr) => { warn!(
target: "rpc",
"{}",
$text,
)};
(warn $fmt:literal, $($arg:expr),+) => {
warn!(target:"rpc", $fmt, $($arg),+);
// veilid_log!(self info target: "facility", "data: {}", data)
($self_expr:ident info target: $target:expr, $fmt:literal, $($arg:expr),+) => {info!(
target: $target,
__VEILID_LOG_KEY = $self_expr.log_key(),
$fmt, $($arg),+);
};
(debug $text:expr) => { debug!(
target: "rpc",
"{}",
$text,
// veilid_log!(self info field=value, ?other_field)
($self_expr:ident info $($k:ident).+ = $($fields:tt)*) => {info!(
target: self::__VEILID_LOG_FACILITY,
__VEILID_LOG_KEY = $self_expr.log_key(),
$($k).+ = $($fields)*
)};
(debug $fmt:literal, $($arg:expr),+) => {
debug!(target:"rpc", $fmt, $($arg),+);
};
($text:expr) => {trace!(
target: "rpc",
"{}",
$text,
// veilid_log!(self info target: "facility", field=value, ?other_field)
($self_expr:ident info target: $target:expr, $($k:ident).+ = $($fields:tt)*) => {info!(
target: $target,
__VEILID_LOG_KEY = $self_expr.log_key(),
$($k).+ = $($fields)*
)};
($fmt:literal, $($arg:expr),+) => {
trace!(target:"rpc", $fmt, $($arg),+);
}
}

#[macro_export]
macro_rules! log_dht {
(error $text:expr) => { error!(
target: "dht",
// DEBUG //////////////////////////////////////////////////////////////////////////
// veilid_log!(self debug "message")
($self_expr:ident debug $text:expr) => {debug!(
target: self::__VEILID_LOG_FACILITY,
__VEILID_LOG_KEY = $self_expr.log_key(),
"{}",
$text,
)};
(error $fmt:literal, $($arg:expr),+) => {
error!(target:"dht", $fmt, $($arg),+);
// veilid_log!(self debug target: "facility", "message")
($self_expr:ident debug target: $target:expr, $text:expr) => {debug!(
target: $target,
__VEILID_LOG_KEY = $self_expr.log_key(),
"{}",
$text,
)};
// veilid_log!(self debug "data: {}", data)
($self_expr:ident debug $fmt:literal, $($arg:expr),+) => {debug!(
target: self::__VEILID_LOG_FACILITY,
__VEILID_LOG_KEY = $self_expr.log_key(),
$fmt, $($arg),+);
};
(warn $text:expr) => { warn!(
target: "dht",
"{}",
$text,
)};
(warn $fmt:literal, $($arg:expr),+) => {
warn!(target:"dht", $fmt, $($arg),+);
// veilid_log!(self debug target: "facility", "data: {}", data)
($self_expr:ident debug target: $target:expr, $fmt:literal, $($arg:expr),+) => {debug!(
target: $target,
__VEILID_LOG_KEY = $self_expr.log_key(),
$fmt, $($arg),+);
};
(debug $text:expr) => { debug!(
target: "dht",
"{}",
$text,
// veilid_log!(self debug field=value, ?other_field)
($self_expr:ident debug $($k:ident).+ = $($fields:tt)*) => {debug!(
target: self::__VEILID_LOG_FACILITY,
__VEILID_LOG_KEY = $self_expr.log_key(),
$($k).+ = $($fields)*
)};
(debug $fmt:literal, $($arg:expr),+) => {
debug!(target:"dht", $fmt, $($arg),+);
};
($text:expr) => {trace!(
target: "dht",
"{}",
$text,
// veilid_log!(self debug target: "facility", field=value, ?other_field)
($self_expr:ident debug target: $target:expr, $($k:ident).+ = $($fields:tt)*) => {debug!(
target: $target,
__VEILID_LOG_KEY = $self_expr.log_key(),
$($k).+ = $($fields)*
)};
($fmt:literal, $($arg:expr),+) => {
trace!(target:"dht", $fmt, $($arg),+);
}
}

#[macro_export]
macro_rules! log_rtab {
(error $text:expr) => { error!(
target: "rtab",
// TRACE //////////////////////////////////////////////////////////////////////////
// veilid_log!(self trace "message")
($self_expr:ident trace $text:expr) => {trace!(
target: self::__VEILID_LOG_FACILITY,
__VEILID_LOG_KEY = $self_expr.log_key(),
"{}",
$text,
)};
(error $fmt:literal, $($arg:expr),+) => {
error!(target:"rtab", $fmt, $($arg),+);
// veilid_log!(self trace target: "facility", "message")
($self_expr:ident trace target: $target:expr, $text:expr) => {trace!(
target: $target,
__VEILID_LOG_KEY = $self_expr.log_key(),
"{}",
$text,
)};
// veilid_log!(self trace "data: {}", data)
($self_expr:ident trace $fmt:literal, $($arg:expr),+) => {trace!(
target: self::__VEILID_LOG_FACILITY,
__VEILID_LOG_KEY = $self_expr.log_key(),
$fmt, $($arg),+);
};
(warn $text:expr) => { warn!(
target: "rtab",
"{}",
$text,
)};
(warn $fmt:literal, $($arg:expr),+) => {
warn!(target:"rtab", $fmt, $($arg),+);
// veilid_log!(self trace target: "facility", "data: {}", data)
($self_expr:ident trace target: $target:expr, $fmt:literal, $($arg:expr),+) => {trace!(
target: $target,
__VEILID_LOG_KEY = $self_expr.log_key(),
$fmt, $($arg),+);
};
(debug $text:expr) => { debug!(
target: "rtab",
"{}",
$text,
// veilid_log!(self trace field=value, ?other_field)
($self_expr:ident trace $($k:ident).+ = $($fields:tt)*) => {trace!(
target: self::__VEILID_LOG_FACILITY,
__VEILID_LOG_KEY = $self_expr.log_key(),
$($k).+ = $($fields)*
)};
(debug $fmt:literal, $($arg:expr),+) => {
debug!(target:"rtab", $fmt, $($arg),+);
};
($text:expr) => {trace!(
target: "rtab",
"{}",
$text,
// veilid_log!(self trace target: "facility", field=value, ?other_field)
($self_expr:ident trace target: $target:expr, $($k:ident).+ = $($fields:tt)*) => {trace!(
target: $target,
__VEILID_LOG_KEY = $self_expr.log_key(),
$($k).+ = $($fields)*
)};
($fmt:literal, $($arg:expr),+) => {
trace!(target:"rtab", $fmt, $($arg),+);
}
}

#[macro_export]
macro_rules! log_stor {
(error $text:expr) => { error!(
target: "stor",
"{}",
$text,
)};
(error $fmt:literal, $($arg:expr),+) => {
error!(target:"stor", $fmt, $($arg),+);
};
(warn $text:expr) => { warn!(
target: "stor",
"{}",
$text,
)};
(warn $fmt:literal, $($arg:expr),+) => {
warn!(target:"stor", $fmt, $($arg),+);
};
(debug $text:expr) => { debug!(
target: "stor",
"{}",
$text,
)};
(debug $fmt:literal, $($arg:expr),+) => {
debug!(target:"stor", $fmt, $($arg),+);
};
($text:expr) => {trace!(
target: "stor",
"{}",
$text,
)};
($fmt:literal, $($arg:expr),+) => {
trace!(target:"stor", $fmt, $($arg),+);
}
}

#[macro_export]
macro_rules! log_pstore {
(error $text:expr) => { error!(
target: "pstore",
"{}",
$text,
)};
(error $fmt:literal, $($arg:expr),+) => {
error!(target:"pstore", $fmt, $($arg),+);
};
(warn $text:expr) => { warn!(
target: "pstore",
"{}",
$text,
)};
(warn $fmt:literal, $($arg:expr),+) => {
warn!(target:"pstore", $fmt, $($arg),+);
};
(debug $text:expr) => { debug!(
target: "pstore",
"{}",
$text,
)};
(debug $fmt:literal, $($arg:expr),+) => {
debug!(target:"pstore", $fmt, $($arg),+);
};
($text:expr) => {trace!(
target: "pstore",
"{}",
$text,
)};
($fmt:literal, $($arg:expr),+) => {
trace!(target:"pstore", $fmt, $($arg),+);
}
}

#[macro_export]
macro_rules! log_tstore {
(error $text:expr) => { error!(
target: "tstore",
"{}",
$text,
)};
(error $fmt:literal, $($arg:expr),+) => {
error!(target:"tstore", $fmt, $($arg),+);
};
(warn $text:expr) => { warn!(
target: "tstore",
"{}",
$text,
)};
(warn $fmt:literal, $($arg:expr),+) => {
warn!(target:"tstore", $fmt, $($arg),+);
};
(debug $text:expr) => { debug!(
target: "tstore",
"{}",
$text,
)};
(debug $fmt:literal, $($arg:expr),+) => {
debug!(target:"tstore", $fmt, $($arg),+);
};
($text:expr) => {trace!(
target: "tstore",
"{}",
$text,
)};
($fmt:literal, $($arg:expr),+) => {
trace!(target:"tstore", $fmt, $($arg),+);
}
}

#[macro_export]
macro_rules! log_crypto {
(error $text:expr) => { error!(
target: "crypto",
"{}",
$text,
)};
(error $fmt:literal, $($arg:expr),+) => {
error!(target:"crypto", $fmt, $($arg),+);
};
(warn $text:expr) => { warn!(
target: "crypto",
"{}",
$text,
)};
(warn $fmt:literal, $($arg:expr),+) => {
warn!(target:"crypto", $fmt, $($arg),+);
};
(debug $text:expr) => { debug!(
target: "crypto",
"{}",
$text,
)};
(debug $fmt:literal, $($arg:expr),+) => {
debug!(target:"crypto", $fmt, $($arg),+);
};
($text:expr) => {trace!(
target: "crypto",
"{}",
$text,
)};
($fmt:literal, $($arg:expr),+) => {
trace!(target:"crypto", $fmt, $($arg),+);
}
}

#[macro_export]
macro_rules! log_rpc_message {
(error $text:expr) => { error!(
target: "rpc_message",
"{}",
$text,
)};
(error $fmt:literal, $($arg:expr),+) => {
error!(target:"crypto", $fmt, $($arg),+);
};
(warn $text:expr) => { warn!(
target: "rpc_message",
"{}",
$text,
)};
(warn $fmt:literal, $($arg:expr),+) => {
warn!(target:"crypto", $fmt, $($arg),+);
};
($text:expr) => {trace!(
target: "rpc_message",
"{}",
$text,
)};
($fmt:literal, $($arg:expr),+) => {
trace!(target:"rpc_message", $fmt, $($arg),+);
}
}
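The per-facility macros above (log_net!, log_rpc!, log_pstore!, and so on) collapse into the single veilid_log! macro, which tags every event with the facility target and the caller's log key. A minimal call-site sketch, assuming a type that implements VeilidComponentRegistryAccessor so .log_key() resolves (the component and messages here are hypothetical):

impl_veilid_log_facility!("example");

impl ExampleComponent {
    fn tick(&self) {
        // Expands to debug!(target: "example", __VEILID_LOG_KEY = self.log_key(), "{}", ...)
        veilid_log!(self debug "tick started");
        // Facility override plus formatted arguments.
        veilid_log!(self warn target: "net", "slow tick: {}ms", 125);
    }
}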
veilid-core/src/logging/fmt_strip_fields.rs (new file, 145 lines)
@@ -0,0 +1,145 @@
use super::*;
use tracing_subscriber::{
field::{MakeVisitor, RecordFields, Visit, VisitOutput},
fmt::{
format::{DefaultFields, Writer},
FormatFields,
},
};

pub struct FmtStripFields {
/// The inner formatter that will be used to format fields
fmt: DefaultFields,
/// The names of fields to remove
remove_keys: HashSet<&'static str>,
}

impl FmtStripFields {
/// Create a new [`FmtStripFields`] formatter based on the given `fmt`,
/// that removes fields with a given set of names
pub fn new(fmt: DefaultFields, remove_keys: HashSet<&'static str>) -> Self {
Self { fmt, remove_keys }
}
}

impl<'writer> FormatFields<'writer> for FmtStripFields {
fn format_fields<R: RecordFields>(&self, writer: Writer<'writer>, fields: R) -> fmt::Result {
let mut visitor = FmtStripVisitor::new(self.fmt.make_visitor(writer), &self.remove_keys);
fields.record(&mut visitor);
visitor.finish()
}
}

struct FmtStripVisitor<'a, F, Out>
where
F: Visit + VisitOutput<Out>,
{
remove_keys: &'a HashSet<&'static str>,
visitor: F,
_phantom: core::marker::PhantomData<Out>,
}

impl<'a, F, Out> FmtStripVisitor<'a, F, Out>
where
F: Visit + VisitOutput<Out>,
{
pub fn new(visitor: F, remove_keys: &'a HashSet<&'static str>) -> Self {
Self {
remove_keys,
visitor,
_phantom: core::marker::PhantomData {},
}
}
}

impl<'a, F, Out> VisitOutput<Out> for FmtStripVisitor<'a, F, Out>
where
F: Visit + VisitOutput<Out>,
{
fn finish(self) -> Out {
self.visitor.finish()
}

fn visit<R>(self, fields: &R) -> Out
where
R: RecordFields,
Self: Sized,
{
self.visitor.visit(fields)
}
}

impl<'a, F, Out> Visit for FmtStripVisitor<'a, F, Out>
where
F: Visit + VisitOutput<Out>,
{
fn record_debug(&mut self, field: &field::Field, value: &dyn fmt::Debug) {
if self.remove_keys.contains(field.name()) {
return;
}
self.visitor.record_debug(field, value);
}

fn record_f64(&mut self, field: &field::Field, value: f64) {
if self.remove_keys.contains(field.name()) {
return;
}
self.visitor.record_f64(field, value);
}

fn record_i64(&mut self, field: &field::Field, value: i64) {
if self.remove_keys.contains(field.name()) {
return;
}
self.visitor.record_i64(field, value);
}

fn record_u64(&mut self, field: &field::Field, value: u64) {
if self.remove_keys.contains(field.name()) {
return;
}
self.visitor.record_u64(field, value);
}

fn record_i128(&mut self, field: &field::Field, value: i128) {
if self.remove_keys.contains(field.name()) {
return;
}
self.visitor.record_i128(field, value);
}

fn record_u128(&mut self, field: &field::Field, value: u128) {
if self.remove_keys.contains(field.name()) {
return;
}
self.visitor.record_u128(field, value);
}

fn record_bool(&mut self, field: &field::Field, value: bool) {
if self.remove_keys.contains(field.name()) {
return;
}
self.visitor.record_bool(field, value);
}

fn record_str(&mut self, field: &field::Field, value: &str) {
if self.remove_keys.contains(field.name()) {
return;
}
self.visitor.record_str(field, value);
}

fn record_bytes(&mut self, field: &field::Field, value: &[u8]) {
if self.remove_keys.contains(field.name()) {
return;
}
self.visitor.record_bytes(field, value);
}

fn record_error(&mut self, field: &field::Field, value: &(dyn std::error::Error + 'static)) {
if self.remove_keys.contains(field.name()) {
return;
}
self.visitor.record_error(field, value);
}
}
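FmtStripFields above filters named fields out of formatted output; one plausible use, not shown in this commit, is keeping the internal __VEILID_LOG_KEY field out of console logs. A sketch under that assumption, using standard tracing_subscriber wiring:

use std::collections::HashSet;
use tracing_subscriber::fmt::format::DefaultFields;

// Strip the log-key field before the default formatter writes the event.
let strip = FmtStripFields::new(DefaultFields::new(), HashSet::from([VEILID_LOG_KEY_FIELD]));
let fmt_layer = tracing_subscriber::fmt::layer().fmt_fields(strip);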
@@ -1,9 +1,11 @@
mod api_tracing_layer;
mod facilities;
mod fmt_strip_fields;
mod veilid_layer_filter;

use super::*;

pub use api_tracing_layer::*;
pub use facilities::*;
pub use fmt_strip_fields::*;
pub use veilid_layer_filter::*;
@@ -6,17 +6,31 @@ use tracing_subscriber::layer;
struct VeilidLayerFilterInner {
max_level: LevelFilter,
ignore_list: Vec<String>,
log_key_filter: Option<VeilidLayerLogKeyFilter>,
empty_log_key_enabled: bool,
}

#[derive(Clone)]
pub struct VeilidLayerFilter {
inner: Arc<RwLock<VeilidLayerFilterInner>>,
}
pub const VEILID_LOG_KEY_FIELD: &str = "__VEILID_LOG_KEY";

pub type VeilidLayerLogKeyFilter = Arc<dyn Fn(&str) -> bool + Send + Sync>;

impl VeilidLayerFilter {
pub fn make_veilid_log_key(program_name: &str, namespace: &str) -> &'static str {
if namespace.is_empty() {
program_name.to_static_str()
} else {
format!("{}|{}", program_name, namespace).to_static_str()
}
}

pub fn new(
max_level: VeilidConfigLogLevel,
ignore_change_list: &[String],
log_key_filter: Option<VeilidLayerLogKeyFilter>,
) -> VeilidLayerFilter {
let mut ignore_list = DEFAULT_LOG_FACILITIES_IGNORE_LIST
.iter()
@@ -27,17 +41,22 @@ impl VeilidLayerFilter {
inner: Arc::new(RwLock::new(VeilidLayerFilterInner {
max_level: max_level.to_tracing_level_filter(),
ignore_list,
empty_log_key_enabled: log_key_filter.as_ref().map(|lkf| lkf("")).unwrap_or(true),
log_key_filter,
})),
}
}
pub fn new_no_default(
max_level: VeilidConfigLogLevel,
ignore_list: &[String],
namespace_filter: Option<VeilidLayerLogKeyFilter>,
) -> VeilidLayerFilter {
Self {
inner: Arc::new(RwLock::new(VeilidLayerFilterInner {
max_level: max_level.to_tracing_level_filter(),
ignore_list: ignore_list.to_vec(),
empty_log_key_enabled: namespace_filter.as_ref().map(|lkf| lkf("")).unwrap_or(true),
log_key_filter: namespace_filter,
})),
}
}
@@ -87,6 +106,10 @@ impl VeilidLayerFilter {
return false;
}

if !inner.empty_log_key_enabled && metadata.fields().field(VEILID_LOG_KEY_FIELD).is_none() {
return false;
}

true
}

@@ -135,13 +158,13 @@ impl VeilidLayerFilter {
}

impl<S: tracing::Subscriber> layer::Filter<S> for VeilidLayerFilter {
fn enabled(&self, metadata: &tracing::Metadata<'_>, _: &layer::Context<'_, S>) -> bool {
fn enabled(&self, metadata: &tracing::Metadata<'_>, _cx: &layer::Context<'_, S>) -> bool {
self.interesting(metadata)
}

fn callsite_enabled(&self, metadata: &'static tracing::Metadata<'static>) -> Interest {
if self.interesting(metadata) {
Interest::always()
Interest::sometimes()
} else {
Interest::never()
}
@@ -151,4 +174,43 @@ impl<S: tracing::Subscriber> layer::Filter<S> for VeilidLayerFilter {
let inner = self.inner.read();
Some(inner.max_level)
}

fn event_enabled(&self, event: &Event<'_>, _cx: &layer::Context<'_, S>) -> bool {
let (opt_log_key_filter, empty_log_key_enabled) = {
let inner = self.inner.read();
(inner.log_key_filter.clone(), inner.empty_log_key_enabled)
};
if let Some(log_key_filter) = opt_log_key_filter {
let mut visitor = LogKeyFilterVisitor::new(log_key_filter);
event.record(&mut visitor);
visitor.enabled().unwrap_or(empty_log_key_enabled)
} else {
true
}
}
}

struct LogKeyFilterVisitor {
filter: VeilidLayerLogKeyFilter,
enabled: Option<bool>,
}
impl LogKeyFilterVisitor {
pub fn new(filter: VeilidLayerLogKeyFilter) -> Self {
LogKeyFilterVisitor {
filter,
enabled: None,
}
}
pub fn enabled(&self) -> Option<bool> {
self.enabled
}
}

impl tracing::field::Visit for LogKeyFilterVisitor {
fn record_str(&mut self, field: &tracing::field::Field, value: &str) {
if field.name() == VEILID_LOG_KEY_FIELD {
self.enabled = Some((self.filter)(value));
}
}
fn record_debug(&mut self, _field: &tracing::field::Field, _value: &dyn fmt::Debug) {}
}
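With the hunk above, the layer filter can discriminate on the log key carried by each event, not just on level and facility. A hedged sketch of constructing one such filter (the key string and level are illustrative, not from the diff):

// Pass events with no log key, or events from one specific subnode key.
let wanted = "myprog|subnode1".to_string();
let key_filter: VeilidLayerLogKeyFilter = Arc::new(move |key| key.is_empty() || key == wanted);
let filter = VeilidLayerFilter::new(VeilidConfigLogLevel::Debug, &[], Some(key_filter));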
@@ -2,6 +2,8 @@
/// Used to determine if our address has changed and if we should re-publish new PeerInfo
use super::*;

impl_veilid_log_facility!("net");

/// Number of 'existing dialinfo inconsistent' results in the cache during inbound-capable to trigger detection
pub const ADDRESS_INCONSISTENCY_DETECTION_COUNT: usize = 5;

@@ -35,6 +37,7 @@ struct AddressCheckCacheKey(RoutingDomain, ProtocolType, AddressType);
/// Address checker - keep track of how other nodes are seeing our node's address on a per-protocol basis
/// Used to determine if our address has changed and if we should re-publish new PeerInfo
pub struct AddressCheck {
registry: VeilidComponentRegistry,
config: AddressCheckConfig,
net: Network,
published_peer_info: BTreeMap<RoutingDomain, Arc<PeerInfo>>,
@@ -61,9 +64,25 @@ impl fmt::Debug for AddressCheck {
}
}

impl_veilid_component_registry_accessor!(AddressCheck);

impl AddressCheck {
pub fn new(config: AddressCheckConfig, net: Network) -> Self {
pub fn new(net: Network) -> Self {
let registry = net.registry();

let (detect_address_changes, ip6_prefix_size) = registry.config().with(|c| {
(
c.network.detect_address_changes,
c.network.max_connections_per_ip6_prefix_size as usize,
)
});
let config = AddressCheckConfig {
detect_address_changes,
ip6_prefix_size,
};

Self {
registry,
config,
net,
published_peer_info: BTreeMap::new(),
@@ -158,7 +177,7 @@ impl AddressCheck {
return;
};
if local.port() != pla.port() {
log_network_result!(debug "ignoring address report because local port did not match listener: {} != {}", local.port(), pla.port());
veilid_log!(self debug target:"network_result", "ignoring address report because local port did not match listener: {} != {}", local.port(), pla.port());
return;
}

@@ -208,7 +227,7 @@ impl AddressCheck {
if needs_address_detection {
if self.config.detect_address_changes {
// Reset the address check cache now so we can start detecting fresh
info!(
veilid_log!(self info
"{:?} address has changed, detecting dial info",
routing_domain
);
@@ -216,7 +235,7 @@ impl AddressCheck {
// Re-detect the public dialinfo
self.net.trigger_update_network_class(routing_domain);
} else {
warn!(
veilid_log!(self warn
"{:?} address may have changed. Restarting the server may be required.",
routing_domain
);
@@ -257,11 +276,12 @@ impl AddressCheck {

// If we have something that matches our current dial info at all, consider it a validation
if new_matches_current {
let registry = self.registry();
self.address_inconsistency_table
.entry(acckey)
.and_modify(|ait| {
if *ait != 0 {
log_net!(debug "Resetting address inconsistency for {:?} due to match on flow {:?} from {}", acckey, flow, reporting_peer);
veilid_log!(registry debug "Resetting address inconsistency for {:?} due to match on flow {:?} from {}", acckey, flow, reporting_peer);
}
*ait = 0;
})
@@ -282,7 +302,7 @@ impl AddressCheck {
*ait += 1;
})
.or_insert(1);
log_net!(debug "Adding address inconsistency ({}) for {:?} due to address {} on flow {:?} from {}", val, acckey, socket_address, flow, reporting_peer);
veilid_log!(self debug "Adding address inconsistency ({}) for {:?} due to address {} on flow {:?} from {}", val, acckey, socket_address, flow, reporting_peer);
return val >= ADDRESS_INCONSISTENCY_DETECTION_COUNT;
}

@@ -306,6 +326,7 @@ impl AddressCheck {
}

// Add the currently seen socket address into the consistency table
let registry = self.registry();
let acckey =
AddressCheckCacheKey(routing_domain, flow.protocol_type(), flow.address_type());
let cache = self
@@ -325,7 +346,7 @@ impl AddressCheck {
for (_k, v) in cache.iter() {
let count = *consistencies.entry(*v).and_modify(|e| *e += 1).or_insert(1);
if count >= ADDRESS_CONSISTENCY_DETECTION_COUNT {
log_net!(debug "Address consistency detected for {:?}: {}", acckey, v);
veilid_log!(registry debug "Address consistency detected for {:?}: {}", acckey, v);
return true;
}
}
@@ -1,6 +1,8 @@
use super::*;
use alloc::collections::btree_map::Entry;

impl_veilid_log_facility!("net");

const PUNISHMENT_DURATION_MIN: usize = 60;
const MAX_PUNISHMENTS_BY_NODE_ID: usize = 65536;
const DIAL_INFO_FAILURE_DURATION_MIN: usize = 10;
@@ -154,7 +156,7 @@ impl AddressFilter {
}
}
for key in dead_keys {
warn!("Forgiving: {}", key);
veilid_log!(self warn "Forgiving: {}", key);
inner.punishments_by_ip4.remove(&key);
}
}
@@ -170,7 +172,7 @@ impl AddressFilter {
}
}
for key in dead_keys {
warn!("Forgiving: {}", key);
veilid_log!(self warn "Forgiving: {}", key);
inner.punishments_by_ip6_prefix.remove(&key);
}
}
@@ -186,7 +188,7 @@ impl AddressFilter {
}
}
for key in dead_keys {
warn!("Forgiving: {}", key);
veilid_log!(self warn "Forgiving: {}", key);
inner.punishments_by_node_id.remove(&key);
// make the entry alive again if it's still here
if let Ok(Some(nr)) = self.routing_table().lookup_node_ref(key) {
@@ -206,7 +208,7 @@ impl AddressFilter {
}
}
for key in dead_keys {
log_net!(debug "DialInfo Permit: {}", key);
veilid_log!(self debug "DialInfo Permit: {}", key);
inner.dial_info_failures.remove(&key);
}
}
@@ -252,10 +254,10 @@ impl AddressFilter {

let mut inner = self.inner.lock();
if inner.dial_info_failures.len() >= MAX_DIAL_INFO_FAILURES {
warn!("DialInfo failure table full: {}", dial_info);
veilid_log!(self warn "DialInfo failure table full: {}", dial_info);
return;
}
log_net!(debug "DialInfo failure: {:?}", dial_info);
veilid_log!(self debug "DialInfo failure: {:?}", dial_info);
inner
.dial_info_failures
.entry(dial_info)
@@ -274,7 +276,7 @@ impl AddressFilter {
}

pub fn punish_ip_addr(&self, addr: IpAddr, reason: PunishmentReason) {
warn!("Punished: {} for {:?}", addr, reason);
veilid_log!(self warn "Punished: {} for {:?}", addr, reason);
let timestamp = Timestamp::now();
let punishment = Punishment { reason, timestamp };

@@ -318,10 +320,10 @@ impl AddressFilter {

let mut inner = self.inner.lock();
if inner.punishments_by_node_id.len() >= MAX_PUNISHMENTS_BY_NODE_ID {
warn!("Punishment table full: {}", node_id);
veilid_log!(self warn "Punishment table full: {}", node_id);
return;
}
warn!("Punished: {} for {:?}", node_id, reason);
veilid_log!(self warn "Punished: {} for {:?}", node_id, reason);
inner
.punishments_by_node_id
.entry(node_id)
@@ -361,7 +363,7 @@ impl AddressFilter {
let cnt = inner.conn_count_by_ip4.entry(v4).or_default();
assert!(*cnt <= self.max_connections_per_ip4);
if *cnt == self.max_connections_per_ip4 {
warn!("Address filter count exceeded: {:?}", v4);
veilid_log!(self warn "Address filter count exceeded: {:?}", v4);
return Err(AddressFilterError::CountExceeded);
}
// See if this ip block has connected too frequently
@@ -372,7 +374,7 @@ impl AddressFilter {
});
assert!(tstamps.len() <= self.max_connection_frequency_per_min);
if tstamps.len() == self.max_connection_frequency_per_min {
warn!("Address filter rate exceeded: {:?}", v4);
veilid_log!(self warn "Address filter rate exceeded: {:?}", v4);
return Err(AddressFilterError::RateExceeded);
}

@@ -385,14 +387,14 @@ impl AddressFilter {
let cnt = inner.conn_count_by_ip6_prefix.entry(v6).or_default();
assert!(*cnt <= self.max_connections_per_ip6_prefix);
if *cnt == self.max_connections_per_ip6_prefix {
warn!("Address filter count exceeded: {:?}", v6);
veilid_log!(self warn "Address filter count exceeded: {:?}", v6);
return Err(AddressFilterError::CountExceeded);
}
// See if this ip block has connected too frequently
let tstamps = inner.conn_timestamps_by_ip6_prefix.entry(v6).or_default();
assert!(tstamps.len() <= self.max_connection_frequency_per_min);
if tstamps.len() == self.max_connection_frequency_per_min {
warn!("Address filter rate exceeded: {:?}", v6);
veilid_log!(self warn "Address filter rate exceeded: {:?}", v6);
return Err(AddressFilterError::RateExceeded);
}
@ -4,6 +4,8 @@ use connection_table::*;
|
||||
use network_connection::*;
|
||||
use stop_token::future::FutureExt;
|
||||
|
||||
impl_veilid_log_facility!("net");
|
||||
|
||||
const PROTECTED_CONNECTION_DROP_SPAN: TimestampDuration = TimestampDuration::new_secs(10);
|
||||
const PROTECTED_CONNECTION_DROP_COUNT: usize = 3;
|
||||
const NEW_CONNECTION_RETRY_COUNT: usize = 1;
|
||||
@ -132,7 +134,7 @@ impl ConnectionManager {
|
||||
pub async fn startup(&self) -> EyreResult<()> {
|
||||
let guard = self.arc.startup_lock.startup()?;
|
||||
|
||||
log_net!(debug "startup connection manager");
|
||||
veilid_log!(self debug "startup connection manager");
|
||||
|
||||
// Create channel for async_processor to receive notifications of networking events
|
||||
let (sender, receiver) = flume::unbounded();
|
||||
@ -164,14 +166,14 @@ impl ConnectionManager {
|
||||
}
|
||||
|
||||
pub async fn shutdown(&self) {
|
||||
log_net!(debug "starting connection manager shutdown");
|
||||
veilid_log!(self debug "starting connection manager shutdown");
|
||||
let Ok(guard) = self.arc.startup_lock.shutdown().await else {
|
||||
log_net!(debug "connection manager is already shut down");
|
||||
veilid_log!(self debug "connection manager is already shut down");
|
||||
return;
|
||||
};
|
||||
|
||||
// Stop the reconnection processor
|
||||
log_net!(debug "stopping reconnection processor task");
|
||||
veilid_log!(self debug "stopping reconnection processor task");
|
||||
self.arc.reconnection_processor.terminate().await;
|
||||
|
||||
// Remove the inner from the lock
|
||||
@ -185,18 +187,18 @@ impl ConnectionManager {
|
||||
}
|
||||
};
|
||||
// Stop all the connections and the async processor
|
||||
log_net!(debug "stopping async processor task");
|
||||
veilid_log!(self debug "stopping async processor task");
|
||||
drop(inner.stop_source.take());
|
||||
let async_processor_jh = inner.async_processor_jh.take().unwrap();
|
||||
// wait for the async processor to stop
|
||||
log_net!(debug "waiting for async processor to stop");
|
||||
veilid_log!(self debug "waiting for async processor to stop");
|
||||
async_processor_jh.await;
|
||||
// Wait for the connections to complete
|
||||
log_net!(debug "waiting for connection handlers to complete");
|
||||
veilid_log!(self debug "waiting for connection handlers to complete");
|
||||
self.arc.connection_table.join().await;
|
||||
|
||||
guard.success();
|
||||
log_net!(debug "finished connection manager shutdown");
|
||||
veilid_log!(self debug "finished connection manager shutdown");
|
||||
}
|
||||
|
||||
// Internal routine to see if we should keep this connection
|
||||
@ -267,11 +269,11 @@ impl ConnectionManager {
|
||||
.with_all_connections_mut(|conn| {
|
||||
if let Some(protect_nr) = conn.protected_node_ref() {
|
||||
if self.should_protect_connection(inner, conn).is_none() {
|
||||
log_net!(debug "== Unprotecting connection: {} -> {} for node {}", conn.connection_id(), conn.debug_print(Timestamp::now()), protect_nr);
|
||||
veilid_log!(self debug "== Unprotecting connection: {} -> {} for node {}", conn.connection_id(), conn.debug_print(Timestamp::now()), protect_nr);
|
||||
conn.unprotect();
|
||||
}
|
||||
} else if let Some(protect_nr) = self.should_protect_connection(inner, conn) {
|
||||
log_net!(debug "== Protecting existing connection: {} -> {} for node {}", conn.connection_id(), conn.debug_print(Timestamp::now()), protect_nr);
|
||||
veilid_log!(self debug "== Protecting existing connection: {} -> {} for node {}", conn.connection_id(), conn.debug_print(Timestamp::now()), protect_nr);
|
||||
conn.protect(protect_nr);
|
||||
}
|
||||
Option::<()>::None
|
||||
@ -291,7 +293,7 @@ impl ConnectionManager {
|
||||
// Get next connection id to use
|
||||
let id = inner.next_id;
|
||||
inner.next_id += 1u64;
|
||||
log_net!(
|
||||
veilid_log!(self trace
|
||||
"on_new_protocol_network_connection: id={} prot_conn={:?}",
|
||||
id,
|
||||
prot_conn
|
||||
@ -314,7 +316,7 @@ impl ConnectionManager {
|
||||
|
||||
// See if this should be a protected connection
|
||||
if let Some(protect_nr) = self.should_protect_connection(inner, &conn) {
|
||||
log_net!(debug "== Protecting new connection: {} -> {} for node {}", id, conn.debug_print(Timestamp::now()), protect_nr);
|
||||
veilid_log!(self debug "== Protecting new connection: {} -> {} for node {}", id, conn.debug_print(Timestamp::now()), protect_nr);
|
||||
conn.protect(protect_nr);
|
||||
}
|
||||
|
||||
@ -327,7 +329,7 @@ impl ConnectionManager {
|
||||
// Connection added and a different one LRU'd out
|
||||
// Send it to be terminated
|
||||
#[cfg(feature = "verbose-tracing")]
|
||||
log_net!(debug "== LRU kill connection due to limit: {:?}", conn.debug_print(Timestamp::now()));
|
||||
veilid_log!(self debug "== LRU kill connection due to limit: {:?}", conn.debug_print(Timestamp::now()));
|
||||
let _ = inner.sender.send(ConnectionManagerEvent::Dead(conn));
|
||||
}
|
||||
Err(ConnectionTableAddError::AddressFilter(conn, e)) => {
|
||||
@ -342,7 +344,7 @@ impl ConnectionManager {
|
||||
Err(ConnectionTableAddError::AlreadyExists(conn)) => {
|
||||
// Connection already exists
|
||||
let desc = conn.flow();
|
||||
log_net!(debug "== Connection already exists: {:?}", conn.debug_print(Timestamp::now()));
|
||||
veilid_log!(self debug "== Connection already exists: {:?}", conn.debug_print(Timestamp::now()));
|
||||
let _ = inner.sender.send(ConnectionManagerEvent::Dead(conn));
|
||||
return Ok(NetworkResult::no_connection_other(format!(
|
||||
"connection already exists: {:?}",
|
||||
@ -352,7 +354,7 @@ impl ConnectionManager {
|
||||
Err(ConnectionTableAddError::TableFull(conn)) => {
|
||||
// Connection table is full
|
||||
let desc = conn.flow();
|
||||
log_net!(debug "== Connection table full: {:?}", conn.debug_print(Timestamp::now()));
|
||||
veilid_log!(self debug "== Connection table full: {:?}", conn.debug_print(Timestamp::now()));
|
||||
let _ = inner.sender.send(ConnectionManagerEvent::Dead(conn));
|
||||
return Ok(NetworkResult::no_connection_other(format!(
|
||||
"connection table is full: {:?}",
|
||||
@ -415,7 +417,7 @@ impl ConnectionManager {
|
||||
// Async lock on the remote address for atomicity per remote
|
||||
let _lock_guard = self.arc.address_lock_table.lock_tag(remote_addr).await;
|
||||
|
||||
log_net!("== get_or_create_connection dial_info={:?}", dial_info);
|
||||
veilid_log!(self trace "== get_or_create_connection dial_info={:?}", dial_info);
|
||||
|
||||
// If any connection to this remote exists that has the same protocol, return it
|
||||
// Any connection will do, we don't have to match the local address but if we can
|
||||
@ -425,7 +427,7 @@ impl ConnectionManager {
|
||||
.connection_table
|
||||
.get_best_connection_by_remote(best_port, peer_address)
|
||||
{
|
||||
log_net!(
|
||||
veilid_log!(self trace
|
||||
"== Returning best existing connection {:?}",
|
||||
best_existing_conn
|
||||
);
|
||||
@ -449,6 +451,7 @@ impl ConnectionManager {
|
||||
|
||||
let prot_conn = network_result_try!(loop {
|
||||
let result_net_res = ProtocolNetworkConnection::connect(
|
||||
self.registry(),
|
||||
preferred_local_address,
|
||||
&dial_info,
|
||||
self.arc.connection_initial_timeout_ms,
|
||||
@ -471,7 +474,7 @@ impl ConnectionManager {
|
||||
}
|
||||
}
|
||||
};
|
||||
log_net!(debug "get_or_create_connection retries left: {}", retry_count);
|
||||
veilid_log!(self debug "get_or_create_connection retries left: {}", retry_count);
|
||||
retry_count -= 1;
|
||||
|
||||
// Release the preferred local address if things can't connect due to a low-level collision we dont have a record of
|
||||
@ -632,7 +635,7 @@ impl ConnectionManager {
|
||||
|
||||
if duration < PROTECTED_CONNECTION_DROP_SPAN {
|
||||
pa.drops_in_span += 1;
|
||||
log_net!(debug "== Protected connection dropped (count={}): {} -> {} for node {}", pa.drops_in_span, conn.connection_id(), conn.debug_print(Timestamp::now()), protect_nr);
|
||||
veilid_log!(self debug "== Protected connection dropped (count={}): {} -> {} for node {}", pa.drops_in_span, conn.connection_id(), conn.debug_print(Timestamp::now()), protect_nr);
|
||||
|
||||
if pa.drops_in_span >= PROTECTED_CONNECTION_DROP_COUNT {
|
||||
// Consider this as a failure to send if we've dropped the connection too many times in a single timespan
|
||||
@ -648,7 +651,7 @@ impl ConnectionManager {
|
||||
pa.drops_in_span = 1;
|
||||
pa.span_start_ts = cur_ts;
|
||||
|
||||
log_net!(debug "== Protected connection dropped (count={}): {} -> {} for node {}", pa.drops_in_span, conn.connection_id(), conn.debug_print(Timestamp::now()), protect_nr);
|
||||
veilid_log!(self debug "== Protected connection dropped (count={}): {} -> {} for node {}", pa.drops_in_span, conn.connection_id(), conn.debug_print(Timestamp::now()), protect_nr);
|
||||
}
|
||||
|
||||
// Reconnect the protected connection immediately
|
||||
@ -656,7 +659,7 @@ impl ConnectionManager {
|
||||
if let Some(dial_info) = conn.dial_info() {
|
||||
self.spawn_reconnector(dial_info);
|
||||
} else {
|
||||
log_net!(debug "Can't reconnect to accepted protected connection: {} -> {} for node {}", conn.connection_id(), conn.debug_print(Timestamp::now()), protect_nr);
|
||||
veilid_log!(self debug "Can't reconnect to accepted protected connection: {} -> {} for node {}", conn.connection_id(), conn.debug_print(Timestamp::now()), protect_nr);
|
||||
}
|
||||
}
|
||||
|
||||
@ -678,13 +681,13 @@ impl ConnectionManager {
|
||||
Box::pin(async move {
|
||||
match this.get_or_create_connection(dial_info.clone()).await {
|
||||
Ok(NetworkResult::Value(conn)) => {
|
||||
log_net!(debug "Reconnection successful to {}: {:?}", dial_info,conn);
|
||||
veilid_log!(this debug "Reconnection successful to {}: {:?}", dial_info,conn);
|
||||
}
|
||||
Ok(res) => {
|
||||
log_net!(debug "Reconnection unsuccessful to {}: {:?}", dial_info, res);
|
||||
veilid_log!(this debug "Reconnection unsuccessful to {}: {:?}", dial_info, res);
|
||||
}
|
||||
Err(e) => {
|
||||
log_net!(debug "Reconnection error to {}: {}", dial_info, e);
|
||||
veilid_log!(this debug "Reconnection error to {}: {}", dial_info, e);
|
||||
}
|
||||
}
|
||||
false
|
||||
|
@ -2,6 +2,8 @@ use super::*;
|
||||
use futures_util::StreamExt;
|
||||
use hashlink::LruCache;
|
||||
|
||||
impl_veilid_log_facility!("net");
|
||||
|
||||
/// Allow 25% of the table size to be occupied by priority flows
|
||||
/// that will not be subject to LRU termination.
|
||||
const PRIORITY_FLOW_PERCENTAGE: usize = 25;
|
||||
@ -110,7 +112,7 @@ impl ConnectionTable {
|
||||
let unord = FuturesUnordered::new();
|
||||
for table in &mut inner.conn_by_id {
|
||||
for (_, mut v) in table.drain() {
|
||||
log_net!("connection table join: {:?}", v);
|
||||
veilid_log!(self trace "connection table join: {:?}", v);
|
||||
v.close();
|
||||
unord.push(v);
|
||||
}
|
||||
|
@ -39,7 +39,7 @@ impl NetworkManager {
|
||||
// Send boot magic to requested peer address
|
||||
let data = BOOT_MAGIC.to_vec();
|
||||
|
||||
let out_data: Vec<u8> = network_result_value_or_log!(self
|
||||
let out_data: Vec<u8> = network_result_value_or_log!(self self
|
||||
.net()
|
||||
.send_recv_data_unbound_to_dial_info(dial_info, data, timeout_ms)
|
||||
.await? => [ format!(": dial_info={}, data.len={}", dial_info, data.len()) ]
|
||||
|
@ -49,6 +49,8 @@ pub use wasm::{/* LOCAL_NETWORK_CAPABILITIES, */ MAX_CAPABILITIES, PUBLIC_INTERN
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
impl_veilid_log_facility!("net");
|
||||
|
||||
pub const MAX_MESSAGE_SIZE: usize = MAX_ENVELOPE_SIZE;
|
||||
pub const IPADDR_TABLE_SIZE: usize = 1024;
|
||||
pub const IPADDR_MAX_INACTIVE_DURATION_US: TimestampDuration =
|
||||
@ -213,7 +215,7 @@ impl NetworkManager {
|
||||
let network_key_password = c.network.network_key_password.clone();
|
||||
let network_key = if let Some(network_key_password) = network_key_password {
|
||||
if !network_key_password.is_empty() {
|
||||
info!("Using network key");
|
||||
veilid_log!(registry info "Using network key");
|
||||
|
||||
let bcs = crypto.best();
|
||||
// Yes the use of the salt this way is generally bad, but this just needs to be hashed
|
||||
@ -310,7 +312,7 @@ impl NetworkManager {
|
||||
#[instrument(level = "debug", skip_all, err)]
|
||||
pub async fn internal_startup(&self) -> EyreResult<StartupDisposition> {
|
||||
if self.components.read().is_some() {
|
||||
log_net!(debug "NetworkManager::internal_startup already started");
|
||||
veilid_log!(self debug "NetworkManager::internal_startup already started");
|
||||
return Ok(StartupDisposition::Success);
|
||||
}
|
||||
|
||||
@ -320,7 +322,7 @@ impl NetworkManager {
|
||||
// Create network components
|
||||
let connection_manager = ConnectionManager::new(self.registry());
|
||||
let net = Network::new(self.registry());
|
||||
let receipt_manager = ReceiptManager::new();
|
||||
let receipt_manager = ReceiptManager::new(self.registry());
|
||||
|
||||
*self.components.write() = Some(NetworkComponents {
|
||||
net: net.clone(),
|
||||
@ -328,17 +330,7 @@ impl NetworkManager {
|
||||
receipt_manager: receipt_manager.clone(),
|
||||
});
|
||||
|
||||
let (detect_address_changes, ip6_prefix_size) = self.config().with(|c| {
|
||||
(
|
||||
c.network.detect_address_changes,
|
||||
c.network.max_connections_per_ip6_prefix_size as usize,
|
||||
)
|
||||
});
|
||||
let address_check_config = AddressCheckConfig {
|
||||
detect_address_changes,
|
||||
ip6_prefix_size,
|
||||
};
|
||||
let address_check = AddressCheck::new(address_check_config, net.clone());
|
||||
let address_check = AddressCheck::new(net.clone());
|
||||
|
||||
// Register event handlers
|
||||
let peer_info_change_subscription =
|
||||
@ -365,7 +357,7 @@ impl NetworkManager {
|
||||
|
||||
receipt_manager.startup().await?;
|
||||
|
||||
log_net!("NetworkManager::internal_startup end");
|
||||
veilid_log!(self trace "NetworkManager::internal_startup end");
|
||||
|
||||
Ok(StartupDisposition::Success)
|
||||
}
|
||||
@ -405,7 +397,7 @@ impl NetworkManager {
|
||||
}
|
||||
|
||||
// Shutdown network components if they started up
|
||||
log_net!(debug "shutting down network components");
|
||||
veilid_log!(self debug "shutting down network components");
|
||||
|
||||
{
|
||||
let components = self.components.read().clone();
|
||||
@ -418,7 +410,7 @@ impl NetworkManager {
|
||||
*self.components.write() = None;
|
||||
|
||||
// reset the state
|
||||
log_net!(debug "resetting network manager state");
|
||||
veilid_log!(self debug "resetting network manager state");
|
||||
{
|
||||
*self.inner.lock() = NetworkManager::new_inner();
|
||||
}
|
||||
@ -427,11 +419,11 @@ impl NetworkManager {
|
||||
#[instrument(level = "debug", skip_all)]
|
||||
pub async fn shutdown(&self) {
|
||||
// Cancel all tasks
|
||||
log_net!(debug "stopping network manager tasks");
|
||||
veilid_log!(self debug "stopping network manager tasks");
|
||||
self.cancel_tasks().await;
|
||||
|
||||
// Proceed with shutdown
|
||||
log_net!(debug "starting network manager shutdown");
|
||||
veilid_log!(self debug "starting network manager shutdown");
|
||||
let guard = self
|
||||
.startup_context
|
||||
.startup_lock
|
||||
@ -442,7 +434,7 @@ impl NetworkManager {
|
||||
self.shutdown_internal().await;
|
||||
|
||||
guard.success();
|
||||
log_net!(debug "finished network manager shutdown");
|
||||
veilid_log!(self debug "finished network manager shutdown");
|
||||
}
|
||||
|
||||
#[expect(dead_code)]
|
||||
@ -852,14 +844,14 @@ impl NetworkManager {
|
||||
let out = self.build_envelope(best_node_id, envelope_version, body)?;
|
||||
|
||||
if !node_ref.same_entry(&destination_node_ref) {
|
||||
log_net!(
|
||||
veilid_log!(self trace
|
||||
"sending envelope to {:?} via {:?}, len={}",
|
||||
destination_node_ref,
|
||||
node_ref,
|
||||
out.len()
|
||||
);
|
||||
} else {
|
||||
log_net!("sending envelope to {:?}, len={}", node_ref, out.len());
|
||||
veilid_log!(self trace "sending envelope to {:?}, len={}", node_ref, out.len());
|
||||
}
|
||||
|
||||
// Send the envelope via whatever means necessary
|
||||
@ -874,7 +866,7 @@ impl NetworkManager {
|
||||
rcpt_data: Vec<u8>,
|
||||
) -> EyreResult<()> {
|
||||
let Ok(_guard) = self.startup_context.startup_lock.enter() else {
|
||||
log_net!(debug "not sending out-of-band receipt to {} because network is stopped", dial_info);
|
||||
veilid_log!(self debug "not sending out-of-band receipt to {} because network is stopped", dial_info);
|
||||
return Ok(());
|
||||
};
|
||||
|
||||
@ -885,7 +877,7 @@ impl NetworkManager {
|
||||
// should not be subject to our ability to decode it
|
||||
|
||||
// Send receipt directly
|
||||
network_result_value_or_log!(self
|
||||
network_result_value_or_log!(self self
|
||||
.net()
|
||||
.send_data_unbound_to_dial_info(dial_info, rcpt_data)
|
||||
.await? => [ format!(": dial_info={}, rcpt_data.len={}", dial_info, rcpt_data.len()) ] {
|
||||
@ -904,7 +896,7 @@ impl NetworkManager {
|
||||
return Ok(false);
|
||||
};
|
||||
|
||||
log_net!("envelope of {} bytes received from {:?}", data.len(), flow);
|
||||
veilid_log!(self trace "envelope of {} bytes received from {:?}", data.len(), flow);
|
||||
let remote_addr = flow.remote_address().ip_addr();
|
||||
|
||||
// Network accounting
|
||||
@ -919,7 +911,7 @@ impl NetworkManager {
|
||||
|
||||
// Ensure we can read the magic number
|
||||
if data.len() < 4 {
|
||||
log_net!(debug "short packet");
|
||||
veilid_log!(self debug "short packet");
|
||||
self.address_filter()
|
||||
.punish_ip_addr(remote_addr, PunishmentReason::ShortPacket);
|
||||
return Ok(false);
|
||||
@ -932,20 +924,20 @@ impl NetworkManager {
|
||||
{
|
||||
Some(rd) => rd,
|
||||
None => {
|
||||
log_net!(debug "no routing domain for envelope received from {:?}", flow);
|
||||
veilid_log!(self debug "no routing domain for envelope received from {:?}", flow);
|
||||
return Ok(false);
|
||||
}
|
||||
};
|
||||
|
||||
// Is this a direct bootstrap request instead of an envelope?
|
||||
if data[0..4] == *BOOT_MAGIC {
|
||||
network_result_value_or_log!(self.handle_boot_request(flow).await? => [ format!(": flow={:?}", flow) ] {});
|
||||
network_result_value_or_log!(self self.handle_boot_request(flow).await? => [ format!(": flow={:?}", flow) ] {});
|
||||
return Ok(true);
|
||||
}
|
||||
|
||||
// Is this an out-of-band receipt instead of an envelope?
|
||||
if data[0..3] == *RECEIPT_MAGIC {
|
||||
network_result_value_or_log!(self.handle_out_of_band_receipt(data).await => [ format!(": data.len={}", data.len()) ] {});
|
||||
network_result_value_or_log!(self self.handle_out_of_band_receipt(data).await => [ format!(": data.len={}", data.len()) ] {});
|
||||
return Ok(true);
|
||||
}
|
||||
|
||||
@ -954,7 +946,7 @@ impl NetworkManager {
|
||||
let envelope = match Envelope::from_signed_data(&crypto, data, &self.network_key) {
|
||||
Ok(v) => v,
|
||||
Err(e) => {
|
||||
log_net!(debug "envelope failed to decode: {}", e);
|
||||
veilid_log!(self debug "envelope failed to decode: {}", e);
|
||||
// safe to punish here because relays also check here to ensure they arent forwarding things that don't decode
|
||||
self.address_filter()
|
||||
.punish_ip_addr(remote_addr, PunishmentReason::FailedToDecodeEnvelope);
|
||||
@ -983,7 +975,7 @@ impl NetworkManager {
|
||||
let ets = envelope.get_timestamp();
|
||||
if let Some(tsbehind) = tsbehind {
|
||||
if tsbehind.as_u64() != 0 && (ts > ets && ts.saturating_sub(ets) > tsbehind) {
|
||||
log_net!(debug
|
||||
veilid_log!(self debug
|
||||
"Timestamp behind: {}ms ({})",
|
||||
timestamp_to_secs(ts.saturating_sub(ets).as_u64()) * 1000f64,
|
||||
flow.remote()
|
||||
@ -993,7 +985,7 @@ impl NetworkManager {
|
||||
}
|
||||
if let Some(tsahead) = tsahead {
|
||||
if tsahead.as_u64() != 0 && (ts < ets && ets.saturating_sub(ts) > tsahead) {
|
||||
log_net!(debug
|
||||
veilid_log!(self debug
|
||||
"Timestamp ahead: {}ms ({})",
|
||||
timestamp_to_secs(ets.saturating_sub(ts).as_u64()) * 1000f64,
|
||||
flow.remote()
|
||||
@ -1033,7 +1025,7 @@ impl NetworkManager {
|
||||
{
|
||||
Ok(v) => v.map(|nr| nr.default_filtered()),
|
||||
Err(e) => {
|
||||
log_net!(debug "failed to resolve recipient node for relay, dropping relayed envelope: {}" ,e);
|
||||
veilid_log!(self debug "failed to resolve recipient node for relay, dropping relayed envelope: {}" ,e);
|
||||
return Ok(false);
|
||||
}
|
||||
}
|
||||
@ -1046,14 +1038,14 @@ impl NetworkManager {
|
||||
.config()
|
||||
.with(|c| c.capabilities.disable.contains(&CAP_RELAY))
|
||||
{
|
||||
log_net!(debug "node has relay capability disabled, dropping relayed envelope from {} to {}", sender_id, recipient_id);
|
||||
veilid_log!(self debug "node has relay capability disabled, dropping relayed envelope from {} to {}", sender_id, recipient_id);
|
||||
return Ok(false);
|
||||
}
|
||||
|
||||
// If our own node requires a relay, we should not be asked to relay
|
||||
// on behalf of other nodes, just drop relayed packets if we can't relay
|
||||
if routing_table.relay_node(routing_domain).is_some() {
|
||||
log_net!(debug "node requires a relay itself, dropping relayed envelope from {} to {}", sender_id, recipient_id);
|
||||
veilid_log!(self debug "node requires a relay itself, dropping relayed envelope from {} to {}", sender_id, recipient_id);
|
||||
return Ok(false);
|
||||
}
|
||||
|
||||
@ -1064,7 +1056,7 @@ impl NetworkManager {
|
||||
match routing_table.lookup_node_ref(recipient_id) {
|
||||
Ok(v) => v.map(|nr| nr.default_filtered()),
|
||||
Err(e) => {
|
||||
log_net!(debug "failed to look up recipient node for relay, dropping relayed envelope: {}" ,e);
|
||||
veilid_log!(self debug "failed to look up recipient node for relay, dropping relayed envelope: {}" ,e);
|
||||
return Ok(false);
|
||||
}
|
||||
}
|
||||
@ -1078,13 +1070,13 @@ impl NetworkManager {
|
||||
};
|
||||
|
||||
// Relay the packet to the desired destination
|
||||
log_net!("relaying {} bytes to {}", data.len(), relay_nr);
|
||||
veilid_log!(self trace "relaying {} bytes to {}", data.len(), relay_nr);
|
||||
|
||||
network_result_value_or_log!(match self.send_data(relay_nr, data.to_vec())
|
||||
network_result_value_or_log!(self match self.send_data(relay_nr, data.to_vec())
|
||||
.await {
|
||||
Ok(v) => v,
|
||||
Err(e) => {
|
||||
log_net!(debug "failed to forward envelope: {}" ,e);
|
||||
veilid_log!(self debug "failed to forward envelope: {}" ,e);
|
||||
return Ok(false);
|
||||
}
|
||||
} => [ format!(": relay_nr={}, data.len={}", relay_nr, data.len()) ] {
|
||||
@ -1104,7 +1096,7 @@ impl NetworkManager {
|
||||
let body = match envelope.decrypt_body(&crypto, data, &node_id_secret, &self.network_key) {
|
||||
Ok(v) => v,
|
||||
Err(e) => {
|
||||
log_net!(debug "failed to decrypt envelope body: {}", e);
|
||||
veilid_log!(self debug "failed to decrypt envelope body: {}", e);
|
||||
// Can't punish by ip address here because relaying can't decrypt envelope bodies to check
|
||||
// But because the envelope was properly signed by the time it gets here, it is safe to
|
||||
// punish by node id
|
||||
@ -1116,15 +1108,18 @@ impl NetworkManager {
|
||||
|
||||
// Add the sender's node without its peer info
|
||||
// Gets noderef filtered to the routing domain
|
||||
let sender_noderef =
|
||||
match routing_table.register_node_with_id(routing_domain, sender_id, ts) {
|
||||
Ok(v) => v,
|
||||
Err(e) => {
|
||||
// If the node couldn't be registered just skip this envelope,
|
||||
log_net!(debug "failed to register node with existing connection: {}", e);
|
||||
return Ok(false);
|
||||
}
|
||||
};
|
||||
let sender_noderef = match routing_table.register_node_with_id(
|
||||
routing_domain,
|
||||
sender_id,
|
||||
ts,
|
||||
) {
|
||||
Ok(v) => v,
|
||||
Err(e) => {
|
||||
// If the node couldn't be registered just skip this envelope,
|
||||
veilid_log!(self debug "failed to register node with existing connection: {}", e);
|
||||
return Ok(false);
|
||||
}
|
||||
};
|
||||
|
||||
// Filter the noderef further by its inbound flow
|
||||
let sender_noderef = sender_noderef.filtered_clone(
|
||||
@ -1144,7 +1139,7 @@ impl NetworkManager {
|
||||
rpc.enqueue_direct_message(envelope, sender_noderef, flow, routing_domain, body)
|
||||
{
|
||||
// Couldn't enqueue, but not the sender's fault
|
||||
log_net!(debug "failed to enqueue direct message: {}", e);
|
||||
veilid_log!(self debug "failed to enqueue direct message: {}", e);
|
||||
return Ok(false);
|
||||
}
|
||||
|
||||
|
@ -4,6 +4,8 @@ use super::*;
|
||||
use futures_util::stream::FuturesUnordered;
|
||||
use igd_manager::{IGDAddressType, IGDProtocolType};
|
||||
|
||||
impl_veilid_log_facility!("net");
|
||||
|
||||
const PORT_MAP_VALIDATE_TRY_COUNT: usize = 3;
|
||||
const PORT_MAP_VALIDATE_DELAY_MS: u32 = 500;
|
||||
const PORT_MAP_TRY_COUNT: usize = 3;
|
||||
@ -119,10 +121,10 @@ impl DiscoveryContext {
|
||||
// filtered down to the protocol/address type we are checking the public address for
|
||||
node_ref.clear_last_flows();
|
||||
|
||||
let res = network_result_value_or_log!(match rpc.rpc_call_status(Destination::direct(node_ref.clone())).await {
|
||||
let res = network_result_value_or_log!(self match rpc.rpc_call_status(Destination::direct(node_ref.clone())).await {
|
||||
Ok(v) => v,
|
||||
Err(e) => {
|
||||
log_net!(error
|
||||
veilid_log!(self error
|
||||
"failed to get status answer from {:?}: {}",
|
||||
node_ref, e
|
||||
);
|
||||
@ -133,8 +135,8 @@ impl DiscoveryContext {
|
||||
}
|
||||
);
|
||||
|
||||
log_network_result!(
|
||||
debug "request_public_address {:?}: Value({:?})",
|
||||
veilid_log!(self debug target:"network_result",
|
||||
"request_public_address {:?}: Value({:?})",
|
||||
node_ref,
|
||||
res.answer
|
||||
);
|
||||
@ -200,7 +202,7 @@ impl DiscoveryContext {
|
||||
filters,
|
||||
);
|
||||
if nodes.is_empty() {
|
||||
log_net!(debug
|
||||
veilid_log!(self debug
|
||||
"no external address detection peers of type {:?}:{:?}",
|
||||
protocol_type,
|
||||
address_type
|
||||
@ -262,7 +264,7 @@ impl DiscoveryContext {
|
||||
}
|
||||
}
|
||||
if external_address_infos.len() < EXTERNAL_INFO_VALIDATIONS {
|
||||
log_net!(debug "not enough peers ({}<{}) responded with an external address for type {:?}:{:?}",
|
||||
veilid_log!(self debug "not enough peers ({}<{}) responded with an external address for type {:?}:{:?}",
|
||||
external_address_infos.len(),
|
||||
EXTERNAL_INFO_VALIDATIONS,
|
||||
protocol_type,
|
||||
@ -291,7 +293,7 @@ impl DiscoveryContext {
|
||||
{
|
||||
let mut inner = self.inner.lock();
|
||||
inner.external_info = external_address_infos;
|
||||
log_net!(debug "External Addresses ({:?}:{:?}):\n{}",
|
||||
veilid_log!(self debug "External Addresses ({:?}:{:?}):\n{}",
|
||||
protocol_type,
|
||||
address_type,
|
||||
inner.external_info.iter().map(|x| format!(" {} <- {}",x.address, x.node)).collect::<Vec<_>>().join("\n"));
|
||||
@ -317,7 +319,7 @@ impl DiscoveryContext {
|
||||
.await
|
||||
{
|
||||
Err(e) => {
|
||||
log_net!("failed to send validate_dial_info to {:?}: {}", node_ref, e);
|
||||
veilid_log!(self trace "failed to send validate_dial_info to {:?}: {}", node_ref, e);
|
||||
false
|
||||
}
|
||||
Ok(v) => v,
|
||||
@ -380,7 +382,7 @@ impl DiscoveryContext {
|
||||
}
|
||||
|
||||
if validate_tries != PORT_MAP_VALIDATE_TRY_COUNT {
|
||||
log_net!(debug "UPNP port mapping succeeded but port {}/{} is still unreachable.\nretrying\n",
|
||||
veilid_log!(self debug "UPNP port mapping succeeded but port {}/{} is still unreachable.\nretrying\n",
|
||||
local_port, igd_protocol_type);
|
||||
sleep(PORT_MAP_VALIDATE_DELAY_MS).await
|
||||
} else {
|
||||
@ -398,7 +400,7 @@ impl DiscoveryContext {
|
||||
.await;
|
||||
|
||||
if tries == PORT_MAP_TRY_COUNT {
|
||||
warn!("UPNP port mapping succeeded but port {}/{} is still unreachable.\nYou may need to add a local firewall allowed port on this machine.\n",
|
||||
veilid_log!(self warn "UPNP port mapping succeeded but port {}/{} is still unreachable.\nYou may need to add a local firewall allowed port on this machine.\n",
|
||||
local_port, igd_protocol_type
|
||||
);
|
||||
break;
|
||||
|
@ -2,6 +2,8 @@ use super::*;
|
||||
use igd::*;
|
||||
use std::net::UdpSocket;
|
||||
|
||||
impl_veilid_log_facility!("net");
|
||||
|
||||
const UPNP_GATEWAY_DETECT_TIMEOUT_MS: u32 = 5_000;
|
||||
const UPNP_MAPPING_LIFETIME_MS: u32 = 120_000;
|
||||
const UPNP_MAPPING_ATTEMPTS: u32 = 3;
|
||||
@ -31,10 +33,12 @@ struct IGDManagerInner {
|
||||
|
||||
#[derive(Clone)]
|
||||
pub struct IGDManager {
|
||||
program_name: String,
|
||||
registry: VeilidComponentRegistry,
|
||||
inner: Arc<Mutex<IGDManagerInner>>,
|
||||
}
|
||||
|
||||
impl_veilid_component_registry_accessor!(IGDManager);
|
||||
|
||||
fn convert_protocol_type(igdpt: IGDProtocolType) -> PortMappingProtocol {
|
||||
match igdpt {
|
||||
IGDProtocolType::UDP => PortMappingProtocol::UDP,
|
||||
@ -76,9 +80,9 @@ impl IGDManager {
|
||||
/////////////////////////////////////////////////////////////////////
|
||||
// Public Interface
|
||||
|
||||
pub fn new(program_name: String) -> Self {
|
||||
pub fn new(registry: VeilidComponentRegistry) -> Self {
|
||||
Self {
|
||||
program_name,
|
||||
registry,
|
||||
inner: Arc::new(Mutex::new(IGDManagerInner {
|
||||
local_ip_addrs: BTreeMap::new(),
|
||||
gateways: BTreeMap::new(),
|
||||
@ -118,17 +122,17 @@ impl IGDManager {
|
||||
.expect("key found but remove failed");
|
||||
|
||||
// Get local ip address
|
||||
let local_ip = Self::find_local_ip(&mut inner, address_type)?;
|
||||
let local_ip = this.find_local_ip_inner(&mut inner, address_type)?;
|
||||
|
||||
// Find gateway
|
||||
let gw = Self::find_gateway(&mut inner, local_ip)?;
|
||||
let gw = this.find_gateway_inner(&mut inner, local_ip)?;
|
||||
|
||||
// Unmap port
|
||||
match gw.remove_port(convert_protocol_type(protocol_type), mapped_port) {
|
||||
Ok(()) => (),
|
||||
Err(e) => {
|
||||
// Failed to map external port
|
||||
log_net!(debug "upnp failed to remove external port: {}", e);
|
||||
veilid_log!(this debug "upnp failed to remove external port: {}", e);
|
||||
return None;
|
||||
}
|
||||
};
|
||||
@ -162,32 +166,32 @@ impl IGDManager {
|
||||
}
|
||||
|
||||
// Get local ip address
|
||||
let local_ip = Self::find_local_ip(&mut inner, address_type)?;
|
||||
let local_ip = this.find_local_ip_inner(&mut inner, address_type)?;
|
||||
|
||||
// Find gateway
|
||||
let gw = Self::find_gateway(&mut inner, local_ip)?;
|
||||
let gw = this.find_gateway_inner(&mut inner, local_ip)?;
|
||||
|
||||
// Get external address
|
||||
let ext_ip = match gw.get_external_ip() {
|
||||
Ok(ip) => ip,
|
||||
Err(e) => {
|
||||
log_net!(debug "couldn't get external ip from igd: {}", e);
|
||||
veilid_log!(this debug "couldn't get external ip from igd: {}", e);
|
||||
return None;
|
||||
}
|
||||
};
|
||||
|
||||
// Ensure external IP matches address type
|
||||
if ext_ip.is_ipv4() && address_type != IGDAddressType::IPV4 {
|
||||
log_net!(debug "mismatched ip address type from igd, wanted v4, got v6");
|
||||
veilid_log!(this debug "mismatched ip address type from igd, wanted v4, got v6");
|
||||
return None;
|
||||
} else if ext_ip.is_ipv6() && address_type != IGDAddressType::IPV6 {
|
||||
log_net!(debug "mismatched ip address type from igd, wanted v6, got v4");
|
||||
veilid_log!(this debug "mismatched ip address type from igd, wanted v6, got v4");
|
||||
return None;
|
||||
}
|
||||
|
||||
if let Some(expected_external_address) = expected_external_address {
|
||||
if ext_ip != expected_external_address {
|
||||
log_net!(debug "gateway external address does not match calculated external address: expected={} vs gateway={}", expected_external_address, ext_ip);
|
||||
veilid_log!(this debug "gateway external address does not match calculated external address: expected={} vs gateway={}", expected_external_address, ext_ip);
|
||||
return None;
|
||||
}
|
||||
}
|
||||
@ -198,7 +202,7 @@ impl IGDManager {
|
||||
Ok(mapped_port) => mapped_port,
|
||||
Err(e) => {
|
||||
// Failed to map external port
|
||||
log_net!(debug "upnp failed to map external port: {}", e);
|
||||
veilid_log!(this debug "upnp failed to map external port: {}", e);
|
||||
return None;
|
||||
}
|
||||
};
|
||||
@ -268,7 +272,7 @@ impl IGDManager {
|
||||
// Process full renewals
|
||||
for (k, v) in full_renews {
|
||||
// Get local ip for address type
|
||||
let local_ip = match Self::get_local_ip(&mut inner, k.address_type) {
|
||||
let local_ip = match this.get_local_ip_inner(&mut inner, k.address_type) {
|
||||
Some(ip) => ip,
|
||||
None => {
|
||||
return Err(eyre!("local ip missing for address type"));
|
||||
@ -276,7 +280,7 @@ impl IGDManager {
|
||||
};
|
||||
|
||||
// Get gateway for interface
|
||||
let gw = match Self::get_gateway(&mut inner, local_ip) {
|
||||
let gw = match Self::get_gateway_inner(&mut inner, local_ip) {
|
||||
Some(gw) => gw,
|
||||
None => {
|
||||
return Err(eyre!("gateway missing for interface"));
|
||||
@ -295,7 +299,7 @@ impl IGDManager {
|
||||
&desc,
|
||||
) {
|
||||
Ok(mapped_port) => {
|
||||
log_net!(debug "full-renewed mapped port {:?} -> {:?}", v, k);
|
||||
veilid_log!(this debug "full-renewed mapped port {:?} -> {:?}", v, k);
|
||||
inner.port_maps.insert(
|
||||
k,
|
||||
PortMapValue {
|
||||
@ -309,7 +313,7 @@ impl IGDManager {
|
||||
);
|
||||
}
|
||||
Err(e) => {
|
||||
info!("failed to full-renew mapped port {:?} -> {:?}: {}", v, k, e);
|
||||
veilid_log!(this info "failed to full-renew mapped port {:?} -> {:?}: {}", v, k, e);
|
||||
|
||||
// Must restart network now :(
|
||||
return Ok(false);
|
||||
@ -319,7 +323,7 @@ impl IGDManager {
|
||||
// Process normal renewals
|
||||
for (k, mut v) in renews {
|
||||
// Get local ip for address type
|
||||
let local_ip = match Self::get_local_ip(&mut inner, k.address_type) {
|
||||
let local_ip = match this.get_local_ip_inner(&mut inner, k.address_type) {
|
||||
Some(ip) => ip,
|
||||
None => {
|
||||
return Err(eyre!("local ip missing for address type"));
|
||||
@ -327,7 +331,7 @@ impl IGDManager {
|
||||
};
|
||||
|
||||
// Get gateway for interface
|
||||
let gw = match Self::get_gateway(&mut inner, local_ip) {
|
||||
let gw = match Self::get_gateway_inner(&mut inner, local_ip) {
|
||||
Some(gw) => gw,
|
||||
None => {
|
||||
return Err(eyre!("gateway missing for address type"));
|
||||
@ -343,7 +347,7 @@ impl IGDManager {
|
||||
&desc,
|
||||
) {
|
||||
Ok(()) => {
|
||||
log_net!("renewed mapped port {:?} -> {:?}", v, k);
|
||||
veilid_log!(this trace "renewed mapped port {:?} -> {:?}", v, k);
|
||||
|
||||
inner.port_maps.insert(
|
||||
k,
|
||||
@ -358,7 +362,7 @@ impl IGDManager {
|
||||
);
|
||||
}
|
||||
Err(e) => {
|
||||
log_net!(debug "failed to renew mapped port {:?} -> {:?}: {}", v, k, e);
|
||||
veilid_log!(this debug "failed to renew mapped port {:?} -> {:?}: {}", v, k, e);
|
||||
|
||||
// Get closer to the maximum renewal timeline by a factor of two each time
|
||||
v.renewal_lifetime =
|
||||
@ -384,14 +388,14 @@ impl IGDManager {
|
||||
// Private Implementation
|
||||
|
||||
#[instrument(level = "trace", target = "net", skip_all)]
|
||||
fn get_routed_local_ip_address(address_type: IGDAddressType) -> Option<IpAddr> {
|
||||
fn get_routed_local_ip_address(&self, address_type: IGDAddressType) -> Option<IpAddr> {
|
||||
let socket = match UdpSocket::bind(match address_type {
|
||||
IGDAddressType::IPV4 => SocketAddr::new(IpAddr::V4(Ipv4Addr::UNSPECIFIED), 0),
|
||||
IGDAddressType::IPV6 => SocketAddr::new(IpAddr::V6(Ipv6Addr::UNSPECIFIED), 0),
|
||||
}) {
|
||||
Ok(s) => s,
|
||||
Err(e) => {
|
||||
log_net!(debug "failed to bind to unspecified address: {}", e);
|
||||
veilid_log!(self debug "failed to bind to unspecified address: {}", e);
|
||||
return None;
|
||||
}
|
||||
};
|
||||
@ -408,7 +412,7 @@ impl IGDManager {
|
||||
),
|
||||
})
|
||||
.map_err(|e| {
|
||||
log_net!(debug "failed to connect to dummy address: {}", e);
|
||||
veilid_log!(self debug "failed to connect to dummy address: {}", e);
|
||||
e
|
||||
})
|
||||
.ok()?;
|
||||
@ -417,15 +421,19 @@ impl IGDManager {
|
||||
}
|
||||
|
||||
#[instrument(level = "trace", target = "net", skip_all)]
|
||||
fn find_local_ip(inner: &mut IGDManagerInner, address_type: IGDAddressType) -> Option<IpAddr> {
|
||||
fn find_local_ip_inner(
|
||||
&self,
|
||||
inner: &mut IGDManagerInner,
|
||||
address_type: IGDAddressType,
|
||||
) -> Option<IpAddr> {
|
||||
if let Some(ip) = inner.local_ip_addrs.get(&address_type) {
|
||||
return Some(*ip);
|
||||
}
|
||||
|
||||
let ip = match Self::get_routed_local_ip_address(address_type) {
|
||||
let ip = match self.get_routed_local_ip_address(address_type) {
|
||||
Some(x) => x,
|
||||
None => {
|
||||
log_net!(debug "failed to get local ip address: address_type={:?}", address_type);
|
||||
veilid_log!(self debug "failed to get local ip address: address_type={:?}", address_type);
|
||||
return None;
|
||||
}
|
||||
};
|
||||
@ -435,7 +443,11 @@ impl IGDManager {
|
||||
}
|
||||
|
||||
#[instrument(level = "trace", target = "net", skip_all)]
|
||||
fn get_local_ip(inner: &mut IGDManagerInner, address_type: IGDAddressType) -> Option<IpAddr> {
|
||||
fn get_local_ip_inner(
|
||||
&self,
|
||||
inner: &mut IGDManagerInner,
|
||||
address_type: IGDAddressType,
|
||||
) -> Option<IpAddr> {
|
||||
if let Some(ip) = inner.local_ip_addrs.get(&address_type) {
|
||||
return Some(*ip);
|
||||
}
|
||||
@ -443,7 +455,11 @@ impl IGDManager {
|
||||
}
|
||||
|
||||
#[instrument(level = "trace", target = "net", skip_all)]
|
||||
fn find_gateway(inner: &mut IGDManagerInner, local_ip: IpAddr) -> Option<Arc<Gateway>> {
|
||||
fn find_gateway_inner(
|
||||
&self,
|
||||
inner: &mut IGDManagerInner,
|
||||
local_ip: IpAddr,
|
||||
) -> Option<Arc<Gateway>> {
|
||||
if let Some(gw) = inner.gateways.get(&local_ip) {
|
||||
return Some(gw.clone());
|
||||
}
|
||||
@ -456,7 +472,7 @@ impl IGDManager {
|
||||
match igd::search_gateway(opts) {
|
||||
Ok(v) => v,
|
||||
Err(e) => {
|
||||
log_net!(debug "couldn't find ipv4 igd: {}", e);
|
||||
veilid_log!(self debug "couldn't find ipv4 igd: {}", e);
|
||||
return None;
|
||||
}
|
||||
}
|
||||
@ -471,7 +487,7 @@ impl IGDManager {
|
||||
match igd::search_gateway(opts) {
|
||||
Ok(v) => v,
|
||||
Err(e) => {
|
||||
log_net!(debug "couldn't find ipv6 igd: {}", e);
|
||||
veilid_log!(self debug "couldn't find ipv6 igd: {}", e);
|
||||
return None;
|
||||
}
|
||||
}
|
||||
@ -483,7 +499,7 @@ impl IGDManager {
|
||||
}
|
||||
|
||||
#[instrument(level = "trace", target = "net", skip_all)]
|
||||
fn get_gateway(inner: &mut IGDManagerInner, local_ip: IpAddr) -> Option<Arc<Gateway>> {
|
||||
fn get_gateway_inner(inner: &mut IGDManagerInner, local_ip: IpAddr) -> Option<Arc<Gateway>> {
|
||||
if let Some(gw) = inner.gateways.get(&local_ip) {
|
||||
return Some(gw.clone());
|
||||
}
|
||||
@ -493,7 +509,9 @@ impl IGDManager {
|
||||
fn get_description(&self, protocol_type: IGDProtocolType, local_port: u16) -> String {
|
||||
format!(
|
||||
"{} map {} for port {}",
|
||||
self.program_name, protocol_type, local_port
|
||||
self.registry.program_name(),
|
||||
protocol_type,
|
||||
local_port
|
||||
)
|
||||
}
|
||||
}
|
||||
|
@ -31,6 +31,8 @@ use std::path::{Path, PathBuf};
|
||||
|
||||
/////////////////////////////////////////////////////////////////
|
||||
|
||||
impl_veilid_log_facility!("net");
|
||||
|
||||
pub const MAX_DIAL_INFO_FAILURE_COUNT: usize = 100;
|
||||
pub const UPDATE_OUTBOUND_ONLY_NETWORK_CLASS_PERIOD_SECS: u32 = 10;
|
||||
pub const UPDATE_NETWORK_CLASS_TASK_TICK_PERIOD_SECS: u32 = 1;
|
||||
@ -171,8 +173,6 @@ impl Network {
|
||||
}
|
||||
|
||||
fn new_unlocked_inner(registry: VeilidComponentRegistry) -> NetworkUnlockedInner {
|
||||
let config = registry.config();
|
||||
let program_name = config.get().program_name.clone();
|
||||
NetworkUnlockedInner {
|
||||
startup_lock: StartupLock::new(),
|
||||
interfaces: NetworkInterfaces::new(),
|
||||
@ -186,7 +186,7 @@ impl Network {
|
||||
),
|
||||
upnp_task: TickTask::new("upnp_task", UPNP_TASK_TICK_PERIOD_SECS),
|
||||
network_task_lock: AsyncMutex::new(()),
|
||||
igd_manager: igd_manager::IGDManager::new(program_name),
|
||||
igd_manager: igd_manager::IGDManager::new(registry),
|
||||
}
|
||||
}
|
||||
|
||||
@ -234,22 +234,22 @@ impl Network {
|
||||
let config = self.config();
|
||||
let c = config.get();
|
||||
//
|
||||
log_net!(
|
||||
veilid_log!(self trace
|
||||
"loading certificate from {}",
|
||||
c.network.tls.certificate_path
|
||||
);
|
||||
let certs = Self::load_certs(&PathBuf::from(&c.network.tls.certificate_path))?;
|
||||
log_net!("loaded {} certificates", certs.len());
|
||||
veilid_log!(self trace "loaded {} certificates", certs.len());
|
||||
if certs.is_empty() {
|
||||
return Err(io::Error::new(io::ErrorKind::InvalidInput, format!("Certificates at {} could not be loaded.\nEnsure it is in PEM format, beginning with '-----BEGIN CERTIFICATE-----'",c.network.tls.certificate_path)));
|
||||
}
|
||||
//
|
||||
log_net!(
|
||||
veilid_log!(self trace
|
||||
"loading private key from {}",
|
||||
c.network.tls.private_key_path
|
||||
);
|
||||
let mut keys = Self::load_keys(&PathBuf::from(&c.network.tls.private_key_path))?;
|
||||
log_net!("loaded {} keys", keys.len());
|
||||
veilid_log!(self trace "loaded {} keys", keys.len());
|
||||
if keys.is_empty() {
|
||||
return Err(io::Error::new(io::ErrorKind::InvalidInput, format!("Private key at {} could not be loaded.\nEnsure it is unencrypted and in RSA or PKCS8 format, beginning with '-----BEGIN RSA PRIVATE KEY-----' or '-----BEGIN PRIVATE KEY-----'",c.network.tls.private_key_path)));
|
||||
}
|
||||
@ -337,7 +337,7 @@ impl Network {
|
||||
};
|
||||
|
||||
if dial_info_failure_count == MAX_DIAL_INFO_FAILURE_COUNT {
|
||||
log_net!(debug "Node may be offline. Exceeded maximum dial info failure count for {:?}", rd);
|
||||
veilid_log!(self debug "Node may be offline. Exceeded maximum dial info failure count for {:?}", rd);
|
||||
// todo: what operations should we perform here?
|
||||
// self.set_needs_dial_info_check(rd);
|
||||
}
|
||||
@ -400,6 +400,7 @@ impl Network {
|
||||
ProtocolType::TCP => {
|
||||
let peer_socket_addr = dial_info.to_socket_addr();
|
||||
let pnc = network_result_try!(RawTcpProtocolHandler::connect(
|
||||
self.registry(),
|
||||
None,
|
||||
peer_socket_addr,
|
||||
connect_timeout_ms
|
||||
@ -410,6 +411,7 @@ impl Network {
|
||||
}
|
||||
ProtocolType::WS | ProtocolType::WSS => {
|
||||
let pnc = network_result_try!(WebsocketProtocolHandler::connect(
|
||||
self.registry(),
|
||||
None,
|
||||
&dial_info,
|
||||
connect_timeout_ms
|
||||
@ -507,6 +509,7 @@ impl Network {
|
||||
ProtocolType::TCP => {
|
||||
let peer_socket_addr = dial_info.to_socket_addr();
|
||||
RawTcpProtocolHandler::connect(
|
||||
self.registry(),
|
||||
None,
|
||||
peer_socket_addr,
|
||||
connect_timeout_ms,
|
||||
@ -516,6 +519,7 @@ impl Network {
|
||||
}
|
||||
ProtocolType::WS | ProtocolType::WSS => {
|
||||
WebsocketProtocolHandler::connect(
|
||||
self.registry(),
|
||||
None,
|
||||
&dial_info,
|
||||
connect_timeout_ms,
|
||||
@ -571,7 +575,7 @@ impl Network {
|
||||
&peer_socket_addr,
|
||||
&flow.local().map(|sa| sa.socket_addr()),
|
||||
) {
|
||||
network_result_value_or_log!(ph.clone()
|
||||
network_result_value_or_log!(self ph.clone()
|
||||
.send_message(data.clone(), peer_socket_addr)
|
||||
.await
|
||||
.wrap_err("sending data to existing connection")? => [ format!(": data.len={}, flow={:?}", data.len(), flow) ]
|
||||
@ -837,7 +841,7 @@ impl Network {
|
||||
// with detect address changes turned off
|
||||
let pi = routing_table.get_current_peer_info(RoutingDomain::PublicInternet);
|
||||
if !pi.signed_node_info().has_any_dial_info() {
|
||||
warn!(
|
||||
veilid_log!(self warn
|
||||
"This node has no valid public dial info.\nConfigure this node with a static public IP address and correct firewall rules."
|
||||
);
|
||||
}
|
||||
@ -888,7 +892,7 @@ impl Network {
|
||||
|
||||
match self.startup_internal().await {
|
||||
Ok(StartupDisposition::Success) => {
|
||||
info!("network started");
|
||||
veilid_log!(self info "network started");
|
||||
guard.success();
|
||||
Ok(StartupDisposition::Success)
|
||||
}
|
||||
@ -927,17 +931,17 @@ impl Network {
|
||||
let mut inner = self.inner.lock();
|
||||
// take the join handles out
|
||||
for h in inner.join_handles.drain(..) {
|
||||
log_net!("joining: {:?}", h);
|
||||
veilid_log!(self trace "joining: {:?}", h);
|
||||
unord.push(h);
|
||||
}
|
||||
// Drop the stop
|
||||
drop(inner.stop_source.take());
|
||||
}
|
||||
log_net!(debug "stopping {} low level network tasks", unord.len());
|
||||
veilid_log!(self debug "stopping {} low level network tasks", unord.len());
|
||||
// Wait for everything to stop
|
||||
while unord.next().await.is_some() {}
|
||||
|
||||
log_net!(debug "clearing dial info");
|
||||
veilid_log!(self debug "clearing dial info");
|
||||
|
||||
routing_table
|
||||
.edit_public_internet_routing_domain()
|
||||
@ -955,23 +959,23 @@ impl Network {
|
||||
|
||||
#[instrument(level = "debug", skip_all)]
|
||||
pub async fn shutdown(&self) {
|
||||
log_net!(debug "starting low level network shutdown");
|
||||
veilid_log!(self debug "starting low level network shutdown");
|
||||
let Ok(guard) = self.startup_lock.shutdown().await else {
|
||||
log_net!(debug "low level network is already shut down");
|
||||
veilid_log!(self debug "low level network is already shut down");
|
||||
return;
|
||||
};
|
||||
|
||||
self.shutdown_internal().await;
|
||||
|
||||
guard.success();
|
||||
log_net!(debug "finished low level network shutdown");
|
||||
veilid_log!(self debug "finished low level network shutdown");
|
||||
}
|
||||
|
||||
//////////////////////////////////////////
|
||||
|
||||
pub fn needs_update_network_class(&self) -> bool {
|
||||
let Ok(_guard) = self.startup_lock.enter() else {
|
||||
log_net!(debug "ignoring due to not started up");
|
||||
veilid_log!(self debug "ignoring due to not started up");
|
||||
return false;
|
||||
};
|
||||
|
||||
@ -980,7 +984,7 @@ impl Network {
|
||||
|
||||
pub fn trigger_update_network_class(&self, routing_domain: RoutingDomain) {
|
||||
let Ok(_guard) = self.startup_lock.enter() else {
|
||||
log_net!(debug "ignoring due to not started up");
|
||||
veilid_log!(self debug "ignoring due to not started up");
|
||||
return;
|
||||
};
|
||||
|
||||
|
@ -55,10 +55,20 @@ impl Network {
|
||||
|
||||
pub(super) async fn make_network_state(&self) -> EyreResult<NetworkState> {
|
||||
// refresh network interfaces
|
||||
self.interfaces
|
||||
let old_stable_addresses = self.interfaces.stable_addresses();
|
||||
if self
|
||||
.interfaces
|
||||
.refresh()
|
||||
.await
|
||||
.wrap_err("failed to refresh network interfaces")?;
|
||||
.wrap_err("failed to refresh network interfaces")?
|
||||
{
|
||||
let new_stable_addresses = self.interfaces.stable_addresses();
|
||||
|
||||
veilid_log!(self debug
|
||||
"Network interface addresses changed: \nFrom: {:?}\n To: {:?}\n",
|
||||
old_stable_addresses, new_stable_addresses
|
||||
);
|
||||
}
|
||||
|
||||
// build the set of networks we should consider for the 'LocalNetwork' routing domain
|
||||
let mut local_networks: HashSet<(IpAddr, IpAddr)> = HashSet::new();
|
||||
|
@ -116,7 +116,7 @@ impl Network {
|
||||
let peer_addr = match tcp_stream.peer_addr() {
|
||||
Ok(addr) => addr,
|
||||
Err(e) => {
|
||||
log_net!(debug "failed to get peer address: {}", e);
|
||||
veilid_log!(self debug "failed to get peer address: {}", e);
|
||||
return;
|
||||
}
|
||||
};
|
||||
@ -132,26 +132,26 @@ impl Network {
|
||||
let local_addr = match tcp_stream.local_addr() {
|
||||
Ok(addr) => addr,
|
||||
Err(e) => {
|
||||
log_net!(debug "failed to get local address: {}", e);
|
||||
veilid_log!(self debug "failed to get local address: {}", e);
|
||||
return;
|
||||
}
|
||||
};
|
||||
|
||||
if let Err(e) = set_tcp_stream_linger(&tcp_stream, Some(core::time::Duration::from_secs(0)))
|
||||
{
|
||||
log_net!(debug "Couldn't set TCP linger: {}", e);
|
||||
veilid_log!(self debug "Couldn't set TCP linger: {}", e);
|
||||
return;
|
||||
}
|
||||
|
||||
if let Err(e) = tcp_stream.set_nodelay(true) {
|
||||
log_net!(debug "Couldn't set TCP nodelay: {}", e);
|
||||
veilid_log!(self debug "Couldn't set TCP nodelay: {}", e);
|
||||
return;
|
||||
}
|
||||
|
||||
let listener_state = listener_state.clone();
|
||||
let connection_manager = connection_manager.clone();
|
||||
|
||||
log_net!("TCP connection from: {}", peer_addr);
|
||||
veilid_log!(self trace "TCP connection from: {}", peer_addr);
|
||||
|
||||
// Create a stream we can peek on
|
||||
#[cfg(feature = "rt-tokio")]
|
||||
@ -171,7 +171,7 @@ impl Network {
|
||||
{
|
||||
// If we fail to get a packet within the connection initial timeout
|
||||
// then we punt this connection
|
||||
log_net!("connection initial timeout from: {:?}", peer_addr);
|
||||
veilid_log!(self trace "connection initial timeout from: {:?}", peer_addr);
|
||||
return;
|
||||
}
|
||||
|
||||
@ -197,17 +197,17 @@ impl Network {
|
||||
|
||||
let conn = match conn {
|
||||
Ok(Some(c)) => {
|
||||
log_net!("protocol handler found for {:?}: {:?}", peer_addr, c);
|
||||
veilid_log!(self trace "protocol handler found for {:?}: {:?}", peer_addr, c);
|
||||
c
|
||||
}
|
||||
Ok(None) => {
|
||||
// No protocol handlers matched? drop it.
|
||||
log_net!(debug "no protocol handler for connection from {:?}", peer_addr);
|
||||
veilid_log!(self debug "no protocol handler for connection from {:?}", peer_addr);
|
||||
return;
|
||||
}
|
||||
Err(e) => {
|
||||
// Failed to negotiate connection? drop it.
|
||||
log_net!(debug "failed to negotiate connection from {:?}: {}", peer_addr, e);
|
||||
veilid_log!(self debug "failed to negotiate connection from {:?}: {}", peer_addr, e);
|
||||
return;
|
||||
}
|
||||
};
|
||||
@ -217,7 +217,7 @@ impl Network {
|
||||
.on_accepted_protocol_network_connection(conn)
|
||||
.await
|
||||
{
|
||||
log_net!(error "failed to register new connection: {}", e);
|
||||
veilid_log!(self error "failed to register new connection: {}", e);
|
||||
}
|
||||
}
|
||||
|
||||
@ -237,7 +237,7 @@ impl Network {
|
||||
return Ok(false);
|
||||
};
|
||||
|
||||
log_net!(debug "spawn_socket_listener: binding successful to {}", addr);
|
||||
veilid_log!(self debug "spawn_socket_listener: binding successful to {}", addr);
|
||||
|
||||
// Create protocol handler records
|
||||
let listener_state = Arc::new(RwLock::new(ListenerState::new()));
|
||||
@ -275,10 +275,10 @@ impl Network {
|
||||
.timeout_at(stop_token)
|
||||
.await;
|
||||
|
||||
log_net!(debug "exited incoming loop for {}", addr);
|
||||
veilid_log!(this debug "exited incoming loop for {}", addr);
|
||||
// Remove our listener state from this address if we're stopping
|
||||
this.inner.lock().listener_states.remove(&addr);
|
||||
log_net!(debug "listener state removed for {}", addr);
|
||||
veilid_log!(this debug "listener state removed for {}", addr);
|
||||
});
|
||||
////////////////////////////////////////////////////////////
|
||||
|
||||
@ -329,17 +329,11 @@ impl Network {
|
||||
}
|
||||
ls.write()
|
||||
.tls_protocol_handlers
|
||||
.push(new_protocol_accept_handler(
|
||||
self.network_manager().config(),
|
||||
true,
|
||||
));
|
||||
.push(new_protocol_accept_handler(self.registry(), true));
|
||||
} else {
|
||||
ls.write()
|
||||
.protocol_accept_handlers
|
||||
.push(new_protocol_accept_handler(
|
||||
self.network_manager().config(),
|
||||
false,
|
||||
));
|
||||
.push(new_protocol_accept_handler(self.registry(), false));
|
||||
}
|
||||
|
||||
// Return interface dial infos we listen on
|
||||
@ -350,7 +344,7 @@ impl Network {
|
||||
.or_default();
|
||||
bapp.push(addr);
|
||||
|
||||
log_net!(
|
||||
veilid_log!(self
|
||||
debug
|
||||
"set_preferred_local_address: {:?} {:?} -> {:?}",
|
||||
protocol_type,
|
||||
@ -367,7 +361,7 @@ impl Network {
|
||||
}
|
||||
|
||||
if !bind_set.search {
|
||||
log_net!(debug "unable to bind to tcp {}", addr);
|
||||
veilid_log!(self debug "unable to bind to tcp {}", addr);
|
||||
return Ok(false);
|
||||
}
|
||||
|
||||
|
@ -14,16 +14,16 @@ impl Network {
|
||||
task_count = 1;
|
||||
}
|
||||
}
|
||||
log_net!("task_count: {}", task_count);
|
||||
veilid_log!(self trace "task_count: {}", task_count);
|
||||
for task_n in 0..task_count {
|
||||
log_net!("Spawning UDP listener task");
|
||||
veilid_log!(self trace "Spawning UDP listener task");
|
||||
|
||||
////////////////////////////////////////////////////////////
|
||||
// Run thread task to process stream of messages
|
||||
let this = self.clone();
|
||||
|
||||
let jh = spawn(&format!("UDP listener {}", task_n), async move {
|
||||
log_net!("UDP listener task spawned");
|
||||
veilid_log!(this trace "UDP listener task spawned");
|
||||
|
||||
// Collect all our protocol handlers into a vector
|
||||
let protocol_handlers: Vec<RawUdpProtocolHandler> = this
|
||||
@ -39,7 +39,7 @@ impl Network {
|
||||
let stop_token = {
|
||||
let inner = this.inner.lock();
|
||||
if inner.stop_source.is_none() {
|
||||
log_net!(debug "exiting UDP listener before it starts because we encountered an error");
|
||||
veilid_log!(this debug "exiting UDP listener before it starts because we encountered an error");
|
||||
return;
|
||||
}
|
||||
inner.stop_source.as_ref().unwrap().token()
|
||||
@ -70,7 +70,7 @@ impl Network {
|
||||
.on_recv_envelope(&mut data[..size], flow)
|
||||
.await
|
||||
{
|
||||
log_net!(debug "failed to process received udp envelope: {}", e);
|
||||
veilid_log!(network_manager debug "failed to process received udp envelope: {}", e);
|
||||
}
|
||||
}
|
||||
Ok(Err(_)) => {
|
||||
@ -96,7 +96,7 @@ impl Network {
|
||||
}
|
||||
}
|
||||
|
||||
log_net!("UDP listener task stopped");
|
||||
veilid_log!(this trace "UDP listener task stopped");
|
||||
}.instrument(trace_span!(parent: None, "UDP Listener")));
|
||||
////////////////////////////////////////////////////////////
|
||||
|
||||
@ -109,7 +109,7 @@ impl Network {
|
||||
|
||||
#[instrument(level = "trace", skip_all)]
|
||||
async fn create_udp_protocol_handler(&self, addr: SocketAddr) -> EyreResult<bool> {
|
||||
log_net!(debug "create_udp_protocol_handler on {:?}", &addr);
|
||||
veilid_log!(self debug "create_udp_protocol_handler on {:?}", &addr);
|
||||
|
||||
// Create a single-address-family UDP socket with default options bound to an address
|
||||
let Some(udp_socket) = bind_async_udp_socket(addr)? else {
|
||||
@ -158,7 +158,7 @@ impl Network {
|
||||
.or_default();
|
||||
bapp.push(addr);
|
||||
|
||||
log_net!(
|
||||
veilid_log!(self
|
||||
debug
|
||||
"set_preferred_local_address: {:?} {:?} -> {:?}",
|
||||
ProtocolType::UDP,
|
||||
@ -178,7 +178,7 @@ impl Network {
|
||||
}
|
||||
}
|
||||
if !bind_set.search {
|
||||
log_net!(debug "unable to bind to udp {}", addr);
|
||||
veilid_log!(self debug "unable to bind to udp {}", addr);
|
||||
return Ok(false);
|
||||
}
|
||||
|
||||
|
@ -18,6 +18,7 @@ pub(crate) enum ProtocolNetworkConnection {
|
||||
|
||||
impl ProtocolNetworkConnection {
|
||||
pub async fn connect(
|
||||
registry: VeilidComponentRegistry,
|
||||
local_address: Option<SocketAddr>,
|
||||
dial_info: &DialInfo,
|
||||
timeout_ms: u32,
|
||||
@ -32,6 +33,7 @@ impl ProtocolNetworkConnection {
|
||||
}
|
||||
ProtocolType::TCP => {
|
||||
tcp::RawTcpProtocolHandler::connect(
|
||||
registry,
|
||||
local_address,
|
||||
dial_info.to_socket_addr(),
|
||||
timeout_ms,
|
||||
@ -39,7 +41,13 @@ impl ProtocolNetworkConnection {
|
||||
.await
|
||||
}
|
||||
ProtocolType::WS | ProtocolType::WSS => {
|
||||
ws::WebsocketProtocolHandler::connect(local_address, dial_info, timeout_ms).await
|
||||
ws::WebsocketProtocolHandler::connect(
|
||||
registry,
|
||||
local_address,
|
||||
dial_info,
|
||||
timeout_ms,
|
||||
)
|
||||
.await
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -2,19 +2,27 @@ use super::*;
|
||||
use futures_util::{AsyncReadExt, AsyncWriteExt};
|
||||
|
||||
pub struct RawTcpNetworkConnection {
|
||||
registry: VeilidComponentRegistry,
|
||||
flow: Flow,
|
||||
stream: Mutex<Option<AsyncPeekStream>>,
|
||||
}
|
||||
|
||||
impl fmt::Debug for RawTcpNetworkConnection {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
f.debug_struct("RawTCPNetworkConnection").finish()
|
||||
f.debug_struct("RawTcpNetworkConnection")
|
||||
//.field("registry", &self.registry)
|
||||
.field("flow", &self.flow)
|
||||
//.field("stream", &self.stream)
|
||||
.finish()
|
||||
}
|
||||
}
|
||||
|
||||
impl_veilid_component_registry_accessor!(RawTcpNetworkConnection);
|
||||
|
||||
impl RawTcpNetworkConnection {
|
||||
pub fn new(flow: Flow, stream: AsyncPeekStream) -> Self {
|
||||
pub fn new(registry: VeilidComponentRegistry, flow: Flow, stream: AsyncPeekStream) -> Self {
|
||||
Self {
|
||||
registry,
|
||||
flow,
|
||||
stream: Mutex::new(Some(stream)),
|
||||
}
|
||||
@ -37,7 +45,6 @@ impl RawTcpNetworkConnection {
|
||||
stream: &mut AsyncPeekStream,
|
||||
message: Vec<u8>,
|
||||
) -> io::Result<NetworkResult<()>> {
|
||||
log_net!("sending TCP message of size {}", message.len());
|
||||
if message.len() > MAX_MESSAGE_SIZE {
|
||||
bail_io_error_other!("sending too large TCP message");
|
||||
}
|
||||
@ -108,14 +115,19 @@ pub struct RawTcpProtocolHandler
|
||||
where
|
||||
Self: ProtocolAcceptHandler,
|
||||
{
|
||||
registry: VeilidComponentRegistry,
|
||||
connection_initial_timeout_ms: u32,
|
||||
}
|
||||
|
||||
impl_veilid_component_registry_accessor!(RawTcpProtocolHandler);
|
||||
|
||||
impl RawTcpProtocolHandler {
|
||||
pub fn new(config: VeilidConfig) -> Self {
|
||||
let c = config.get();
|
||||
let connection_initial_timeout_ms = c.network.connection_initial_timeout_ms;
|
||||
pub fn new(registry: VeilidComponentRegistry) -> Self {
|
||||
let connection_initial_timeout_ms = registry
|
||||
.config()
|
||||
.with(|c| c.network.connection_initial_timeout_ms);
|
||||
Self {
|
||||
registry,
|
||||
connection_initial_timeout_ms,
|
||||
}
|
||||
}
|
||||
@ -127,7 +139,7 @@ impl RawTcpProtocolHandler {
|
||||
socket_addr: SocketAddr,
|
||||
local_addr: SocketAddr,
|
||||
) -> io::Result<Option<ProtocolNetworkConnection>> {
|
||||
log_net!("TCP: on_accept_async: enter");
|
||||
veilid_log!(self trace "TCP: on_accept_async: enter");
|
||||
let mut peekbuf: [u8; PEEK_DETECT_LEN] = [0u8; PEEK_DETECT_LEN];
|
||||
if (timeout(
|
||||
self.connection_initial_timeout_ms,
|
||||
@ -144,17 +156,19 @@ impl RawTcpProtocolHandler {
|
||||
ProtocolType::TCP,
|
||||
);
|
||||
let conn = ProtocolNetworkConnection::RawTcp(RawTcpNetworkConnection::new(
|
||||
self.registry(),
|
||||
Flow::new(peer_addr, SocketAddress::from_socket_addr(local_addr)),
|
||||
ps,
|
||||
));
|
||||
|
||||
log_net!("Connection accepted from: {} (TCP)", socket_addr);
|
||||
veilid_log!(self trace "Connection accepted from: {} (TCP)", socket_addr);
|
||||
|
||||
Ok(Some(conn))
|
||||
}
|
||||
|
||||
#[instrument(level = "trace", target = "protocol", err)]
|
||||
pub async fn connect(
|
||||
registry: VeilidComponentRegistry,
|
||||
local_address: Option<SocketAddr>,
|
||||
remote_address: SocketAddr,
|
||||
timeout_ms: u32,
|
||||
@ -182,9 +196,10 @@ impl RawTcpProtocolHandler {
|
||||
),
|
||||
SocketAddress::from_socket_addr(actual_local_address),
|
||||
);
|
||||
log_net!("rawtcp::connect: {:?}", flow);
|
||||
veilid_log!(registry trace "rawtcp::connect: {:?}", flow);
|
||||
|
||||
let conn = ProtocolNetworkConnection::RawTcp(RawTcpNetworkConnection::new(flow, ps));
|
||||
let conn =
|
||||
ProtocolNetworkConnection::RawTcp(RawTcpNetworkConnection::new(registry, flow, ps));
|
||||
|
||||
Ok(NetworkResult::Value(conn))
|
||||
}
|
||||
|
@ -1,5 +1,7 @@
|
||||
use super::*;
|
||||
|
||||
impl_veilid_log_facility!("net");
|
||||
|
||||
#[derive(Clone)]
|
||||
pub struct RawUdpProtocolHandler {
|
||||
registry: VeilidComponentRegistry,
|
||||
@ -22,7 +24,7 @@ impl RawUdpProtocolHandler {
|
||||
pub async fn recv_message(&self, data: &mut [u8]) -> io::Result<(usize, Flow)> {
|
||||
let (message_len, flow) = loop {
|
||||
// Get a packet
|
||||
let (size, remote_addr) = network_result_value_or_log!(self.socket.recv_from(data).await.into_network_result()? => continue);
|
||||
let (size, remote_addr) = network_result_value_or_log!(self self.socket.recv_from(data).await.into_network_result()? => continue);
|
||||
|
||||
// Check to see if it is punished
|
||||
if self
|
||||
@ -43,7 +45,7 @@ impl RawUdpProtocolHandler {
|
||||
continue;
|
||||
}
|
||||
nres => {
|
||||
log_network_result!(debug
|
||||
veilid_log!(self debug target:"network_result",
|
||||
"UDP::recv_message insert_frame failed: {:?} <= size={} remote_addr={}",
|
||||
nres,
|
||||
size,
|
||||
@ -55,7 +57,7 @@ impl RawUdpProtocolHandler {
|
||||
|
||||
// Check length of reassembled message (same for all protocols)
|
||||
if message.len() > MAX_MESSAGE_SIZE {
|
||||
log_net!(debug "{}({}) at {}@{}:{}", "Invalid message", "received too large UDP message", file!(), line!(), column!());
|
||||
veilid_log!(self debug "{}({}) at {}@{}:{}", "Invalid message", "received too large UDP message", file!(), line!(), column!());
|
||||
continue;
|
||||
}
|
||||
|
||||
@ -133,7 +135,7 @@ impl RawUdpProtocolHandler {
|
||||
SocketAddress::from_socket_addr(local_socket_addr),
|
||||
);
|
||||
|
||||
log_net!("udp::send_message: {:?}", flow);
|
||||
veilid_log!(self trace "udp::send_message: {:?}", flow);
|
||||
|
||||
#[cfg(feature = "verbose-tracing")]
|
||||
tracing::Span::current().record("ret.flow", format!("{:?}", flow).as_str());
|
||||
@ -190,7 +192,7 @@ impl RawUdpProtocolHandler {
|
||||
SocketAddress::from_socket_addr(local_socket_addr),
|
||||
);
|
||||
|
||||
log_net!("udp::send_hole_punch: {:?}", flow);
|
||||
veilid_log!(self trace "udp::send_hole_punch: {:?}", flow);
|
||||
|
||||
#[cfg(feature = "verbose-tracing")]
|
||||
tracing::Span::current().record("ret.flow", format!("{:?}", flow).as_str());
|
||||
|
@ -53,6 +53,7 @@ pub struct WebsocketNetworkConnection<T>
|
||||
where
|
||||
T: AsyncRead + AsyncWrite + Send + Unpin + 'static,
|
||||
{
|
||||
registry: VeilidComponentRegistry,
|
||||
flow: Flow,
|
||||
stream: CloneStream<WebSocketStream<T>>,
|
||||
}
|
||||
@ -62,7 +63,20 @@ where
|
||||
T: AsyncRead + AsyncWrite + Send + Unpin + 'static,
|
||||
{
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
write!(f, "{}", std::any::type_name::<Self>())
|
||||
f.debug_struct("WebsocketNetworkConnection")
|
||||
//.field("registry", &self.registry)
|
||||
.field("flow", &self.flow)
|
||||
//.field("stream", &self.stream)
|
||||
.finish()
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> VeilidComponentRegistryAccessor for WebsocketNetworkConnection<T>
|
||||
where
|
||||
T: AsyncRead + AsyncWrite + Send + Unpin + 'static,
|
||||
{
|
||||
fn registry(&self) -> VeilidComponentRegistry {
|
||||
self.registry.clone()
|
||||
}
|
||||
}
|
||||
|
||||
@ -70,8 +84,9 @@ impl<T> WebsocketNetworkConnection<T>
|
||||
where
|
||||
T: AsyncRead + AsyncWrite + Send + Unpin + 'static,
|
||||
{
|
||||
pub fn new(flow: Flow, stream: WebSocketStream<T>) -> Self {
|
||||
pub fn new(registry: VeilidComponentRegistry, flow: Flow, stream: WebSocketStream<T>) -> Self {
|
||||
Self {
|
||||
registry,
|
||||
flow,
|
||||
stream: CloneStream::new(stream),
|
||||
}
|
||||
@ -184,10 +199,15 @@ pub struct WebsocketProtocolHandler
|
||||
where
|
||||
Self: ProtocolAcceptHandler,
|
||||
{
|
||||
registry: VeilidComponentRegistry,
|
||||
arc: Arc<WebsocketProtocolHandlerArc>,
|
||||
}
|
||||
|
||||
impl_veilid_component_registry_accessor!(WebsocketProtocolHandler);
|
||||
|
||||
impl WebsocketProtocolHandler {
|
||||
pub fn new(config: VeilidConfig, tls: bool) -> Self {
|
||||
pub fn new(registry: VeilidComponentRegistry, tls: bool) -> Self {
|
||||
let config = registry.config();
|
||||
let c = config.get();
|
||||
let path = if tls {
|
||||
format!("GET /{}", c.network.protocol.wss.path.trim_end_matches('/'))
|
||||
@ -201,6 +221,7 @@ impl WebsocketProtocolHandler {
|
||||
};
|
||||
|
||||
Self {
|
||||
registry,
|
||||
arc: Arc::new(WebsocketProtocolHandlerArc {
|
||||
tls,
|
||||
request_path: path.as_bytes().to_vec(),
|
||||
@ -216,7 +237,7 @@ impl WebsocketProtocolHandler {
|
||||
socket_addr: SocketAddr,
|
||||
local_addr: SocketAddr,
|
||||
) -> io::Result<Option<ProtocolNetworkConnection>> {
|
||||
log_net!("WS: on_accept_async: enter");
|
||||
veilid_log!(self trace "WS: on_accept_async: enter");
|
||||
let request_path_len = self.arc.request_path.len() + 2;
|
||||
|
||||
let mut peek_buf = [0u8; MAX_WS_BEFORE_BODY];
|
||||
@ -266,7 +287,7 @@ impl WebsocketProtocolHandler {
|
||||
let ws_stream = match accept_hdr_async(ps, self.clone()).await {
|
||||
Ok(v) => v,
|
||||
Err(e) => {
|
||||
log_net!(debug "failed websockets handshake: {}", e);
|
||||
veilid_log!(self debug "failed websockets handshake: {}", e);
|
||||
return Ok(None);
|
||||
}
|
||||
};
|
||||
@ -282,11 +303,12 @@ impl WebsocketProtocolHandler {
|
||||
PeerAddress::new(SocketAddress::from_socket_addr(socket_addr), protocol_type);
|
||||
|
||||
let conn = ProtocolNetworkConnection::WsAccepted(WebsocketNetworkConnection::new(
|
||||
self.registry(),
|
||||
Flow::new(peer_addr, SocketAddress::from_socket_addr(local_addr)),
|
||||
ws_stream,
|
||||
));
|
||||
|
||||
log_net!(
|
||||
veilid_log!(self trace
|
||||
"Connection accepted from: {} ({})",
|
||||
socket_addr,
|
||||
if self.arc.tls { "WSS" } else { "WS" }
|
||||
@ -297,6 +319,7 @@ impl WebsocketProtocolHandler {
|
||||
|
||||
#[instrument(level = "trace", target = "protocol", ret, err)]
|
||||
pub async fn connect(
|
||||
registry: VeilidComponentRegistry,
|
||||
local_address: Option<SocketAddr>,
|
||||
dial_info: &DialInfo,
|
||||
timeout_ms: u32,
|
||||
@ -337,7 +360,7 @@ impl WebsocketProtocolHandler {
|
||||
dial_info.peer_address(),
|
||||
SocketAddress::from_socket_addr(actual_local_addr),
|
||||
);
|
||||
log_net!("{}::connect: {:?}", scheme, flow);
|
||||
veilid_log!(registry trace "{}::connect: {:?}", scheme, flow);
|
||||
|
||||
// Negotiate TLS if this is WSS
|
||||
if tls {
|
||||
@ -351,14 +374,14 @@ impl WebsocketProtocolHandler {
|
||||
.map_err(to_io_error_other)?;
|
||||
|
||||
Ok(NetworkResult::Value(ProtocolNetworkConnection::Wss(
|
||||
WebsocketNetworkConnection::new(flow, ws_stream),
|
||||
WebsocketNetworkConnection::new(registry, flow, ws_stream),
|
||||
)))
|
||||
} else {
|
||||
let (ws_stream, _response) = client_async(request, tcp_stream)
|
||||
.await
|
||||
.map_err(to_io_error_other)?;
|
||||
Ok(NetworkResult::Value(ProtocolNetworkConnection::Ws(
|
||||
WebsocketNetworkConnection::new(flow, ws_stream),
|
||||
WebsocketNetworkConnection::new(registry, flow, ws_stream),
|
||||
)))
|
||||
}
|
||||
}
|
||||
|
@ -139,7 +139,7 @@ impl Network {
|
||||
|
||||
#[instrument(level = "trace", skip_all)]
|
||||
pub(super) async fn bind_udp_protocol_handlers(&self) -> EyreResult<StartupDisposition> {
|
||||
log_net!("UDP: binding protocol handlers");
|
||||
veilid_log!(self trace "UDP: binding protocol handlers");
|
||||
let (listen_address, public_address, detect_address_changes) = self.config().with(|c| {
|
||||
(
|
||||
c.network.protocol.udp.listen_address.clone(),
|
||||
@ -155,12 +155,12 @@ impl Network {
|
||||
|
||||
// Now create udp inbound sockets for whatever interfaces we're listening on
|
||||
if bind_set.search {
|
||||
info!(
|
||||
veilid_log!(self info
|
||||
"UDP: searching for free port starting with {} on {:?}",
|
||||
bind_set.port, bind_set.addrs
|
||||
);
|
||||
} else {
|
||||
info!(
|
||||
veilid_log!(self info
|
||||
"UDP: binding protocol handlers at port {} on {:?}",
|
||||
bind_set.port, bind_set.addrs
|
||||
);
|
||||
@ -189,7 +189,7 @@ impl Network {
|
||||
editor_public_internet: &mut RoutingDomainEditorPublicInternet<'_>,
|
||||
editor_local_network: &mut RoutingDomainEditorLocalNetwork<'_>,
|
||||
) -> EyreResult<()> {
|
||||
log_net!("UDP: registering dial info");
|
||||
veilid_log!(self trace "UDP: registering dial info");
|
||||
|
||||
let (public_address, detect_address_changes) = self.config().with(|c| {
|
||||
(
|
||||
@ -260,7 +260,7 @@ impl Network {
|
||||
|
||||
#[instrument(level = "trace", skip_all)]
|
||||
pub(super) async fn start_ws_listeners(&self) -> EyreResult<StartupDisposition> {
|
||||
log_net!("WS: binding protocol handlers");
|
||||
veilid_log!(self trace "WS: binding protocol handlers");
|
||||
let (listen_address, url, detect_address_changes) = self.config().with(|c| {
|
||||
(
|
||||
c.network.protocol.ws.listen_address.clone(),
|
||||
@ -275,12 +275,12 @@ impl Network {
|
||||
.await?;
|
||||
|
||||
if bind_set.search {
|
||||
info!(
|
||||
veilid_log!(self info
|
||||
"WS: searching for free port starting with {} on {:?}",
|
||||
bind_set.port, bind_set.addrs
|
||||
);
|
||||
} else {
|
||||
info!(
|
||||
veilid_log!(self info
|
||||
"WS: binding protocol handlers at port {} on {:?}",
|
||||
bind_set.port, bind_set.addrs
|
||||
);
|
||||
@ -290,7 +290,7 @@ impl Network {
|
||||
bind_set,
|
||||
false,
|
||||
ProtocolType::WS,
|
||||
Box::new(|c, t| Box::new(WebsocketProtocolHandler::new(c, t))),
|
||||
Box::new(|r, t| Box::new(WebsocketProtocolHandler::new(r, t))),
|
||||
)
|
||||
.await?
|
||||
{
|
||||
@ -313,7 +313,7 @@ impl Network {
|
||||
editor_public_internet: &mut RoutingDomainEditorPublicInternet<'_>,
|
||||
editor_local_network: &mut RoutingDomainEditorLocalNetwork<'_>,
|
||||
) -> EyreResult<()> {
|
||||
log_net!("WS: registering dial info");
|
||||
veilid_log!(self trace "WS: registering dial info");
|
||||
let (url, path, detect_address_changes) = self.config().with(|c| {
|
||||
(
|
||||
c.network.protocol.ws.url.clone(),
|
||||
@ -403,7 +403,7 @@ impl Network {
|
||||
|
||||
#[instrument(level = "trace", skip_all)]
|
||||
pub(super) async fn start_wss_listeners(&self) -> EyreResult<StartupDisposition> {
|
||||
log_net!("WSS: binding protocol handlers");
|
||||
veilid_log!(self trace "WSS: binding protocol handlers");
|
||||
|
||||
let (listen_address, url, detect_address_changes) = self.config().with(|c| {
|
||||
(
|
||||
@ -419,12 +419,12 @@ impl Network {
|
||||
.await?;
|
||||
|
||||
if bind_set.search {
|
||||
info!(
|
||||
veilid_log!(self info
|
||||
"WSS: searching for free port starting with {} on {:?}",
|
||||
bind_set.port, bind_set.addrs
|
||||
);
|
||||
} else {
|
||||
info!(
|
||||
veilid_log!(self info
|
||||
"WSS: binding protocol handlers at port {} on {:?}",
|
||||
bind_set.port, bind_set.addrs
|
||||
);
|
||||
@ -435,7 +435,7 @@ impl Network {
|
||||
bind_set,
|
||||
true,
|
||||
ProtocolType::WSS,
|
||||
Box::new(|c, t| Box::new(WebsocketProtocolHandler::new(c, t))),
|
||||
Box::new(|r, t| Box::new(WebsocketProtocolHandler::new(r, t))),
|
||||
)
|
||||
.await?
|
||||
{
|
||||
@ -458,7 +458,7 @@ impl Network {
|
||||
editor_public_internet: &mut RoutingDomainEditorPublicInternet<'_>,
|
||||
editor_local_network: &mut RoutingDomainEditorLocalNetwork<'_>,
|
||||
) -> EyreResult<()> {
|
||||
log_net!("WSS: registering dialinfo");
|
||||
veilid_log!(self trace "WSS: registering dialinfo");
|
||||
|
||||
let (url, _detect_address_changes) = self.config().with(|c| {
|
||||
(
|
||||
@ -512,7 +512,7 @@ impl Network {
|
||||
|
||||
#[instrument(level = "trace", skip_all)]
|
||||
pub(super) async fn start_tcp_listeners(&self) -> EyreResult<StartupDisposition> {
|
||||
log_net!("TCP: binding protocol handlers");
|
||||
veilid_log!(self trace "TCP: binding protocol handlers");
|
||||
|
||||
let (listen_address, public_address, detect_address_changes) = self.config().with(|c| {
|
||||
(
|
||||
@ -528,12 +528,12 @@ impl Network {
|
||||
.await?;
|
||||
|
||||
if bind_set.search {
|
||||
info!(
|
||||
veilid_log!(self info
|
||||
"TCP: searching for free port starting with {} on {:?}",
|
||||
bind_set.port, bind_set.addrs
|
||||
);
|
||||
} else {
|
||||
info!(
|
||||
veilid_log!(self info
|
||||
"TCP: binding protocol handlers at port {} on {:?}",
|
||||
bind_set.port, bind_set.addrs
|
||||
);
|
||||
@ -566,7 +566,7 @@ impl Network {
|
||||
editor_public_internet: &mut RoutingDomainEditorPublicInternet<'_>,
|
||||
editor_local_network: &mut RoutingDomainEditorLocalNetwork<'_>,
|
||||
) -> EyreResult<()> {
|
||||
log_net!("TCP: registering dialinfo");
|
||||
veilid_log!(self trace "TCP: registering dialinfo");
|
||||
|
||||
let (public_address, detect_address_changes) = self.config().with(|c| {
|
||||
(
|
||||
|
@ -78,7 +78,7 @@ impl Network {
|
||||
#[instrument(level = "trace", target = "net", name = "Network::tick", skip_all, err)]
|
||||
pub async fn tick(&self) -> EyreResult<()> {
|
||||
let Ok(_guard) = self.startup_lock.enter() else {
|
||||
log_net!(debug "ignoring due to not started up");
|
||||
veilid_log!(self debug "ignoring due to not started up");
|
||||
return Ok(());
|
||||
};
|
||||
|
||||
@ -112,17 +112,17 @@ impl Network {
|
||||
}
|
||||
|
||||
pub async fn cancel_tasks(&self) {
|
||||
log_net!(debug "stopping upnp task");
|
||||
veilid_log!(self debug "stopping upnp task");
|
||||
if let Err(e) = self.upnp_task.stop().await {
|
||||
warn!("upnp_task not stopped: {}", e);
|
||||
veilid_log!(self warn "upnp_task not stopped: {}", e);
|
||||
}
|
||||
log_net!(debug "stopping network interfaces task");
|
||||
veilid_log!(self debug "stopping network interfaces task");
|
||||
if let Err(e) = self.network_interfaces_task.stop().await {
|
||||
warn!("network_interfaces_task not stopped: {}", e);
|
||||
veilid_log!(self warn "network_interfaces_task not stopped: {}", e);
|
||||
}
|
||||
log_net!(debug "stopping update network class task");
|
||||
veilid_log!(self debug "stopping update network class task");
|
||||
if let Err(e) = self.update_network_class_task.stop().await {
|
||||
warn!("update_network_class_task not stopped: {}", e);
|
||||
veilid_log!(self warn "update_network_class_task not stopped: {}", e);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -33,7 +33,7 @@ impl Network {
|
||||
let new_network_state = match self.make_network_state().await {
|
||||
Ok(v) => v,
|
||||
Err(e) => {
|
||||
log_net!(debug "Skipping network state update: {}", e);
|
||||
veilid_log!(self debug "Skipping network state update: {}", e);
|
||||
return Ok(false);
|
||||
}
|
||||
};
|
||||
|
@ -225,7 +225,7 @@ impl Network {
|
||||
|
||||
// If we got no external address types, try again
|
||||
if external_address_types.is_empty() {
|
||||
log_net!(debug "Network class discovery failed, trying again, got no external address types");
|
||||
veilid_log!(self debug "Network class discovery failed, trying again, got no external address types");
|
||||
return Ok(false);
|
||||
}
|
||||
|
||||
@ -240,12 +240,12 @@ impl Network {
|
||||
}
|
||||
|
||||
if !success {
|
||||
log_net!(debug "Network class discovery failed, trying again, needed {:?}", context_configs);
|
||||
veilid_log!(self debug "Network class discovery failed, trying again, needed {:?}", context_configs);
|
||||
return Ok(false);
|
||||
}
|
||||
|
||||
// All done
|
||||
log_net!(debug "Network class discovery finished with address_types {:?}", external_address_types);
|
||||
veilid_log!(self debug "Network class discovery finished with address_types {:?}", external_address_types);
|
||||
|
||||
// Set the address types we've seen and confirm the network class
|
||||
editor.setup_network(
|
||||
|
@ -9,7 +9,7 @@ impl Network {
|
||||
_t: Timestamp,
|
||||
) -> EyreResult<()> {
|
||||
if !self.igd_manager.tick().await? {
|
||||
info!("upnp failed, restarting local network");
|
||||
veilid_log!(self info "upnp failed, restarting local network");
|
||||
let mut inner = self.inner.lock();
|
||||
inner.network_needs_restart = true;
|
||||
}
|
||||
|
@ -39,7 +39,7 @@ cfg_if::cfg_if! {
|
||||
}
|
||||
|
||||
pub(crate) type NewProtocolAcceptHandler =
|
||||
dyn Fn(VeilidConfig, bool) -> Box<dyn ProtocolAcceptHandler> + Send;
|
||||
dyn Fn(VeilidComponentRegistry, bool) -> Box<dyn ProtocolAcceptHandler> + Send;
|
||||
}
|
||||
}
|
||||
///////////////////////////////////////////////////////////
|
||||
@ -83,8 +83,9 @@ pub struct NetworkConnectionStats {
|
||||
}
|
||||
|
||||
/// Represents a connection in the connection table for connection-oriented protocols
|
||||
#[derive(Debug)]
|
||||
pub(crate) struct NetworkConnection {
|
||||
/// Registry accessor
|
||||
registry: VeilidComponentRegistry,
|
||||
/// A unique id for this connection
|
||||
connection_id: NetworkConnectionId,
|
||||
/// The dial info used to make this connection if it was made with 'connect'
|
||||
@ -108,20 +109,45 @@ pub(crate) struct NetworkConnection {
|
||||
ref_count: usize,
|
||||
}
|
||||
|
||||
impl_veilid_component_registry_accessor!(NetworkConnection);
|
||||
|
||||
impl fmt::Debug for NetworkConnection {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
f.debug_struct("NetworkConnection")
|
||||
//.field("registry", &self.registry)
|
||||
.field("connection_id", &self.connection_id)
|
||||
.field("opt_dial_info", &self.opt_dial_info)
|
||||
.field("flow", &self.flow)
|
||||
.field("processor", &self.processor)
|
||||
.field("established_time", &self.established_time)
|
||||
.field("stats", &self.stats)
|
||||
.field("sender", &self.sender)
|
||||
.field("stop_source", &self.stop_source)
|
||||
.field("protected_nr", &self.protected_nr)
|
||||
.field("ref_count", &self.ref_count)
|
||||
.finish()
|
||||
}
|
||||
}
|
||||
|
||||
impl Drop for NetworkConnection {
|
||||
fn drop(&mut self) {
|
||||
if self.ref_count != 0 && self.stop_source.is_some() {
|
||||
log_net!(error "ref_count for network connection should be zero: {:?}", self);
|
||||
veilid_log!(self error "ref_count for network connection should be zero: {:?}", self);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl NetworkConnection {
|
||||
pub(super) fn dummy(id: NetworkConnectionId, flow: Flow) -> Self {
|
||||
pub(super) fn dummy(
|
||||
registry: VeilidComponentRegistry,
|
||||
id: NetworkConnectionId,
|
||||
flow: Flow,
|
||||
) -> Self {
|
||||
// Create handle for sending (dummy is immediately disconnected)
|
||||
let (sender, _receiver) = flume::bounded(get_concurrency() as usize);
|
||||
|
||||
Self {
|
||||
registry,
|
||||
connection_id: id,
|
||||
opt_dial_info: None,
|
||||
flow,
|
||||
@ -162,6 +188,7 @@ impl NetworkConnection {
|
||||
let local_stop_token = stop_source.token();
|
||||
|
||||
// Spawn connection processor and pass in protocol connection
|
||||
let registry = connection_manager.registry();
|
||||
let processor = spawn(
|
||||
"connection processor",
|
||||
Self::process_connection(
|
||||
@ -178,6 +205,7 @@ impl NetworkConnection {
|
||||
|
||||
// Return the connection
|
||||
Self {
|
||||
registry,
|
||||
connection_id,
|
||||
opt_dial_info,
|
||||
flow,
|
||||
@ -302,12 +330,13 @@ impl NetworkConnection {
|
||||
stats: Arc<Mutex<NetworkConnectionStats>>,
|
||||
) -> SendPinBoxFuture<()> {
|
||||
Box::pin(async move {
|
||||
log_net!(
|
||||
let registry = connection_manager.registry();
|
||||
|
||||
veilid_log!(registry trace
|
||||
"Starting process_connection loop for id={}, {:?}", connection_id,
|
||||
flow
|
||||
);
|
||||
|
||||
let registry = connection_manager.registry();
|
||||
let mut unord = FuturesUnordered::new();
|
||||
let mut need_receiver = true;
|
||||
let mut need_sender = true;
|
||||
@ -317,7 +346,7 @@ impl NetworkConnection {
|
||||
let new_timer = || {
|
||||
sleep(connection_manager.connection_inactivity_timeout_ms()).then(|_| async {
|
||||
// timeout
|
||||
log_net!("Connection timeout on {:?}", flow);
|
||||
veilid_log!(connection_manager trace "Connection timeout on {:?}", flow);
|
||||
RecvLoopAction::Timeout
|
||||
})
|
||||
};
|
||||
@ -344,7 +373,7 @@ impl NetworkConnection {
|
||||
.await
|
||||
{
|
||||
// Sending the packet along can fail, if so, this connection is dead
|
||||
log_net!(debug e);
|
||||
veilid_log!(connection_manager debug e);
|
||||
RecvLoopAction::Finish
|
||||
} else {
|
||||
RecvLoopAction::Send
|
||||
@ -352,7 +381,7 @@ impl NetworkConnection {
|
||||
}
|
||||
Err(e) => {
|
||||
// All senders gone, shouldn't happen since we store one alongside the join handle
|
||||
log_net!(warn e);
|
||||
veilid_log!(connection_manager warn e);
|
||||
RecvLoopAction::Finish
|
||||
}
|
||||
}
|
||||
@ -379,7 +408,7 @@ impl NetworkConnection {
|
||||
|
||||
// Check for connection close
|
||||
if v.is_no_connection() {
|
||||
log_net!("Connection closed from: {} ({})", peer_address.socket_addr(), peer_address.protocol_type());
|
||||
veilid_log!(registry trace "Connection closed from: {} ({})", peer_address.socket_addr(), peer_address.protocol_type());
|
||||
return RecvLoopAction::Finish;
|
||||
}
|
||||
|
||||
@ -390,7 +419,7 @@ impl NetworkConnection {
|
||||
}
|
||||
|
||||
// Log other network results
|
||||
let mut message = network_result_value_or_log!(v => [ format!(": protocol_connection={:?}", protocol_connection) ] {
|
||||
let mut message = network_result_value_or_log!(registry v => [ format!(": protocol_connection={:?}", protocol_connection) ] {
|
||||
return RecvLoopAction::Finish;
|
||||
});
|
||||
|
||||
@ -399,7 +428,7 @@ impl NetworkConnection {
|
||||
.on_recv_envelope(message.as_mut_slice(), flow)
|
||||
.await
|
||||
{
|
||||
log_net!(debug "failed to process received envelope: {}", e);
|
||||
veilid_log!(registry debug "failed to process received envelope: {}", e);
|
||||
RecvLoopAction::Finish
|
||||
} else {
|
||||
// Touch the LRU for this connection
|
||||
@ -410,7 +439,7 @@ impl NetworkConnection {
|
||||
}
|
||||
Err(e) => {
|
||||
// Connection unable to receive, closed
|
||||
log_net!(error "connection unable to receive: {}", e);
|
||||
veilid_log!(registry error "connection unable to receive: {}", e);
|
||||
RecvLoopAction::Finish
|
||||
}
|
||||
}
|
||||
@ -451,7 +480,7 @@ impl NetworkConnection {
|
||||
}
|
||||
}
|
||||
|
||||
log_net!(
|
||||
veilid_log!(registry trace
|
||||
"Connection loop finished flow={:?}",
|
||||
flow
|
||||
);
|
||||
@ -463,7 +492,7 @@ impl NetworkConnection {
|
||||
|
||||
// Close the low level socket
|
||||
if let Err(e) = protocol_connection.close().await {
|
||||
log_net!(debug "Protocol connection close error: {}", e);
|
||||
veilid_log!(registry debug "Protocol connection close error: {}", e);
|
||||
}
|
||||
}.in_current_span())
|
||||
}
|
||||
|
@ -6,6 +6,8 @@ use network_manager::*;
|
||||
use routing_table::*;
|
||||
use stop_token::future::FutureExt;
|
||||
|
||||
impl_veilid_log_facility!("receipt");
|
||||
|
||||
#[derive(Clone, Debug)]
|
||||
pub enum ReceiptEvent {
|
||||
ReturnedOutOfBand,
|
||||
@ -155,10 +157,13 @@ struct ReceiptManagerUnlockedInner {
|
||||
|
||||
#[derive(Clone)]
|
||||
pub(super) struct ReceiptManager {
|
||||
registry: VeilidComponentRegistry,
|
||||
inner: Arc<Mutex<ReceiptManagerInner>>,
|
||||
unlocked_inner: Arc<ReceiptManagerUnlockedInner>,
|
||||
}
|
||||
|
||||
impl_veilid_component_registry_accessor!(ReceiptManager);
|
||||
|
||||
impl ReceiptManager {
|
||||
fn new_inner() -> ReceiptManagerInner {
|
||||
ReceiptManagerInner {
|
||||
@ -169,8 +174,9 @@ impl ReceiptManager {
|
||||
}
|
||||
}
|
||||
|
||||
pub fn new() -> Self {
|
||||
pub fn new(registry: VeilidComponentRegistry) -> Self {
|
||||
Self {
|
||||
registry,
|
||||
inner: Arc::new(Mutex::new(Self::new_inner())),
|
||||
unlocked_inner: Arc::new(ReceiptManagerUnlockedInner {
|
||||
startup_lock: StartupLock::new(),
|
||||
@ -180,7 +186,7 @@ impl ReceiptManager {
|
||||
|
||||
pub async fn startup(&self) -> EyreResult<()> {
|
||||
let guard = self.unlocked_inner.startup_lock.startup()?;
|
||||
log_net!(debug "startup receipt manager");
|
||||
veilid_log!(self debug "startup receipt manager");
|
||||
|
||||
// Retrieve config
|
||||
{
|
||||
@ -319,23 +325,23 @@ impl ReceiptManager {
|
||||
};
|
||||
|
||||
// Wait for everything to stop
|
||||
log_net!(debug "waiting for timeout task to stop");
|
||||
veilid_log!(self debug "waiting for timeout task to stop");
|
||||
if timeout_task.join().await.is_err() {
|
||||
panic!("joining timeout task failed");
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn shutdown(&self) {
|
||||
log_net!(debug "starting receipt manager shutdown");
|
||||
veilid_log!(self debug "starting receipt manager shutdown");
|
||||
let Ok(guard) = self.unlocked_inner.startup_lock.shutdown().await else {
|
||||
log_net!(debug "receipt manager is already shut down");
|
||||
veilid_log!(self debug "receipt manager is already shut down");
|
||||
return;
|
||||
};
|
||||
|
||||
*self.inner.lock() = Self::new_inner();
|
||||
|
||||
guard.success();
|
||||
log_net!(debug "finished receipt manager shutdown");
|
||||
veilid_log!(self debug "finished receipt manager shutdown");
|
||||
}
|
||||
|
||||
#[instrument(level = "trace", target = "receipt", skip_all)]
|
||||
@ -347,7 +353,7 @@ impl ReceiptManager {
|
||||
callback: impl ReceiptCallback,
|
||||
) {
|
||||
let Ok(_guard) = self.unlocked_inner.startup_lock.enter() else {
|
||||
log_net!(debug "ignoring due to not started up");
|
||||
veilid_log!(self debug "ignoring due to not started up");
|
||||
return;
|
||||
};
|
||||
let receipt_nonce = receipt.get_nonce();
|
||||
@ -372,7 +378,7 @@ impl ReceiptManager {
|
||||
eventual: ReceiptSingleShotType,
|
||||
) {
|
||||
let Ok(_guard) = self.unlocked_inner.startup_lock.enter() else {
|
||||
log_net!(debug "ignoring due to not started up");
|
||||
veilid_log!(self debug "ignoring due to not started up");
|
||||
return;
|
||||
};
|
||||
let receipt_nonce = receipt.get_nonce();
|
||||
|
@ -56,7 +56,7 @@ impl NetworkManager {
|
||||
};
|
||||
|
||||
#[cfg(feature = "verbose-tracing")]
|
||||
log_net!(debug
|
||||
veilid_log!(self debug
|
||||
"ContactMethod: {:?} for {:?}",
|
||||
contact_method, destination_node_ref
|
||||
);
|
||||
@ -96,7 +96,7 @@ impl NetworkManager {
|
||||
let failure = REVERSE_CONNECT_FAILURE.fetch_add(1, Ordering::AcqRel) + 1;
|
||||
let rate = (success as f64 * 100.0) / ((success + failure) as f64);
|
||||
|
||||
log_network_result!(debug "Reverse connection failed ({:.2}% success) to {}, falling back to inbound relay via {}", rate, target_node_ref, relay_nr);
|
||||
veilid_log!(this debug target:"network_result", "Reverse connection failed ({:.2}% success) to {}, falling back to inbound relay via {}", rate, target_node_ref, relay_nr);
|
||||
network_result_try!(this.try_possibly_relayed_contact_method(NodeContactMethod::InboundRelay(relay_nr), destination_node_ref, data).await?)
|
||||
} else {
|
||||
if let NetworkResult::Value(sdm) = &nres {
|
||||
@ -106,7 +106,7 @@ impl NetworkManager {
|
||||
let failure = REVERSE_CONNECT_FAILURE.load(Ordering::Acquire);
|
||||
let rate = (success as f64 * 100.0) / ((success + failure) as f64);
|
||||
|
||||
log_network_result!(debug "Reverse connection successful ({:.2}% success) to {} via {}", rate, target_node_ref, relay_nr);
|
||||
veilid_log!(this debug target:"network_result", "Reverse connection successful ({:.2}% success) to {} via {}", rate, target_node_ref, relay_nr);
|
||||
}
|
||||
}
|
||||
network_result_try!(nres)
|
||||
@ -122,7 +122,7 @@ impl NetworkManager {
|
||||
let failure = HOLE_PUNCH_FAILURE.fetch_add(1, Ordering::AcqRel) + 1;
|
||||
let rate = (success as f64 * 100.0) / ((success + failure) as f64);
|
||||
|
||||
log_network_result!(debug "Hole punch failed ({:.2}% success) to {} , falling back to inbound relay via {}", rate, target_node_ref , relay_nr);
|
||||
veilid_log!(this debug target:"network_result", "Hole punch failed ({:.2}% success) to {} , falling back to inbound relay via {}", rate, target_node_ref , relay_nr);
|
||||
network_result_try!(this.try_possibly_relayed_contact_method(NodeContactMethod::InboundRelay(relay_nr), destination_node_ref, data).await?)
|
||||
} else {
|
||||
if let NetworkResult::Value(sdm) = &nres {
|
||||
@ -131,7 +131,7 @@ impl NetworkManager {
|
||||
let failure = HOLE_PUNCH_FAILURE.load(Ordering::Acquire);
|
||||
let rate = (success as f64 * 100.0) / ((success + failure) as f64);
|
||||
|
||||
log_network_result!(debug "Hole punch successful ({:.2}% success) to {} via {}", rate, target_node_ref, relay_nr);
|
||||
veilid_log!(this debug target:"network_result", "Hole punch successful ({:.2}% success) to {} via {}", rate, target_node_ref, relay_nr);
|
||||
}
|
||||
}
|
||||
network_result_try!(nres)
|
||||
@ -272,7 +272,7 @@ impl NetworkManager {
|
||||
} else {
|
||||
// No last connection
|
||||
#[cfg(feature = "verbose-tracing")]
|
||||
log_net!(debug
|
||||
veilid_log!(self debug
|
||||
"No last flow in reverse connect for {:?}",
|
||||
target_node_ref
|
||||
);
|
||||
@ -321,7 +321,7 @@ impl NetworkManager {
|
||||
} else {
|
||||
// No last connection
|
||||
#[cfg(feature = "verbose-tracing")]
|
||||
log_net!(debug
|
||||
veilid_log!(self debug
|
||||
"No last flow in hole punch for {:?}",
|
||||
target_node_ref
|
||||
);
|
||||
@ -354,7 +354,7 @@ impl NetworkManager {
|
||||
// First try to send data to the last flow we've seen this peer on
|
||||
let data = if let Some(flow) = node_ref.last_flow() {
|
||||
#[cfg(feature = "verbose-tracing")]
|
||||
log_net!(debug
|
||||
veilid_log!(self debug
|
||||
"ExistingConnection: {:?} for {:?}",
|
||||
flow, node_ref
|
||||
);
|
||||
@ -417,7 +417,7 @@ impl NetworkManager {
|
||||
let routing_domain = match target_node_ref.best_routing_domain() {
|
||||
Some(rd) => rd,
|
||||
None => {
|
||||
log_net!("no routing domain for node {:?}", target_node_ref);
|
||||
veilid_log!(self trace "no routing domain for node {:?}", target_node_ref);
|
||||
return Ok(NodeContactMethod::Unreachable);
|
||||
}
|
||||
};
|
||||
@ -429,7 +429,7 @@ impl NetworkManager {
|
||||
|
||||
// Peer B is the target node, get the whole peer info now
|
||||
let Some(peer_b) = target_node_ref.get_peer_info(routing_domain) else {
|
||||
log_net!("no node info for node {:?}", target_node_ref);
|
||||
veilid_log!(self trace "no node info for node {:?}", target_node_ref);
|
||||
return Ok(NodeContactMethod::Unreachable);
|
||||
};
|
||||
|
||||
@ -507,7 +507,7 @@ impl NetworkManager {
|
||||
// or other firewalling issues and may perform better with TCP.
|
||||
// let unreliable = target_node_ref.peer_stats().rpc_stats.failed_to_send > 2 || target_node_ref.peer_stats().rpc_stats.recent_lost_answers > 2;
|
||||
// if unreliable && sequencing < Sequencing::PreferOrdered {
|
||||
// log_net!(debug "Node contact failing over to Ordered for {}", target_node_ref.to_string().cyan());
|
||||
// veilid_log!(self debug "Node contact failing over to Ordered for {}", target_node_ref.to_string().cyan());
|
||||
// sequencing = Sequencing::PreferOrdered;
|
||||
// }
|
||||
|
||||
|
@ -52,21 +52,21 @@ impl NetworkManager {
|
||||
}
|
||||
|
||||
pub async fn cancel_tasks(&self) {
|
||||
log_net!(debug "stopping receipt manager tasks");
|
||||
veilid_log!(self debug "stopping receipt manager tasks");
|
||||
let receipt_manager = self.receipt_manager();
|
||||
receipt_manager.cancel_tasks().await;
|
||||
|
||||
let net = self.net();
|
||||
net.cancel_tasks().await;
|
||||
|
||||
log_net!(debug "stopping rolling transfers task");
|
||||
veilid_log!(self debug "stopping rolling transfers task");
|
||||
if let Err(e) = self.rolling_transfers_task.stop().await {
|
||||
warn!("rolling_transfers_task not stopped: {}", e);
|
||||
veilid_log!(self warn "rolling_transfers_task not stopped: {}", e);
|
||||
}
|
||||
|
||||
log_net!(debug "stopping address filter task");
|
||||
veilid_log!(self debug "stopping address filter task");
|
||||
if let Err(e) = self.address_filter_task.stop().await {
|
||||
warn!("address_filter_task not stopped: {}", e);
|
||||
veilid_log!(self warn "address_filter_task not stopped: {}", e);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -9,7 +9,7 @@ impl NetworkManager {
|
||||
last_ts: Timestamp,
|
||||
cur_ts: Timestamp,
|
||||
) -> EyreResult<()> {
|
||||
// log_net!("--- network manager rolling_transfers task");
|
||||
// veilid_log!(self trace "--- network manager rolling_transfers task");
|
||||
{
|
||||
let inner = &mut *self.inner.lock();
|
||||
|
||||
|
@ -50,13 +50,13 @@ pub async fn test_add_get_remove() {
|
||||
))),
|
||||
);
|
||||
|
||||
let c1 = NetworkConnection::dummy(1.into(), a1);
|
||||
let c1b = NetworkConnection::dummy(10.into(), a1);
|
||||
let c1 = NetworkConnection::dummy(registry.clone(), 1.into(), a1);
|
||||
let c1b = NetworkConnection::dummy(registry.clone(), 10.into(), a1);
|
||||
let c1h = c1.get_handle();
|
||||
let c2 = NetworkConnection::dummy(2.into(), a2);
|
||||
let c3 = NetworkConnection::dummy(3.into(), a3);
|
||||
let c4 = NetworkConnection::dummy(4.into(), a4);
|
||||
let c5 = NetworkConnection::dummy(5.into(), a5);
|
||||
let c2 = NetworkConnection::dummy(registry.clone(), 2.into(), a2);
|
||||
let c3 = NetworkConnection::dummy(registry.clone(), 3.into(), a3);
|
||||
let c4 = NetworkConnection::dummy(registry.clone(), 4.into(), a4);
|
||||
let c5 = NetworkConnection::dummy(registry.clone(), 5.into(), a5);
|
||||
|
||||
assert_eq!(a1, c2.flow());
|
||||
assert_ne!(a3, c4.flow());
|
||||
@ -92,9 +92,9 @@ pub async fn test_add_get_remove() {
|
||||
assert_eq!(table.peek_connection_by_flow(a2), None);
|
||||
assert_eq!(table.peek_connection_by_flow(a1), None);
|
||||
assert_eq!(table.connection_count(), 0);
|
||||
let c1 = NetworkConnection::dummy(6.into(), a1);
|
||||
let c1 = NetworkConnection::dummy(registry.clone(), 6.into(), a1);
|
||||
table.add_connection(c1).unwrap();
|
||||
let c2 = NetworkConnection::dummy(7.into(), a2);
|
||||
let c2 = NetworkConnection::dummy(registry.clone(), 7.into(), a2);
|
||||
assert_err!(table.add_connection(c2));
|
||||
table.add_connection(c3).unwrap();
|
||||
table.add_connection(c4).unwrap();
|
||||
|
@ -6,6 +6,8 @@ use crate::routing_table::*;
|
||||
pub use protocol::*;
|
||||
use std::io;
|
||||
|
||||
impl_veilid_log_facility!("net");
|
||||
|
||||
/////////////////////////////////////////////////////////////////
|
||||
|
||||
cfg_if! {
|
||||
@ -158,7 +160,9 @@ impl Network {
|
||||
}
|
||||
ProtocolType::WS | ProtocolType::WSS => {
|
||||
let pnc = network_result_try!(ws::WebsocketProtocolHandler::connect(
|
||||
&dial_info, timeout_ms
|
||||
self.registry(),
|
||||
&dial_info,
|
||||
timeout_ms
|
||||
)
|
||||
.await
|
||||
.wrap_err("connect failure")?);
|
||||
@ -215,9 +219,13 @@ impl Network {
|
||||
ProtocolType::UDP => unreachable!(),
|
||||
ProtocolType::TCP => unreachable!(),
|
||||
ProtocolType::WS | ProtocolType::WSS => {
|
||||
ws::WebsocketProtocolHandler::connect(&dial_info, connect_timeout_ms)
|
||||
.await
|
||||
.wrap_err("connect failure")?
|
||||
ws::WebsocketProtocolHandler::connect(
|
||||
self.registry(),
|
||||
&dial_info,
|
||||
connect_timeout_ms,
|
||||
)
|
||||
.await
|
||||
.wrap_err("connect failure")?
|
||||
}
|
||||
});
|
||||
|
||||
@ -351,7 +359,7 @@ impl Network {
|
||||
/////////////////////////////////////////////////////////////////
|
||||
|
||||
pub async fn startup_internal(&self) -> EyreResult<StartupDisposition> {
|
||||
log_net!(debug "starting network");
|
||||
veilid_log!(self debug "starting network");
|
||||
// get protocol config
|
||||
let protocol_config = {
|
||||
let config = self.config();
|
||||
@ -418,7 +426,7 @@ impl Network {
|
||||
|
||||
match self.startup_internal().await {
|
||||
Ok(StartupDisposition::Success) => {
|
||||
info!("network started");
|
||||
veilid_log!(self info "network started");
|
||||
guard.success();
|
||||
Ok(StartupDisposition::Success)
|
||||
}
|
||||
@ -448,9 +456,9 @@ impl Network {
|
||||
|
||||
#[instrument(level = "debug", skip_all)]
|
||||
pub async fn shutdown(&self) {
|
||||
log_net!(debug "starting low level network shutdown");
|
||||
veilid_log!(self debug "starting low level network shutdown");
|
||||
let Ok(guard) = self.startup_lock.shutdown().await else {
|
||||
log_net!(debug "low level network is already shut down");
|
||||
veilid_log!(self debug "low level network is already shut down");
|
||||
return;
|
||||
};
|
||||
|
||||
@ -465,7 +473,7 @@ impl Network {
|
||||
*self.inner.lock() = Self::new_inner();
|
||||
|
||||
guard.success();
|
||||
log_net!(debug "finished low level network shutdown");
|
||||
veilid_log!(self debug "finished low level network shutdown");
|
||||
}
|
||||
|
||||
pub fn get_preferred_local_address(&self, _dial_info: &DialInfo) -> Option<SocketAddr> {
|
||||
@ -485,7 +493,7 @@ impl Network {
|
||||
#[expect(dead_code)]
|
||||
pub fn needs_update_network_class(&self) -> bool {
|
||||
let Ok(_guard) = self.startup_lock.enter() else {
|
||||
log_net!(debug "ignoring due to not started up");
|
||||
veilid_log!(self debug "ignoring due to not started up");
|
||||
return false;
|
||||
};
|
||||
|
||||
@ -494,7 +502,7 @@ impl Network {
|
||||
|
||||
pub fn trigger_update_network_class(&self, _routing_domain: RoutingDomain) {
|
||||
let Ok(_guard) = self.startup_lock.enter() else {
|
||||
log_net!(debug "ignoring due to not started up");
|
||||
veilid_log!(self debug "ignoring due to not started up");
|
||||
return;
|
||||
};
|
||||
}
|
||||
@ -502,7 +510,7 @@ impl Network {
|
||||
#[instrument(level = "trace", target = "net", name = "Network::tick", skip_all, err)]
|
||||
pub async fn tick(&self) -> EyreResult<()> {
|
||||
let Ok(_guard) = self.startup_lock.enter() else {
|
||||
log_net!(debug "ignoring due to not started up");
|
||||
veilid_log!(self debug "ignoring due to not started up");
|
||||
return Ok(());
|
||||
};
|
||||
|
||||
|
@ -13,6 +13,7 @@ pub(in crate::network_manager) enum ProtocolNetworkConnection {
|
||||
|
||||
impl ProtocolNetworkConnection {
|
||||
pub async fn connect(
|
||||
registry: VeilidComponentRegistry,
|
||||
_local_address: Option<SocketAddr>,
|
||||
dial_info: &DialInfo,
|
||||
timeout_ms: u32,
|
||||
@ -29,7 +30,7 @@ impl ProtocolNetworkConnection {
|
||||
panic!("TCP dial info is not supported on WASM targets");
|
||||
}
|
||||
ProtocolType::WS | ProtocolType::WSS => {
|
||||
ws::WebsocketProtocolHandler::connect(dial_info, timeout_ms).await
|
||||
ws::WebsocketProtocolHandler::connect(registry, dial_info, timeout_ms).await
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -11,19 +11,30 @@ struct WebsocketNetworkConnectionInner {
|
||||
|
||||
#[derive(Clone)]
|
||||
pub struct WebsocketNetworkConnection {
|
||||
registry: VeilidComponentRegistry,
|
||||
flow: Flow,
|
||||
inner: Arc<WebsocketNetworkConnectionInner>,
|
||||
}
|
||||
|
||||
impl fmt::Debug for WebsocketNetworkConnection {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
write!(f, "{}", core::any::type_name::<Self>())
|
||||
f.debug_struct("WebsocketNetworkConnection")
|
||||
.field("flow", &self.flow)
|
||||
.finish()
|
||||
}
|
||||
}
|
||||
|
||||
impl_veilid_component_registry_accessor!(WebsocketNetworkConnection);
|
||||
|
||||
impl WebsocketNetworkConnection {
|
||||
pub fn new(flow: Flow, ws_meta: WsMeta, ws_stream: WsStream) -> Self {
|
||||
pub fn new(
|
||||
registry: VeilidComponentRegistry,
|
||||
flow: Flow,
|
||||
ws_meta: WsMeta,
|
||||
ws_stream: WsStream,
|
||||
) -> Self {
|
||||
Self {
|
||||
registry,
|
||||
flow,
|
||||
inner: Arc::new(WebsocketNetworkConnectionInner {
|
||||
ws_meta,
|
||||
@ -44,7 +55,7 @@ impl WebsocketNetworkConnection {
|
||||
#[allow(unused_variables)]
|
||||
let x = self.inner.ws_meta.close().await.map_err(ws_err_to_io_error);
|
||||
#[cfg(feature = "verbose-tracing")]
|
||||
log_net!(debug "close result: {:?}", x);
|
||||
veilid_log!(self debug "close result: {:?}", x);
|
||||
Ok(NetworkResult::value(()))
|
||||
}
|
||||
|
||||
@ -101,6 +112,7 @@ pub(in crate::network_manager) struct WebsocketProtocolHandler {}
|
||||
impl WebsocketProtocolHandler {
|
||||
#[instrument(level = "trace", target = "protocol", ret, err)]
|
||||
pub async fn connect(
|
||||
registry: VeilidComponentRegistry,
|
||||
dial_info: &DialInfo,
|
||||
timeout_ms: u32,
|
||||
) -> io::Result<NetworkResult<ProtocolNetworkConnection>> {
|
||||
@ -129,6 +141,7 @@ impl WebsocketProtocolHandler {
|
||||
|
||||
// Make our flow
|
||||
let wnc = WebsocketNetworkConnection::new(
|
||||
registry,
|
||||
Flow::new_no_local(dial_info.peer_address()),
|
||||
wsmeta,
|
||||
wsio,
|
||||
|
@ -1,12 +1,16 @@
|
||||
use super::*;
|
||||
use core::sync::atomic::Ordering;
|
||||
|
||||
impl_veilid_log_facility!("rtab");
|
||||
|
||||
/// Routing Table Bucket
|
||||
/// Stores map of public keys to entries, which may be in multiple routing tables per crypto kind
|
||||
/// Keeps entries at a particular 'dht distance' from this cryptokind's node id
|
||||
/// Helps to keep managed lists at particular distances so we can evict nodes by priority
|
||||
/// where the priority comes from liveness and age of the entry (older is better)
|
||||
pub struct Bucket {
|
||||
/// Component registryo accessor
|
||||
registry: VeilidComponentRegistry,
|
||||
/// Map of keys to entries for this bucket
|
||||
entries: BTreeMap<PublicKey, Arc<BucketEntry>>,
|
||||
/// The crypto kind in use for the public keys in this bucket
|
||||
@ -26,9 +30,12 @@ struct SerializedBucketData {
|
||||
entries: Vec<SerializedBucketEntryData>,
|
||||
}
|
||||
|
||||
impl_veilid_component_registry_accessor!(Bucket);
|
||||
|
||||
impl Bucket {
|
||||
pub fn new(kind: CryptoKind) -> Self {
|
||||
pub fn new(registry: VeilidComponentRegistry, kind: CryptoKind) -> Self {
|
||||
Self {
|
||||
registry,
|
||||
entries: BTreeMap::new(),
|
||||
kind,
|
||||
}
|
||||
@ -73,7 +80,7 @@ impl Bucket {
|
||||
|
||||
/// Create a new entry with a node_id of this crypto kind and return it
|
||||
pub(super) fn add_new_entry(&mut self, node_id_key: PublicKey) -> Arc<BucketEntry> {
|
||||
log_rtab!("Node added: {}:{}", self.kind, node_id_key);
|
||||
veilid_log!(self trace "Node added: {}:{}", self.kind, node_id_key);
|
||||
|
||||
// Add new entry
|
||||
let entry = Arc::new(BucketEntry::new(TypedKey::new(self.kind, node_id_key)));
|
||||
@ -85,7 +92,7 @@ impl Bucket {
|
||||
|
||||
/// Add an existing entry with a new node_id for this crypto kind
|
||||
pub(super) fn add_existing_entry(&mut self, node_id_key: PublicKey, entry: Arc<BucketEntry>) {
|
||||
log_rtab!("Existing node added: {}:{}", self.kind, node_id_key);
|
||||
veilid_log!(self trace "Existing node added: {}:{}", self.kind, node_id_key);
|
||||
|
||||
// Add existing entry
|
||||
self.entries.insert(node_id_key, entry);
|
||||
@ -93,7 +100,7 @@ impl Bucket {
|
||||
|
||||
/// Remove an entry with a node_id for this crypto kind from the bucket
|
||||
pub(super) fn remove_entry(&mut self, node_id_key: &PublicKey) {
|
||||
log_rtab!("Node removed: {}:{}", self.kind, node_id_key);
|
||||
veilid_log!(self trace "Node removed: {}:{}", self.kind, node_id_key);
|
||||
|
||||
// Remove the entry
|
||||
self.entries.remove(node_id_key);
|
||||
|
@ -1245,10 +1245,10 @@ impl Drop for BucketEntry {
|
||||
if self.ref_count.load(Ordering::Acquire) != 0 {
|
||||
#[cfg(feature = "tracking")]
|
||||
{
|
||||
info!("NodeRef Tracking");
|
||||
veilid_log!(self info "NodeRef Tracking");
|
||||
for (id, bt) in &mut self.node_ref_tracks {
|
||||
bt.resolve();
|
||||
info!("Id: {}\n----------------\n{:#?}", id, bt);
|
||||
veilid_log!(self info "Id: {}\n----------------\n{:#?}", id, bt);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -199,7 +199,8 @@ far (self): {}
|
||||
d_far,
|
||||
d_near.cmp(&d_far)
|
||||
);
|
||||
warn!("{}", warning);
|
||||
let crypto = vcrypto.crypto();
|
||||
veilid_log!(crypto warn "{}", warning);
|
||||
closer = false;
|
||||
break;
|
||||
}
|
||||
|
@ -31,6 +31,8 @@ use crate::rpc_processor::*;
|
||||
use bucket::*;
|
||||
use hashlink::LruCache;
|
||||
|
||||
impl_veilid_log_facility!("rtab");
|
||||
|
||||
//////////////////////////////////////////////////////////////////////////
|
||||
|
||||
/// How many nodes in our routing table we require for a functional PublicInternet RoutingDomain
|
||||
@ -201,7 +203,7 @@ impl RoutingTable {
|
||||
|
||||
/// Called to initialize the routing table after it is created
|
||||
async fn init_async(&self) -> EyreResult<()> {
|
||||
log_rtab!(debug "starting routing table init");
|
||||
veilid_log!(self debug "starting routing table init");
|
||||
|
||||
// Set up routing buckets
|
||||
{
|
||||
@ -210,22 +212,22 @@ impl RoutingTable {
|
||||
}
|
||||
|
||||
// Load bucket entries from table db if possible
|
||||
log_rtab!(debug "loading routing table entries");
|
||||
veilid_log!(self debug "loading routing table entries");
|
||||
if let Err(e) = self.load_buckets().await {
|
||||
log_rtab!(debug "Error loading buckets from storage: {:#?}. Resetting.", e);
|
||||
veilid_log!(self debug "Error loading buckets from storage: {:#?}. Resetting.", e);
|
||||
let mut inner = self.inner.write();
|
||||
inner.init_buckets();
|
||||
}
|
||||
|
||||
// Set up routespecstore
|
||||
log_rtab!(debug "starting route spec store init");
|
||||
veilid_log!(self debug "starting route spec store init");
|
||||
if let Err(e) = self.route_spec_store().load().await {
|
||||
log_rtab!(debug "Error loading route spec store: {:#?}. Resetting.", e);
|
||||
veilid_log!(self debug "Error loading route spec store: {:#?}. Resetting.", e);
|
||||
self.route_spec_store().reset();
|
||||
};
|
||||
log_rtab!(debug "finished route spec store init");
|
||||
veilid_log!(self debug "finished route spec store init");
|
||||
|
||||
log_rtab!(debug "finished routing table init");
|
||||
veilid_log!(self debug "finished routing table init");
|
||||
Ok(())
|
||||
}
|
||||
|
||||
@ -239,7 +241,7 @@ impl RoutingTable {
|
||||
|
||||
pub(crate) async fn shutdown(&self) {
|
||||
// Stop tasks
|
||||
log_net!(debug "stopping routing table tasks");
|
||||
veilid_log!(self debug "stopping routing table tasks");
|
||||
self.cancel_tasks().await;
|
||||
}
|
||||
|
||||
@ -247,15 +249,15 @@ impl RoutingTable {
|
||||
|
||||
/// Called to shut down the routing table
|
||||
async fn terminate_async(&self) {
|
||||
log_rtab!(debug "starting routing table terminate");
|
||||
veilid_log!(self debug "starting routing table terminate");
|
||||
|
||||
// Load bucket entries from table db if possible
|
||||
log_rtab!(debug "saving routing table entries");
|
||||
veilid_log!(self debug "saving routing table entries");
|
||||
if let Err(e) = self.save_buckets().await {
|
||||
error!("failed to save routing table entries: {}", e);
|
||||
}
|
||||
|
||||
log_rtab!(debug "saving route spec store");
|
||||
veilid_log!(self debug "saving route spec store");
|
||||
let rss = {
|
||||
let mut inner = self.inner.write();
|
||||
inner.route_spec_store.take()
|
||||
@ -265,12 +267,12 @@ impl RoutingTable {
|
||||
error!("couldn't save route spec store: {}", e);
|
||||
}
|
||||
}
|
||||
log_rtab!(debug "shutting down routing table");
|
||||
veilid_log!(self debug "shutting down routing table");
|
||||
|
||||
let mut inner = self.inner.write();
|
||||
*inner = RoutingTableInner::new(self.registry());
|
||||
|
||||
log_rtab!(debug "finished routing table terminate");
|
||||
veilid_log!(self debug "finished routing table terminate");
|
||||
}
|
||||
|
||||
///////////////////////////////////////////////////////////////////
|
||||
@ -422,7 +424,7 @@ impl RoutingTable {
|
||||
};
|
||||
if !caches_valid {
|
||||
// Caches not valid, start over
|
||||
log_rtab!(debug "cache validity key changed, emptying routing table");
|
||||
veilid_log!(self debug "cache validity key changed, emptying routing table");
|
||||
drop(db);
|
||||
table_store.delete(ROUTING_TABLE).await?;
|
||||
let db = table_store.open(ROUTING_TABLE, 1).await?;
|
||||
@ -434,13 +436,13 @@ impl RoutingTable {
|
||||
let Some(serialized_bucket_map): Option<SerializedBucketMap> =
|
||||
db.load_json(0, SERIALIZED_BUCKET_MAP).await?
|
||||
else {
|
||||
log_rtab!(debug "no bucket map in saved routing table");
|
||||
veilid_log!(self debug "no bucket map in saved routing table");
|
||||
return Ok(());
|
||||
};
|
||||
let Some(all_entry_bytes): Option<SerializedBuckets> =
|
||||
db.load_json(0, ALL_ENTRY_BYTES).await?
|
||||
else {
|
||||
log_rtab!(debug "no all_entry_bytes in saved routing table");
|
||||
veilid_log!(self debug "no all_entry_bytes in saved routing table");
|
||||
return Ok(());
|
||||
};
|
||||
|
||||
@ -480,11 +482,11 @@ impl RoutingTable {
|
||||
// Validate serialized bucket map
|
||||
for (k, v) in &serialized_bucket_map {
|
||||
if !VALID_CRYPTO_KINDS.contains(k) {
|
||||
warn!("crypto kind is not valid, not loading routing table");
|
||||
veilid_log!(inner warn "crypto kind is not valid, not loading routing table");
|
||||
return Ok(());
|
||||
}
|
||||
if v.len() != PUBLIC_KEY_LENGTH * 8 {
|
||||
warn!("bucket count is different, not loading routing table");
|
||||
veilid_log!(inner warn "bucket count is different, not loading routing table");
|
||||
return Ok(());
|
||||
}
|
||||
}
|
||||
@ -1014,7 +1016,7 @@ impl RoutingTable {
|
||||
match self.register_node_with_peer_info(p, false) {
|
||||
Ok(nr) => out.push(nr.unfiltered()),
|
||||
Err(e) => {
|
||||
log_rtab!(debug "failed to register node with peer info from find node answer: {}", e);
|
||||
veilid_log!(self debug "failed to register node with peer info from find node answer: {}", e);
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -1089,9 +1091,9 @@ impl RoutingTable {
|
||||
capabilities: Vec<Capability>,
|
||||
) {
|
||||
// Ask node for nodes closest to our own node
|
||||
let closest_nodes = network_result_value_or_log!(match self.find_nodes_close_to_self(crypto_kind, node_ref.clone(), capabilities.clone()).await {
|
||||
let closest_nodes = network_result_value_or_log!(self match self.find_nodes_close_to_self(crypto_kind, node_ref.clone(), capabilities.clone()).await {
|
||||
Err(e) => {
|
||||
log_rtab!(error
|
||||
veilid_log!(self error
|
||||
"find_self failed for {:?}: {:?}",
|
||||
&node_ref, e
|
||||
);
|
||||
@ -1105,9 +1107,9 @@ impl RoutingTable {
|
||||
// Ask each node near us to find us as well
|
||||
if wide {
|
||||
for closest_nr in closest_nodes {
|
||||
network_result_value_or_log!(match self.find_nodes_close_to_self(crypto_kind, closest_nr.clone(), capabilities.clone()).await {
|
||||
network_result_value_or_log!(self match self.find_nodes_close_to_self(crypto_kind, closest_nr.clone(), capabilities.clone()).await {
|
||||
Err(e) => {
|
||||
log_rtab!(error
|
||||
veilid_log!(self error
|
||||
"find_self failed for {:?}: {:?}",
|
||||
&closest_nr, e
|
||||
);
|
||||
|

@ -1,5 +1,7 @@
use super::*;

impl_veilid_log_facility!("rtab");

pub(crate) struct FilteredNodeRef {
registry: VeilidComponentRegistry,
entry: Arc<BucketEntry>,

@ -4,6 +4,8 @@ mod node_ref_lock;
mod node_ref_lock_mut;
mod traits;

impl_veilid_log_facility!("rtab");

use super::*;

pub(crate) use filtered_node_ref::*;

@ -1,4 +1,5 @@
use super::*;
impl_veilid_log_facility!("rtab");

////////////////////////////////////////////////////////////////////////////////////////////////////
// Compiled Privacy Objects
@ -49,7 +50,7 @@ impl RouteNode {
match routing_table.lookup_node_ref(TypedKey::new(crypto_kind, *id)) {
Ok(nr) => nr,
Err(e) => {
log_rtab!(debug "failed to look up route node: {}", e);
veilid_log!(routing_table debug "failed to look up route node: {}", e);
None
}
}
@ -59,7 +60,7 @@ impl RouteNode {
match routing_table.register_node_with_peer_info(pi.clone(), false) {
Ok(nr) => Some(nr.unfiltered()),
Err(e) => {
log_rtab!(debug "failed to register route node: {}", e);
veilid_log!(routing_table debug "failed to register route node: {}", e);
None
}
}

@ -17,6 +17,8 @@ use route_spec_store_content::*;
pub(crate) use route_spec_store_cache::CompiledRoute;
pub use route_stats::*;

impl_veilid_log_facility!("rtab");

/// The size of the remote private route cache
const REMOTE_PRIVATE_ROUTE_CACHE_SIZE: usize = 1024;
/// Remote private route cache entries expire in 5 minutes if they haven't been used
@ -54,10 +56,10 @@ impl RouteSpecStore {
let c = config.get();

Self {
registry,
registry: registry.clone(),
inner: Mutex::new(RouteSpecStoreInner {
content: RouteSpecStoreContent::new(),
cache: Default::default(),
content: RouteSpecStoreContent::default(),
cache: RouteSpecStoreCache::new(registry.clone()),
}),
max_route_hop_count: c.network.rpc.max_route_hop_count.into(),
default_route_hop_count: c.network.rpc.default_route_hop_count.into(),
@ -67,8 +69,8 @@ impl RouteSpecStore {
#[instrument(level = "trace", target = "route", skip_all)]
pub fn reset(&self) {
*self.inner.lock() = RouteSpecStoreInner {
content: RouteSpecStoreContent::new(),
cache: Default::default(),
content: RouteSpecStoreContent::default(),
cache: RouteSpecStoreCache::new(self.registry()),
};
}

@ -83,7 +85,7 @@ impl RouteSpecStore {

let mut inner = RouteSpecStoreInner {
content,
cache: Default::default(),
cache: RouteSpecStoreCache::new(self.registry()),
};

// Rebuild the routespecstore cache
@ -145,7 +147,7 @@ impl RouteSpecStore {
{
let inner = &mut *self.inner.lock();
inner.content = Default::default();
inner.cache = Default::default();
inner.cache = RouteSpecStoreCache::new(self.registry());
}
self.save().await.map_err(VeilidAPIError::internal)
}
@ -293,7 +295,7 @@ impl RouteSpecStore {
// Since denylist is used, consider nodes with unknown countries to be automatically
// excluded as well
if geolocation_info.country_code().is_none() {
log_rtab!(
veilid_log!(self
debug "allocate_route_inner: skipping node {} from unknown country",
e.best_node_id()
);
@ -305,7 +307,7 @@ impl RouteSpecStore {
.iter()
.any(Option::is_none)
{
log_rtab!(
veilid_log!(self
debug "allocate_route_inner: skipping node {} using relay from unknown country",
e.best_node_id()
);
@ -316,7 +318,7 @@ impl RouteSpecStore {
// Safe to unwrap here, checked above
if country_code_denylist.contains(&geolocation_info.country_code().unwrap())
{
log_rtab!(
veilid_log!(self
debug "allocate_route_inner: skipping node {} from excluded country {}",
e.best_node_id(),
geolocation_info.country_code().unwrap()
@ -333,7 +335,7 @@ impl RouteSpecStore {
.map(Option::unwrap)
.any(|cc| country_code_denylist.contains(&cc))
{
log_rtab!(
veilid_log!(self
debug "allocate_route_inner: skipping node {} using relay from excluded country {:?}",
e.best_node_id(),
geolocation_info
@ -713,27 +715,27 @@ impl RouteSpecStore {
let inner = &*self.inner.lock();
let crypto = self.crypto();
let Some(vcrypto) = crypto.get(public_key.kind) else {
log_rpc!(debug "can't handle route with public key: {:?}", public_key);
veilid_log!(self debug "can't handle route with public key: {:?}", public_key);
return None;
};

let Some(rsid) = inner.content.get_id_by_key(&public_key.value) else {
log_rpc!(debug "route id does not exist: {:?}", public_key.value);
veilid_log!(self debug "route id does not exist: {:?}", public_key.value);
return None;
};
let Some(rssd) = inner.content.get_detail(&rsid) else {
log_rpc!(debug "route detail does not exist: {:?}", rsid);
veilid_log!(self debug "route detail does not exist: {:?}", rsid);
return None;
};
let Some(rsd) = rssd.get_route_by_key(&public_key.value) else {
log_rpc!(debug "route set {:?} does not have key: {:?}", rsid, public_key.value);
veilid_log!(self debug "route set {:?} does not have key: {:?}", rsid, public_key.value);
return None;
};

// Ensure we have the right number of signatures
if signatures.len() != rsd.hops.len() - 1 {
// Wrong number of signatures
log_rpc!(debug "wrong number of signatures ({} should be {}) for routed operation on private route {}", signatures.len(), rsd.hops.len() - 1, public_key);
veilid_log!(self debug "wrong number of signatures ({} should be {}) for routed operation on private route {}", signatures.len(), rsd.hops.len() - 1, public_key);
return None;
}
// Validate signatures to ensure the route was handled by the nodes and not messed with
@ -743,7 +745,7 @@ impl RouteSpecStore {
if hop_n == signatures.len() {
// Verify the node we received the routed operation from is the last hop in our route
if *hop_public_key != last_hop_id {
log_rpc!(debug "received routed operation from the wrong hop ({} should be {}) on private route {}", hop_public_key.encode(), last_hop_id.encode(), public_key);
veilid_log!(self debug "received routed operation from the wrong hop ({} should be {}) on private route {}", hop_public_key.encode(), last_hop_id.encode(), public_key);
return None;
}
} else {
@ -751,11 +753,11 @@ impl RouteSpecStore {
match vcrypto.verify(hop_public_key, data, &signatures[hop_n]) {
Ok(true) => {}
Ok(false) => {
log_rpc!(debug "invalid signature for hop {} at {} on private route {}", hop_n, hop_public_key, public_key);
veilid_log!(self debug "invalid signature for hop {} at {} on private route {}", hop_n, hop_public_key, public_key);
return None;
}
Err(e) => {
log_rpc!(debug "error verifying signature for hop {} at {} on private route {}: {}", hop_n, hop_public_key, public_key, e);
veilid_log!(self debug "error verifying signature for hop {} at {} on private route {}: {}", hop_n, hop_public_key, public_key, e);
return None;
}
}
@ -1129,7 +1131,7 @@ impl RouteSpecStore {
);

// Return the compiled safety route
//info!("compile_safety_route profile (stub): {} us", (get_timestamp() - profile_start_ts));
//veilid_log!(self info "compile_safety_route profile (stub): {} us", (get_timestamp() - profile_start_ts));
return Ok(CompiledRoute {
safety_route: SafetyRoute::new_stub(
routing_table.node_id(crypto_kind),
@ -1204,7 +1206,7 @@ impl RouteSpecStore {
first_hop,
};
// Return compiled route
//info!("compile_safety_route profile (cached): {} us", (get_timestamp() - profile_start_ts));
//veilid_log!(self info "compile_safety_route profile (cached): {} us", (get_timestamp() - profile_start_ts));
return Ok(compiled_route);
}
}
@ -1323,7 +1325,7 @@ impl RouteSpecStore {
};

// Return compiled route
//info!("compile_safety_route profile (uncached): {} us", (get_timestamp() - profile_start_ts));
//veilid_log!(self info "compile_safety_route profile (uncached): {} us", (get_timestamp() - profile_start_ts));
Ok(compiled_route)
}

@ -1767,7 +1769,7 @@ impl RouteSpecStore {
/// Clear caches when our local node info changes
#[instrument(level = "trace", target = "route", skip(self))]
pub fn reset_cache(&self) {
log_rtab!(debug "resetting route cache");
veilid_log!(self debug "resetting route cache");

let inner = &mut *self.inner.lock();

@ -1,4 +1,5 @@
use super::*;
impl_veilid_log_facility!("rtab");

// Compiled route key for caching
#[derive(Clone, Debug, Hash, PartialEq, Eq, PartialOrd, Ord)]
@ -22,6 +23,8 @@ pub struct CompiledRoute {
/// Ephemeral data used to help the RouteSpecStore operate efficiently
#[derive(Debug)]
pub struct RouteSpecStoreCache {
/// Registry accessor
registry: VeilidComponentRegistry,
/// How many times nodes have been used
used_nodes: HashMap<PublicKey, usize>,
/// How many times nodes have been used at the terminal point of a route
@ -40,7 +43,23 @@ pub struct RouteSpecStoreCache {
dead_remote_routes: Vec<RouteId>,
}

impl_veilid_component_registry_accessor!(RouteSpecStoreCache);

impl RouteSpecStoreCache {
pub fn new(registry: VeilidComponentRegistry) -> Self {
Self {
registry,
used_nodes: Default::default(),
used_end_nodes: Default::default(),
hop_cache: Default::default(),
remote_private_route_set_cache: LruCache::new(REMOTE_PRIVATE_ROUTE_CACHE_SIZE),
remote_private_routes_by_key: HashMap::new(),
compiled_route_cache: LruCache::new(COMPILED_ROUTE_CACHE_SIZE),
dead_routes: Default::default(),
dead_remote_routes: Default::default(),
}
}

/// add an allocated route set to our cache via its cache key
pub fn add_to_cache(&mut self, rti: &RoutingTableInner, rssd: &RouteSetSpecDetail) {
let cache_key = rssd.make_cache_key(rti);
@ -309,7 +328,7 @@ impl RouteSpecStoreCache {
};

if let Some(v) = self.compiled_route_cache.insert(key, safety_route) {
log_rtab!(error "route cache already contained key: sr_pubkey={:?}, pr_pubkey={:?}", v.public_key, pr_pubkey);
veilid_log!(self error "route cache already contained key: sr_pubkey={:?}, pr_pubkey={:?}", v.public_key, pr_pubkey);
}
}

@ -372,18 +391,3 @@ impl RouteSpecStoreCache {
}
}
}

impl Default for RouteSpecStoreCache {
fn default() -> Self {
Self {
used_nodes: Default::default(),
used_end_nodes: Default::default(),
hop_cache: Default::default(),
remote_private_route_set_cache: LruCache::new(REMOTE_PRIVATE_ROUTE_CACHE_SIZE),
remote_private_routes_by_key: HashMap::new(),
compiled_route_cache: LruCache::new(COMPILED_ROUTE_CACHE_SIZE),
dead_routes: Default::default(),
dead_remote_routes: Default::default(),
}
}
}
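
A second recurring change, visible in the RouteSpecStoreCache hunks above, is that per-component state stops implementing Default and instead takes a VeilidComponentRegistry in its constructor, with impl_veilid_component_registry_accessor! supplying the accessor that veilid_log! targets. A rough sketch of that shape, using a placeholder struct and field rather than the real cache (assumptions for illustration, not code from the commit):

pub struct ExampleCache {
    /// Registry accessor used by veilid_log! call sites
    registry: VeilidComponentRegistry,
    /// Placeholder payload, standing in for the real cache fields
    entries: HashMap<PublicKey, usize>,
}

impl_veilid_component_registry_accessor!(ExampleCache);

impl ExampleCache {
    pub fn new(registry: VeilidComponentRegistry) -> Self {
        Self {
            registry,
            entries: HashMap::new(),
        }
    }
}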

@ -10,13 +10,6 @@ pub(super) struct RouteSpecStoreContent {
}

impl RouteSpecStoreContent {
pub fn new() -> Self {
Self {
id_by_key: HashMap::new(),
details: HashMap::new(),
}
}

pub async fn load(
table_store: &TableStore,
routing_table: &RoutingTable,
@ -54,7 +47,7 @@ impl RouteSpecStoreContent {
rssd.set_hop_node_refs(hop_node_refs);
}
for id in dead_ids {
log_rtab!(debug "no entry, killing off private route: {}", id);
veilid_log!(table_store trace "no entry, killing off private route: {}", id);
content.remove_detail(&id);
}

@ -5,6 +5,8 @@ pub use routing_domains::*;

use weak_table::PtrWeakHashSet;

impl_veilid_log_facility!("rtab");

pub const RECENT_PEERS_TABLE_SIZE: usize = 64;

// Critical sections
@ -49,10 +51,12 @@ impl_veilid_component_registry_accessor!(RoutingTableInner);
impl RoutingTableInner {
pub(super) fn new(registry: VeilidComponentRegistry) -> RoutingTableInner {
RoutingTableInner {
registry,
registry: registry.clone(),
buckets: BTreeMap::new(),
public_internet_routing_domain: PublicInternetRoutingDomainDetail::default(),
local_network_routing_domain: LocalNetworkRoutingDomainDetail::default(),
public_internet_routing_domain: PublicInternetRoutingDomainDetail::new(
registry.clone(),
),
local_network_routing_domain: LocalNetworkRoutingDomainDetail::new(registry.clone()),
all_entries: PtrWeakHashSet::new(),
live_entry_count: BTreeMap::new(),
self_latency_stats_accounting: LatencyStatsAccounting::new(),
@ -306,7 +310,7 @@ impl RoutingTableInner {
for ck in VALID_CRYPTO_KINDS {
let mut ckbuckets = Vec::with_capacity(PUBLIC_KEY_LENGTH * 8);
for _ in 0..PUBLIC_KEY_LENGTH * 8 {
let bucket = Bucket::new(ck);
let bucket = Bucket::new(self.registry(), ck);
ckbuckets.push(bucket);
}
self.buckets.insert(ck, ckbuckets);
@ -316,7 +320,7 @@ impl RoutingTableInner {
/// Attempt to empty the routing table
/// should only be performed when there are no node_refs (detached)
pub fn purge_buckets(&mut self) {
log_rtab!(
veilid_log!(self trace
"Starting routing table buckets purge. Table currently has {} nodes",
self.bucket_entry_count()
);
@ -328,7 +332,7 @@ impl RoutingTableInner {
}
self.all_entries.remove_expired();

log_rtab!(debug
veilid_log!(self debug
"Routing table buckets purge complete. Routing table now has {} nodes",
self.bucket_entry_count()
);
@ -336,7 +340,7 @@ impl RoutingTableInner {

/// Attempt to remove last_connections from entries
pub fn purge_last_connections(&mut self) {
log_rtab!("Starting routing table last_connections purge.");
veilid_log!(self trace "Starting routing table last_connections purge.");
for ck in VALID_CRYPTO_KINDS {
for bucket in &self.buckets[&ck] {
for entry in bucket.entries() {
@ -346,7 +350,7 @@ impl RoutingTableInner {
}
}
}
log_rtab!(debug "Routing table last_connections purge complete.");
veilid_log!(self debug "Routing table last_connections purge complete.");
}

/// Attempt to settle buckets and remove entries down to the desired number
@ -359,7 +363,7 @@ impl RoutingTableInner {
// Remove expired entries
self.all_entries.remove_expired();

log_rtab!(debug "Bucket {}:{} kicked Routing table now has {} nodes\nKicked nodes:{:#?}", bucket_index.0, bucket_index.1, self.bucket_entry_count(), dead_node_ids);
veilid_log!(self debug "Bucket {}:{} kicked Routing table now has {} nodes\nKicked nodes:{:#?}", bucket_index.0, bucket_index.1, self.bucket_entry_count(), dead_node_ids);
}
}

@ -763,7 +767,7 @@ impl RoutingTableInner {
new_entry.with_mut_inner(|e| update_func(self, e));

// Kick the bucket
log_rtab!(debug "Routing table now has {} nodes, {} live", self.bucket_entry_count(), self.get_entry_count(RoutingDomainSet::all(), BucketEntryState::Unreliable, &VALID_CRYPTO_KINDS));
veilid_log!(self debug "Routing table now has {} nodes, {} live", self.bucket_entry_count(), self.get_entry_count(RoutingDomainSet::all(), BucketEntryState::Unreliable, &VALID_CRYPTO_KINDS));

Ok(nr)
}
@ -820,11 +824,11 @@ impl RoutingTableInner {
F: FnOnce(Arc<BucketEntry>) -> R,
{
if self.routing_table().matches_own_node_id(&[node_id]) {
log_rtab!(error "can't look up own node id in routing table");
veilid_log!(self error "can't look up own node id in routing table");
return None;
}
if !VALID_CRYPTO_KINDS.contains(&node_id.kind) {
log_rtab!(error "can't look up node id with invalid crypto kind");
veilid_log!(self error "can't look up node id with invalid crypto kind");
return None;
}
let bucket_entry = self.routing_table().calculate_bucket_index(&node_id);
@ -1345,7 +1349,7 @@ impl RoutingTableInner {

let out =
self.find_peers_with_sort_and_filter(node_count, cur_ts, filters, sort, transform);
log_rtab!(">> find_closest_nodes: node count = {}", out.len());
veilid_log!(self trace ">> find_closest_nodes: node count = {}", out.len());
Ok(out)
}

@ -160,7 +160,7 @@ impl<'a> RoutingDomainEditorCommonTrait for RoutingDomainEditorLocalNetwork<'a>
.filter(|di| !new_dial_info_details.contains(di))
.collect::<Vec<_>>();
if !removed_dial_info.is_empty() {
info!(
veilid_log!(rti info
"[LocalNetwork] removed dial info:\n{}",
indent_all_string(&removed_dial_info.to_multiline_string())
.strip_trailing_newline()
@ -172,7 +172,7 @@ impl<'a> RoutingDomainEditorCommonTrait for RoutingDomainEditorLocalNetwork<'a>
.filter(|di| !old_dial_info_details.contains(di))
.collect::<Vec<_>>();
if !added_dial_info.is_empty() {
info!(
veilid_log!(rti info
"[LocalNetwork] added dial info:\n{}",
indent_all_string(&added_dial_info.to_multiline_string())
.strip_trailing_newline()
@ -182,44 +182,44 @@ impl<'a> RoutingDomainEditorCommonTrait for RoutingDomainEditorLocalNetwork<'a>
if let Some(nrn) = new_relay_node {
if let Some(orn) = old_relay_node {
if !nrn.same_entry(&orn) {
info!("[LocalNetwork] change relay: {} -> {}", orn, nrn);
veilid_log!(rti info "[LocalNetwork] change relay: {} -> {}", orn, nrn);
peer_info_changed = true;
}
} else {
info!("[LocalNetwork] set relay: {}", nrn);
veilid_log!(rti info "[LocalNetwork] set relay: {}", nrn);
peer_info_changed = true;
}
}
if old_outbound_protocols != new_outbound_protocols {
info!(
veilid_log!(rti info
"[LocalNetwork] changed network: outbound {:?}->{:?}",
old_outbound_protocols, new_outbound_protocols
);
peer_info_changed = true;
}
if old_inbound_protocols != new_inbound_protocols {
info!(
veilid_log!(rti info
"[LocalNetwork] changed network: inbound {:?}->{:?}",
old_inbound_protocols, new_inbound_protocols,
old_inbound_protocols, new_inbound_protocols
);
peer_info_changed = true;
}
if old_address_types != new_address_types {
info!(
veilid_log!(rti info
"[LocalNetwork] changed network: address types {:?}->{:?}",
old_address_types, new_address_types,
old_address_types, new_address_types
);
peer_info_changed = true;
}
if old_capabilities != new_capabilities {
info!(
veilid_log!(rti info
"[PublicInternet] changed network: capabilities {:?}->{:?}",
old_capabilities, new_capabilities
);
peer_info_changed = true;
}
if old_network_class != new_network_class {
info!(
veilid_log!(rti info
"[LocalNetwork] changed network class: {:?}->{:?}",
old_network_class, new_network_class
);

@ -4,9 +4,13 @@ pub use editor::*;

use super::*;

impl_veilid_log_facility!("rtab");

/// Local Network routing domain internals
#[derive(Debug)]
pub struct LocalNetworkRoutingDomainDetail {
/// Registry accessor
registry: VeilidComponentRegistry,
/// The local networks this domain will communicate with
local_networks: Vec<(IpAddr, IpAddr)>,
/// Common implementation for all routing domains
@ -15,9 +19,12 @@ pub struct LocalNetworkRoutingDomainDetail {
published_peer_info: Mutex<Option<Arc<PeerInfo>>>,
}

impl Default for LocalNetworkRoutingDomainDetail {
fn default() -> Self {
impl_veilid_component_registry_accessor!(LocalNetworkRoutingDomainDetail);

impl LocalNetworkRoutingDomainDetail {
pub fn new(registry: VeilidComponentRegistry) -> Self {
Self {
registry,
local_networks: Default::default(),
common: RoutingDomainDetailCommon::new(RoutingDomain::LocalNetwork),
published_peer_info: Default::default(),
@ -121,13 +128,13 @@ impl RoutingDomainDetail for LocalNetworkRoutingDomainDetail {

if pi.signed_node_info().node_info().network_class() == NetworkClass::Invalid {
// If the network class is not yet determined, don't publish
log_rtab!(debug "[LocalNetwork] Not publishing peer info with invalid network class");
veilid_log!(rti debug "[LocalNetwork] Not publishing peer info with invalid network class");
None
} else if self.requires_relay().is_some()
&& pi.signed_node_info().relay_ids().is_empty()
{
// If we need a relay and we don't have one, don't publish yet
log_rtab!(debug "[LocalNetwork] Not publishing peer info that wants relay until we have a relay");
veilid_log!(rti debug "[LocalNetwork] Not publishing peer info that wants relay until we have a relay");
None
} else {
// This peerinfo is fit to publish
@ -140,19 +147,19 @@ impl RoutingDomainDetail for LocalNetworkRoutingDomainDetail {
if let Some(old_peer_info) = &*ppi_lock {
if let Some(new_peer_info) = &opt_new_peer_info {
if new_peer_info.equivalent(old_peer_info) {
log_rtab!(debug "[LocalNetwork] Not publishing peer info because it is equivalent");
veilid_log!(rti debug "[LocalNetwork] Not publishing peer info because it is equivalent");
return false;
}
}
} else if opt_new_peer_info.is_none() {
log_rtab!(debug "[LocalNetwork] Not publishing peer info because it is still None");
veilid_log!(rti debug "[LocalNetwork] Not publishing peer info because it is still None");
return false;
}

if opt_new_peer_info.is_some() {
log_rtab!(debug "[LocalNetwork] Published new peer info: {}", opt_new_peer_info.as_ref().unwrap());
veilid_log!(rti debug "[LocalNetwork] Published new peer info: {}", opt_new_peer_info.as_ref().unwrap());
} else {
log_rtab!(debug "[LocalNetwork] Unpublishing because current peer info is invalid");
veilid_log!(rti debug "[LocalNetwork] Unpublishing because current peer info is invalid");
}
*ppi_lock = opt_new_peer_info.clone();

@ -163,7 +170,7 @@ impl RoutingDomainDetail for LocalNetworkRoutingDomainDetail {
routing_domain: RoutingDomain::LocalNetwork,
opt_peer_info,
}) {
log_rtab!(debug "Failed to post event: {}", e);
veilid_log!(rti debug "Failed to post event: {}", e);
}

true
@ -171,7 +178,7 @@ impl RoutingDomainDetail for LocalNetworkRoutingDomainDetail {

fn unpublish_peer_info(&self) {
let mut ppi_lock = self.published_peer_info.lock();
log_rtab!(debug "[LocalNetwork] Unpublished peer info");
veilid_log!(self debug "[LocalNetwork] Unpublished peer info");
*ppi_lock = None;
}

@ -180,11 +187,11 @@ impl RoutingDomainDetail for LocalNetworkRoutingDomainDetail {
let can_contain_address = self.can_contain_address(address);

if !can_contain_address {
log_network_result!(debug "[LocalNetwork] can not add dial info to this routing domain: {:?}", dial_info);
veilid_log!(self debug "[LocalNetwork] can not add dial info to this routing domain: {:?}", dial_info);
return false;
}
if !dial_info.is_valid() {
log_rtab!(debug
veilid_log!(self debug
"shouldn't be registering invalid addresses: {:?}",
dial_info
);

@ -343,7 +343,7 @@ impl RoutingDomainDetailCommon {
match relay_sni {
SignedNodeInfo::Direct(d) => Some((relay_ids, d)),
SignedNodeInfo::Relayed(_) => {
warn!("relay node should not have a relay itself! if this happens, a relay updated its signed node info and became a relay, which should cause the relay to be dropped");
veilid_log!(rti warn "relay node should not have a relay itself! if this happens, a relay updated its signed node info and became a relay, which should cause the relay to be dropped");
None
}
}
@ -169,7 +169,7 @@ impl<'a> RoutingDomainEditorCommonTrait for RoutingDomainEditorPublicInternet<'a
|
||||
.filter(|di| !new_dial_info_details.contains(di))
|
||||
.collect::<Vec<_>>();
|
||||
if !removed_dial_info.is_empty() {
|
||||
info!(
|
||||
veilid_log!(rti info
|
||||
"[PublicInternet] removed dial info:\n{}",
|
||||
indent_all_string(&removed_dial_info.to_multiline_string())
|
||||
.strip_trailing_newline()
|
||||
@ -181,7 +181,7 @@ impl<'a> RoutingDomainEditorCommonTrait for RoutingDomainEditorPublicInternet<'a
|
||||
.filter(|di| !old_dial_info_details.contains(di))
|
||||
.collect::<Vec<_>>();
|
||||
if !added_dial_info.is_empty() {
|
||||
info!(
|
||||
veilid_log!(rti info
|
||||
"[PublicInternet] added dial info:\n{}",
|
||||
indent_all_string(&added_dial_info.to_multiline_string())
|
||||
.strip_trailing_newline()
|
||||
@ -191,44 +191,44 @@ impl<'a> RoutingDomainEditorCommonTrait for RoutingDomainEditorPublicInternet<'a
|
||||
if let Some(nrn) = new_relay_node {
|
||||
if let Some(orn) = old_relay_node {
|
||||
if !nrn.same_entry(&orn) {
|
||||
info!("[PublicInternet] change relay: {} -> {}", orn, nrn);
|
||||
veilid_log!(rti info "[PublicInternet] change relay: {} -> {}", orn, nrn);
|
||||
peer_info_changed = true;
|
||||
}
|
||||
} else {
|
||||
info!("[PublicInternet] set relay: {}", nrn);
|
||||
veilid_log!(rti info "[PublicInternet] set relay: {}", nrn);
|
||||
peer_info_changed = true;
|
||||
}
|
||||
}
|
||||
if old_outbound_protocols != new_outbound_protocols {
|
||||
info!(
|
||||
veilid_log!(rti info
|
||||
"[PublicInternet] changed network: outbound {:?}->{:?}",
|
||||
old_outbound_protocols, new_outbound_protocols
|
||||
);
|
||||
peer_info_changed = true;
|
||||
}
|
||||
if old_inbound_protocols != new_inbound_protocols {
|
||||
info!(
|
||||
veilid_log!(rti info
|
||||
"[PublicInternet] changed network: inbound {:?}->{:?}",
|
||||
old_inbound_protocols, new_inbound_protocols,
|
||||
old_inbound_protocols, new_inbound_protocols
|
||||
);
|
||||
peer_info_changed = true;
|
||||
}
|
||||
if old_address_types != new_address_types {
|
||||
info!(
|
||||
veilid_log!(rti info
|
||||
"[PublicInternet] changed network: address types {:?}->{:?}",
|
||||
old_address_types, new_address_types,
|
||||
old_address_types, new_address_types
|
||||
);
|
||||
peer_info_changed = true;
|
||||
}
|
||||
if old_capabilities != new_capabilities {
|
||||
info!(
|
||||
veilid_log!(rti info
|
||||
"[PublicInternet] changed network: capabilities {:?}->{:?}",
|
||||
old_capabilities, new_capabilities
|
||||
);
|
||||
peer_info_changed = true;
|
||||
}
|
||||
if old_network_class != new_network_class {
|
||||
info!(
|
||||
veilid_log!(rti info
|
||||
"[PublicInternet] changed network class: {:?}->{:?}",
|
||||
old_network_class, new_network_class
|
||||
);
|
||||
|
@ -4,23 +4,20 @@ pub use editor::*;
|
||||
|
||||
use super::*;
|
||||
|
||||
impl_veilid_log_facility!("rtab");
|
||||
|
||||
/// Public Internet routing domain internals
|
||||
#[derive(Debug)]
|
||||
pub struct PublicInternetRoutingDomainDetail {
|
||||
/// Registry accessor
|
||||
registry: VeilidComponentRegistry,
|
||||
/// Common implementation for all routing domains
|
||||
common: RoutingDomainDetailCommon,
|
||||
/// Published peer info for this routing domain
|
||||
published_peer_info: Mutex<Option<Arc<PeerInfo>>>,
|
||||
}
|
||||
|
||||
impl Default for PublicInternetRoutingDomainDetail {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
common: RoutingDomainDetailCommon::new(RoutingDomain::PublicInternet),
|
||||
published_peer_info: Default::default(),
|
||||
}
|
||||
}
|
||||
}
|
||||
impl_veilid_component_registry_accessor!(PublicInternetRoutingDomainDetail);
|
||||
|
||||
impl RoutingDomainDetailCommonAccessors for PublicInternetRoutingDomainDetail {
|
||||
fn common(&self) -> &RoutingDomainDetailCommon {
|
||||
@ -31,6 +28,16 @@ impl RoutingDomainDetailCommonAccessors for PublicInternetRoutingDomainDetail {
|
||||
}
|
||||
}
|
||||
|
||||
impl PublicInternetRoutingDomainDetail {
|
||||
pub fn new(registry: VeilidComponentRegistry) -> Self {
|
||||
Self {
|
||||
registry,
|
||||
common: RoutingDomainDetailCommon::new(RoutingDomain::PublicInternet),
|
||||
published_peer_info: Default::default(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl RoutingDomainDetail for PublicInternetRoutingDomainDetail {
|
||||
fn routing_domain(&self) -> RoutingDomain {
|
||||
RoutingDomain::PublicInternet
|
||||
@ -99,13 +106,13 @@ impl RoutingDomainDetail for PublicInternetRoutingDomainDetail {
|
||||
|
||||
if pi.signed_node_info().node_info().network_class() == NetworkClass::Invalid {
|
||||
// If the network class is not yet determined, don't publish
|
||||
log_rtab!(debug "[PublicInternet] Not publishing peer info with invalid network class");
|
||||
veilid_log!(self debug "[PublicInternet] Not publishing peer info with invalid network class");
|
||||
None
|
||||
} else if self.requires_relay().is_some()
|
||||
&& pi.signed_node_info().relay_ids().is_empty()
|
||||
{
|
||||
// If we need a relay and we don't have one, don't publish yet
|
||||
log_rtab!(debug "[PublicInternet] Not publishing peer info that wants relay until we have a relay");
|
||||
veilid_log!(self debug "[PublicInternet] Not publishing peer info that wants relay until we have a relay");
|
||||
None
|
||||
} else {
|
||||
// This peerinfo is fit to publish
|
||||
@ -118,19 +125,19 @@ impl RoutingDomainDetail for PublicInternetRoutingDomainDetail {
|
||||
if let Some(old_peer_info) = &*ppi_lock {
|
||||
if let Some(new_peer_info) = &opt_new_peer_info {
|
||||
if new_peer_info.equivalent(old_peer_info) {
|
||||
log_rtab!(debug "[PublicInternet] Not publishing peer info because it is equivalent");
|
||||
veilid_log!(self debug "[PublicInternet] Not publishing peer info because it is equivalent");
|
||||
return false;
|
||||
}
|
||||
}
|
||||
} else if opt_new_peer_info.is_none() {
|
||||
log_rtab!(debug "[PublicInternet] Not publishing peer info because it is still None");
|
||||
veilid_log!(self debug "[PublicInternet] Not publishing peer info because it is still None");
|
||||
return false;
|
||||
}
|
||||
|
||||
if opt_new_peer_info.is_some() {
|
||||
log_rtab!(debug "[PublicInternet] Published new peer info: {}", opt_new_peer_info.as_ref().unwrap());
|
||||
veilid_log!(self debug "[PublicInternet] Published new peer info: {}", opt_new_peer_info.as_ref().unwrap());
|
||||
} else {
|
||||
log_rtab!(debug "[PublicInternet] Unpublishing because current peer info is invalid");
|
||||
veilid_log!(self debug "[PublicInternet] Unpublishing because current peer info is invalid");
|
||||
}
|
||||
*ppi_lock = opt_new_peer_info.clone();
|
||||
|
||||
@ -141,7 +148,7 @@ impl RoutingDomainDetail for PublicInternetRoutingDomainDetail {
|
||||
routing_domain: RoutingDomain::PublicInternet,
|
||||
opt_peer_info,
|
||||
}) {
|
||||
log_rtab!(debug "Failed to post event: {}", e);
|
||||
veilid_log!(self debug "Failed to post event: {}", e);
|
||||
}
|
||||
|
||||
true
|
||||
@ -149,7 +156,7 @@ impl RoutingDomainDetail for PublicInternetRoutingDomainDetail {
|
||||
|
||||
fn unpublish_peer_info(&self) {
|
||||
let mut ppi_lock = self.published_peer_info.lock();
|
||||
log_rtab!(debug "[PublicInternet] Unpublished peer info");
|
||||
veilid_log!(self debug "[PublicInternet] Unpublished peer info");
|
||||
*ppi_lock = None;
|
||||
}
|
||||
|
||||
@ -158,11 +165,11 @@ impl RoutingDomainDetail for PublicInternetRoutingDomainDetail {
|
||||
let can_contain_address = self.can_contain_address(address);
|
||||
|
||||
if !can_contain_address {
|
||||
log_network_result!(debug "[PublicInternet] can not add dial info to this routing domain: {:?}", dial_info);
|
||||
veilid_log!(self debug "[PublicInternet] can not add dial info to this routing domain: {:?}", dial_info);
|
||||
return false;
|
||||
}
|
||||
if !dial_info.is_valid() {
|
||||
log_rtab!(debug
|
||||
veilid_log!(self debug
|
||||
"shouldn't be registering invalid addresses: {:?}",
|
||||
dial_info
|
||||
);
|
||||
|
@ -3,6 +3,8 @@ use super::*;
|
||||
use futures_util::stream::{FuturesUnordered, StreamExt};
|
||||
use stop_token::future::FutureExt as StopFutureExt;
|
||||
|
||||
impl_veilid_log_facility!("rtab");
|
||||
|
||||
pub const BOOTSTRAP_TXT_VERSION_0: u8 = 0;
|
||||
pub const MIN_BOOTSTRAP_PEERS: usize = 4;
|
||||
|
||||
@ -95,7 +97,7 @@ impl RoutingTable {
|
||||
let dial_infos = match DialInfo::try_vec_from_short(rec, hostname_str) {
|
||||
Ok(dis) => dis,
|
||||
Err(e) => {
|
||||
warn!("Couldn't resolve bootstrap node dial info {}: {}", rec, e);
|
||||
veilid_log!(self warn "Couldn't resolve bootstrap node dial info {}: {}", rec, e);
|
||||
continue;
|
||||
}
|
||||
};
|
||||
@ -128,7 +130,7 @@ impl RoutingTable {
|
||||
let records = match intf::txt_lookup(&bh).await {
|
||||
Ok(v) => v,
|
||||
Err(e) => {
|
||||
warn!(
|
||||
veilid_log!(self warn
|
||||
"Network may be down. No bootstrap resolution for '{}': {}",
|
||||
bh, e
|
||||
);
|
||||
@ -162,7 +164,7 @@ impl RoutingTable {
|
||||
// look up bootstrap node txt records
|
||||
let bsnirecords = match intf::txt_lookup(&bsname).await {
|
||||
Err(e) => {
|
||||
warn!(
|
||||
veilid_log!(self warn
|
||||
"Network may be down. Bootstrap node txt lookup failed for {}: {}",
|
||||
bsname, e
|
||||
);
|
||||
@ -184,7 +186,7 @@ impl RoutingTable {
|
||||
let txt_version: u8 = match records[0].parse::<u8>() {
|
||||
Ok(v) => v,
|
||||
Err(e) => {
|
||||
log_rtab!(warn
|
||||
veilid_log!(self warn
|
||||
"invalid txt_version specified in bootstrap node txt record: {}",
|
||||
e
|
||||
);
|
||||
@ -195,7 +197,7 @@ impl RoutingTable {
|
||||
BOOTSTRAP_TXT_VERSION_0 => {
|
||||
match self.process_bootstrap_records_v0(records).await {
|
||||
Err(e) => {
|
||||
log_rtab!(error
|
||||
veilid_log!(self error
|
||||
"couldn't process v0 bootstrap records from {}: {}",
|
||||
bsname, e
|
||||
);
|
||||
@ -209,7 +211,7 @@ impl RoutingTable {
|
||||
}
|
||||
}
|
||||
_ => {
|
||||
log_rtab!(warn "unsupported bootstrap txt record version");
|
||||
veilid_log!(self warn "unsupported bootstrap txt record version");
|
||||
continue;
|
||||
}
|
||||
};
|
||||
@ -260,7 +262,7 @@ impl RoutingTable {
|
||||
pi: Arc<PeerInfo>,
|
||||
unord: &FuturesUnordered<SendPinBoxFuture<()>>,
|
||||
) {
|
||||
log_rtab!(
|
||||
veilid_log!(self trace
|
||||
"--- bootstrapping {} with {:?}",
|
||||
pi.node_ids(),
|
||||
pi.signed_node_info().node_info().dial_info_detail_list()
|
||||
@ -271,7 +273,7 @@ impl RoutingTable {
|
||||
let nr = match self.register_node_with_peer_info(pi, true) {
|
||||
Ok(nr) => nr,
|
||||
Err(e) => {
|
||||
log_rtab!(error "failed to register bootstrap peer info: {}", e);
|
||||
veilid_log!(self error "failed to register bootstrap peer info: {}", e);
|
||||
return;
|
||||
}
|
||||
};
|
||||
@ -291,14 +293,14 @@ impl RoutingTable {
|
||||
{
|
||||
Ok(NodeContactMethod::Direct(v)) => v,
|
||||
Ok(v) => {
|
||||
log_rtab!(debug "invalid contact method for bootstrap, ignoring peer: {:?}", v);
|
||||
veilid_log!(nr debug "invalid contact method for bootstrap, ignoring peer: {:?}", v);
|
||||
// let _ =
|
||||
// network_manager
|
||||
// .get_node_contact_method(nr.clone());
|
||||
return;
|
||||
}
|
||||
Err(e) => {
|
||||
log_rtab!(warn "unable to bootstrap: {}", e);
|
||||
veilid_log!(nr warn "unable to bootstrap: {}", e);
|
||||
return;
|
||||
}
|
||||
};
|
||||
@ -309,13 +311,13 @@ impl RoutingTable {
|
||||
|
||||
// Ensure we got the signed peer info
|
||||
if !nr.signed_node_info_has_valid_signature(routing_domain) {
|
||||
log_rtab!(warn "bootstrap server is not responding");
|
||||
log_rtab!(debug "bootstrap server is not responding for dialinfo: {}", bsdi);
|
||||
veilid_log!(nr warn "bootstrap server is not responding");
|
||||
veilid_log!(nr debug "bootstrap server is not responding for dialinfo: {}", bsdi);
|
||||
|
||||
// Try a different dialinfo next time
|
||||
network_manager.address_filter().set_dial_info_failed(bsdi);
|
||||
} else {
|
||||
info!("bootstrap of {} successful via {}", crypto_kind, nr);
|
||||
veilid_log!(nr info "bootstrap of {} successful via {}", crypto_kind, nr);
|
||||
|
||||
// otherwise this bootstrap is valid, lets ask it to find ourselves now
|
||||
routing_table.reverse_find_node(crypto_kind, nr, true, vec![]).await
|
||||
@ -332,12 +334,12 @@ impl RoutingTable {
|
||||
peers: Vec<Arc<PeerInfo>>,
|
||||
stop_token: StopToken,
|
||||
) -> EyreResult<()> {
|
||||
log_rtab!(debug " bootstrap peers: {:?}", &peers);
|
||||
veilid_log!(self debug " bootstrap peers: {:?}", &peers);
|
||||
|
||||
// Get crypto kinds to bootstrap
|
||||
let crypto_kinds = self.get_bootstrap_crypto_kinds();
|
||||
|
||||
log_rtab!(debug " bootstrap crypto kinds: {:?}", &crypto_kinds);
|
||||
veilid_log!(self debug " bootstrap crypto kinds: {:?}", &crypto_kinds);
|
||||
|
||||
// Run all bootstrap operations concurrently
|
||||
let mut unord = FuturesUnordered::<SendPinBoxFuture<()>>::new();
|
||||
@ -381,7 +383,7 @@ impl RoutingTable {
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
log_rtab!(debug "--- bootstrap_task");
|
||||
veilid_log!(self debug "--- bootstrap_task");
|
||||
|
||||
// See if we are specifying a direct dialinfo for bootstrap, if so use the direct mechanism
|
||||
let mut bootstrap_dialinfos = Vec::<DialInfo>::new();
|
||||
@ -400,7 +402,7 @@ impl RoutingTable {
|
||||
|
||||
let mut peer_map = HashMap::<TypedKeyGroup, Arc<PeerInfo>>::new();
|
||||
for bootstrap_di in bootstrap_dialinfos {
|
||||
log_rtab!(debug "direct bootstrap with: {}", bootstrap_di);
|
||||
veilid_log!(self debug "direct bootstrap with: {}", bootstrap_di);
|
||||
let peers = network_manager.boot_request(bootstrap_di).await?;
|
||||
for peer in peers {
|
||||
if !peer_map.contains_key(peer.node_ids()) {
|
||||
|
@ -9,6 +9,8 @@ pub mod update_statistics;
|
||||
|
||||
use super::*;
|
||||
|
||||
impl_veilid_log_facility!("rtab");
|
||||
|
||||
impl RoutingTable {
|
||||
pub fn setup_tasks(&self) {
|
||||
// Set rolling transfers tick task
|
||||
@ -203,59 +205,59 @@ impl RoutingTable {
|
||||
|
||||
pub async fn cancel_tasks(&self) {
|
||||
// Cancel all tasks being ticked
|
||||
log_rtab!(debug "stopping rolling transfers task");
|
||||
veilid_log!(self debug "stopping rolling transfers task");
|
||||
if let Err(e) = self.rolling_transfers_task.stop().await {
|
||||
error!("rolling_transfers_task not stopped: {}", e);
|
||||
veilid_log!(self warn "rolling_transfers_task not stopped: {}", e);
|
||||
}
|
||||
log_rtab!(debug "stopping update state stats task");
|
||||
veilid_log!(self debug "stopping update state stats task");
|
||||
if let Err(e) = self.update_state_stats_task.stop().await {
|
||||
error!("update_state_stats_task not stopped: {}", e);
|
||||
veilid_log!(self warn "update_state_stats_task not stopped: {}", e);
|
||||
}
|
||||
log_rtab!(debug "stopping rolling answers task");
|
||||
veilid_log!(self debug "stopping rolling answers task");
|
||||
if let Err(e) = self.rolling_answers_task.stop().await {
|
||||
error!("rolling_answers_task not stopped: {}", e);
|
||||
veilid_log!(self warn "rolling_answers_task not stopped: {}", e);
|
||||
}
|
||||
log_rtab!(debug "stopping kick buckets task");
|
||||
veilid_log!(self debug "stopping kick buckets task");
|
||||
if let Err(e) = self.kick_buckets_task.stop().await {
|
||||
error!("kick_buckets_task not stopped: {}", e);
|
||||
veilid_log!(self warn "kick_buckets_task not stopped: {}", e);
|
||||
}
|
||||
log_rtab!(debug "stopping bootstrap task");
|
||||
veilid_log!(self debug "stopping bootstrap task");
|
||||
if let Err(e) = self.bootstrap_task.stop().await {
|
||||
error!("bootstrap_task not stopped: {}", e);
|
||||
veilid_log!(self warn "bootstrap_task not stopped: {}", e);
|
||||
}
|
||||
log_rtab!(debug "stopping peer minimum refresh task");
|
||||
veilid_log!(self debug "stopping peer minimum refresh task");
|
||||
if let Err(e) = self.peer_minimum_refresh_task.stop().await {
|
||||
error!("peer_minimum_refresh_task not stopped: {}", e);
|
||||
veilid_log!(self warn "peer_minimum_refresh_task not stopped: {}", e);
|
||||
}
|
||||
|
||||
log_rtab!(debug "stopping ping_validator tasks");
|
||||
veilid_log!(self debug "stopping ping_validator tasks");
|
||||
if let Err(e) = self.ping_validator_public_internet_task.stop().await {
|
||||
error!("ping_validator_public_internet_task not stopped: {}", e);
|
||||
veilid_log!(self warn "ping_validator_public_internet_task not stopped: {}", e);
|
||||
}
|
||||
if let Err(e) = self.ping_validator_local_network_task.stop().await {
|
||||
error!("ping_validator_local_network_task not stopped: {}", e);
|
||||
veilid_log!(self warn "ping_validator_local_network_task not stopped: {}", e);
|
||||
}
|
||||
if let Err(e) = self.ping_validator_public_internet_relay_task.stop().await {
|
||||
error!(
|
||||
veilid_log!(self warn
|
||||
"ping_validator_public_internet_relay_task not stopped: {}",
|
||||
e
|
||||
);
|
||||
}
|
||||
if let Err(e) = self.ping_validator_active_watch_task.stop().await {
|
||||
error!("ping_validator_active_watch_task not stopped: {}", e);
|
||||
veilid_log!(self warn "ping_validator_active_watch_task not stopped: {}", e);
|
||||
}
|
||||
|
||||
log_rtab!(debug "stopping relay management task");
|
||||
veilid_log!(self debug "stopping relay management task");
|
||||
if let Err(e) = self.relay_management_task.stop().await {
|
||||
warn!("relay_management_task not stopped: {}", e);
|
||||
veilid_log!(self warn "relay_management_task not stopped: {}", e);
|
||||
}
|
||||
log_rtab!(debug "stopping private route management task");
|
||||
veilid_log!(self debug "stopping private route management task");
|
||||
if let Err(e) = self.private_route_management_task.stop().await {
|
||||
warn!("private_route_management_task not stopped: {}", e);
|
||||
veilid_log!(self warn "private_route_management_task not stopped: {}", e);
|
||||
}
|
||||
log_rtab!(debug "stopping closest peers refresh task");
|
||||
veilid_log!(self debug "stopping closest peers refresh task");
|
||||
if let Err(e) = self.closest_peers_refresh_task.stop().await {
|
||||
warn!("closest_peers_refresh_task not stopped: {}", e);
|
||||
veilid_log!(self warn "closest_peers_refresh_task not stopped: {}", e);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -1,5 +1,9 @@
|
||||
use super::*;
|
||||
|
||||
use futures_util::FutureExt;
|
||||
|
||||
impl_veilid_log_facility!("rtab");
|
||||
|
||||
/// Keepalive pings are done occasionally to ensure holepunched public dialinfo
|
||||
/// remains valid, as well as to make sure we remain in any relay node's routing table
|
||||
const RELAY_KEEPALIVE_PING_INTERVAL_SECS: u32 = 10;
|
||||
@ -10,8 +14,6 @@ const ACTIVE_WATCH_KEEPALIVE_PING_INTERVAL_SECS: u32 = 10;
|
||||
/// Ping queue processing depth per validator
|
||||
const MAX_PARALLEL_PINGS: usize = 8;
|
||||
|
||||
use futures_util::FutureExt;
|
||||
|
||||
type PingValidatorFuture = SendPinBoxFuture<Result<(), RPCError>>;
|
||||
|
||||
impl RoutingTable {
|
||||
@ -181,7 +183,7 @@ impl RoutingTable {
|
||||
for relay_nr_filtered in relay_noderefs {
|
||||
futurequeue.push_back(
|
||||
async move {
|
||||
log_rtab!("--> PublicInternet Relay ping to {:?}", relay_nr_filtered);
|
||||
veilid_log!(relay_nr_filtered trace "--> PublicInternet Relay ping to {:?}", relay_nr_filtered);
|
||||
let rpc_processor = relay_nr_filtered.rpc_processor();
|
||||
let _ = rpc_processor
|
||||
.rpc_call_status(Destination::direct(relay_nr_filtered))
|
||||
@ -227,7 +229,7 @@ impl RoutingTable {
|
||||
let registry = self.registry();
|
||||
futurequeue.push_back(
|
||||
async move {
|
||||
log_rtab!("--> Watch Keepalive ping to {:?}", watch_destination);
|
||||
veilid_log!(registry trace "--> Watch Keepalive ping to {:?}", watch_destination);
|
||||
let rpc_processor = registry.rpc_processor();
|
||||
let _ = rpc_processor.rpc_call_status(watch_destination).await?;
|
||||
Ok(())
|
||||
@ -256,7 +258,7 @@ impl RoutingTable {
|
||||
futurequeue.push_back(
|
||||
async move {
|
||||
#[cfg(feature = "verbose-tracing")]
|
||||
log_rtab!(debug "--> PublicInternet Validator ping to {:?}", nr);
|
||||
veilid_log!(self debug "--> PublicInternet Validator ping to {:?}", nr);
|
||||
let rpc_processor = nr.rpc_processor();
|
||||
let _ = rpc_processor
|
||||
.rpc_call_status(Destination::direct(nr))
|
||||
@ -289,7 +291,7 @@ impl RoutingTable {
|
||||
futurequeue.push_back(
|
||||
async move {
|
||||
#[cfg(feature = "verbose-tracing")]
|
||||
log_rtab!(debug "--> LocalNetwork Validator ping to {:?}", nr);
|
||||
veilid_log!(self debug "--> LocalNetwork Validator ping to {:?}", nr);
|
||||
let rpc_processor = nr.rpc_processor();
|
||||
let _ = rpc_processor
|
||||
.rpc_call_status(Destination::direct(nr))
|
||||
@ -315,21 +317,21 @@ impl RoutingTable {
|
||||
if count == 0 {
|
||||
return;
|
||||
}
|
||||
log_rtab!(debug "[{}] Ping validation queue: {} remaining", name, count);
|
||||
veilid_log!(self debug "[{}] Ping validation queue: {} remaining", name, count);
|
||||
|
||||
let atomic_count = AtomicUsize::new(count);
|
||||
process_batched_future_queue(future_queue, MAX_PARALLEL_PINGS, stop_token, |res| async {
|
||||
if let Err(e) = res {
|
||||
log_rtab!(error "[{}] Error performing status ping: {}", name, e);
|
||||
veilid_log!(self error "[{}] Error performing status ping: {}", name, e);
|
||||
}
|
||||
let remaining = atomic_count.fetch_sub(1, Ordering::AcqRel) - 1;
|
||||
if remaining > 0 {
|
||||
log_rtab!(debug "[{}] Ping validation queue: {} remaining", name, remaining);
|
||||
veilid_log!(self debug "[{}] Ping validation queue: {} remaining", name, remaining);
|
||||
}
|
||||
})
|
||||
.await;
|
||||
let done_ts = Timestamp::now();
|
||||
log_rtab!(debug
|
||||
veilid_log!(self debug
|
||||
"[{}] Ping validation queue finished {} pings in {}",
|
||||
name,
|
||||
count,
|
||||
|
@ -4,6 +4,8 @@ use futures_util::stream::{FuturesUnordered, StreamExt};
|
||||
use futures_util::FutureExt;
|
||||
use stop_token::future::FutureExt as _;
|
||||
|
||||
impl_veilid_log_facility!("rtab");
|
||||
|
||||
const BACKGROUND_SAFETY_ROUTE_COUNT: usize = 2;
|
||||
|
||||
impl RoutingTable {
|
||||
@ -95,7 +97,7 @@ impl RoutingTable {
|
||||
|
||||
// Process dead routes
|
||||
for r in expired_routes {
|
||||
log_rtab!(debug "Expired route: {}", r);
|
||||
veilid_log!(self debug "Expired route: {}", r);
|
||||
self.route_spec_store().release_route(r);
|
||||
}
|
||||
|
||||
@ -113,7 +115,7 @@ impl RoutingTable {
|
||||
if routes_needing_testing.is_empty() {
|
||||
return Ok(());
|
||||
}
|
||||
log_rtab!("Testing routes: {:?}", routes_needing_testing);
|
||||
veilid_log!(self trace "Testing routes: {:?}", routes_needing_testing);
|
||||
|
||||
#[derive(Default, Debug)]
|
||||
struct TestRouteContext {
|
||||
@ -134,7 +136,7 @@ impl RoutingTable {
|
||||
Ok(None) => true,
|
||||
// Test failure
|
||||
Err(e) => {
|
||||
log_rtab!(error "Test route failed: {}", e);
|
||||
veilid_log!(self error "Test route failed: {}", e);
|
||||
return;
|
||||
}
|
||||
};
|
||||
@ -157,7 +159,7 @@ impl RoutingTable {
|
||||
// Process failed routes
|
||||
let ctx = Arc::try_unwrap(ctx).unwrap().into_inner();
|
||||
for r in ctx.dead_routes {
|
||||
log_rtab!(debug "Dead route failed to test: {}", r);
|
||||
veilid_log!(self debug "Dead route failed to test: {}", r);
|
||||
self.route_spec_store().release_route(r);
|
||||
}
|
||||
|
||||
@ -220,7 +222,7 @@ impl RoutingTable {
|
||||
true,
|
||||
) {
|
||||
Err(VeilidAPIError::TryAgain { message }) => {
|
||||
log_rtab!(debug "Route allocation unavailable: {}", message);
|
||||
veilid_log!(self debug "Route allocation unavailable: {}", message);
|
||||
}
|
||||
Err(e) => return Err(e.into()),
|
||||
Ok(v) => {
|
||||
|
@ -1,5 +1,7 @@
|
||||
use super::*;
|
||||
|
||||
impl_veilid_log_facility!("rtab");
|
||||
|
||||
impl RoutingTable {
|
||||
// Check if a relay is desired or not
|
||||
#[instrument(level = "trace", skip_all)]
|
||||
@ -64,13 +66,13 @@ impl RoutingTable {
|
||||
state_reason,
|
||||
BucketEntryStateReason::Dead(_) | BucketEntryStateReason::Punished(_)
|
||||
) {
|
||||
log_rtab!(debug "Relay node is now {:?}, dropping relay {}", state_reason, relay_node);
|
||||
veilid_log!(self debug "Relay node is now {:?}, dropping relay {}", state_reason, relay_node);
|
||||
editor.set_relay_node(None);
|
||||
false
|
||||
}
|
||||
// Relay node no longer can relay
|
||||
else if relay_node.operate(|_rti, e| !relay_node_filter(e)) {
|
||||
log_rtab!(debug
|
||||
veilid_log!(self debug
|
||||
"Relay node can no longer relay, dropping relay {}",
|
||||
relay_node
|
||||
);
|
||||
@ -79,7 +81,7 @@ impl RoutingTable {
|
||||
}
|
||||
// Relay node is no longer wanted
|
||||
else if relay_desired.is_none() {
|
||||
log_rtab!(debug
|
||||
veilid_log!(self debug
|
||||
"Relay node no longer desired, dropping relay {}",
|
||||
relay_node
|
||||
);
|
||||
@ -107,16 +109,16 @@ impl RoutingTable {
|
||||
// Register new outbound relay
|
||||
match self.register_node_with_peer_info(outbound_relay_peerinfo, false) {
|
||||
Ok(nr) => {
|
||||
log_rtab!(debug "Outbound relay node selected: {}", nr);
|
||||
veilid_log!(self debug "Outbound relay node selected: {}", nr);
|
||||
editor.set_relay_node(Some(nr.unfiltered()));
|
||||
got_outbound_relay = true;
|
||||
}
|
||||
Err(e) => {
|
||||
log_rtab!(error "failed to register node with peer info: {}", e);
|
||||
veilid_log!(self error "failed to register node with peer info: {}", e);
|
||||
}
|
||||
}
|
||||
} else {
|
||||
log_rtab!(debug "Outbound relay desired but not available");
|
||||
veilid_log!(self debug "Outbound relay desired but not available");
|
||||
}
|
||||
}
|
||||
if !got_outbound_relay {
|
||||
@ -126,7 +128,7 @@ impl RoutingTable {
|
||||
cur_ts,
|
||||
relay_node_filter,
|
||||
) {
|
||||
log_rtab!(debug "Inbound relay node selected: {}", nr);
|
||||
veilid_log!(self debug "Inbound relay node selected: {}", nr);
|
||||
editor.set_relay_node(Some(nr));
|
||||
}
|
||||
}
|
||||
|
@ -58,6 +58,8 @@ pub use typed_signature::*;
|
||||
|
||||
use super::*;
|
||||
|
||||
impl_veilid_log_facility!("rpc");
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
#[allow(clippy::enum_variant_names)]
|
||||
pub enum QuestionContext {
|
||||
|
@ -173,7 +173,7 @@ impl Destination {
|
||||
if opt_routing_domain.is_none() {
|
||||
// No routing domain for target, no node info
|
||||
// Only a stale connection or no connection exists
|
||||
log_rpc!(debug "No routing domain for node: node={}", node);
|
||||
veilid_log!(node warn "No routing domain for node: node={}", node);
|
||||
};
|
||||
(Some(node.unfiltered()), None, opt_routing_domain)
|
||||
}
|
||||
@ -206,7 +206,7 @@ impl Destination {
|
||||
}
|
||||
if opt_routing_domain.is_none() {
|
||||
// In the case of an unexpected relay, log it and don't pass any sender peer info into an unexpected relay
|
||||
log_rpc!(debug "Unexpected relay used for node: relay={}, node={}", relay, node);
|
||||
veilid_log!(node warn "No routing domain for relay: relay={}, node={}", relay, node);
|
||||
};
|
||||
|
||||
(
|
||||
|
@ -63,6 +63,8 @@ use network_manager::*;
|
||||
use routing_table::*;
|
||||
use storage_manager::*;
|
||||
|
||||
impl_veilid_log_facility!("rpc");
|
||||
|
||||
/////////////////////////////////////////////////////////////////////
|
||||
|
||||
#[derive(Debug)]
|
||||
@ -203,7 +205,7 @@ impl RPCProcessor {
|
||||
|
||||
#[instrument(level = "debug", skip_all, err)]
|
||||
pub async fn startup(&self) -> EyreResult<()> {
|
||||
log_rpc!(debug "starting rpc processor startup");
|
||||
veilid_log!(self debug "starting rpc processor startup");
|
||||
|
||||
let guard = self.startup_context.startup_lock.startup()?;
|
||||
{
|
||||
@ -214,7 +216,7 @@ impl RPCProcessor {
|
||||
inner.stop_source = Some(StopSource::new());
|
||||
|
||||
// spin up N workers
|
||||
log_rpc!("Spinning up {} RPC workers", self.concurrency);
|
||||
veilid_log!(self trace "Spinning up {} RPC workers", self.concurrency);
|
||||
for task_n in 0..self.concurrency {
|
||||
let registry = self.registry();
|
||||
let receiver = channel.1.clone();
|
||||
@ -228,14 +230,14 @@ impl RPCProcessor {
|
||||
}
|
||||
guard.success();
|
||||
|
||||
log_rpc!(debug "finished rpc processor startup");
|
||||
veilid_log!(self debug "finished rpc processor startup");
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[instrument(level = "debug", skip_all)]
|
||||
pub async fn shutdown(&self) {
|
||||
log_rpc!(debug "starting rpc processor shutdown");
|
||||
veilid_log!(self debug "starting rpc processor shutdown");
|
||||
let guard = self
|
||||
.startup_context
|
||||
.startup_lock
|
||||
@ -254,18 +256,18 @@ impl RPCProcessor {
|
||||
// drop the stop
|
||||
drop(inner.stop_source.take());
|
||||
}
|
||||
log_rpc!(debug "stopping {} rpc worker tasks", unord.len());
|
||||
veilid_log!(self debug "stopping {} rpc worker tasks", unord.len());
|
||||
|
||||
// Wait for them to complete
|
||||
while unord.next().await.is_some() {}
|
||||
|
||||
log_rpc!(debug "resetting rpc processor state");
|
||||
veilid_log!(self debug "resetting rpc processor state");
|
||||
|
||||
// Release the rpc processor
|
||||
*self.inner.lock() = Self::new_inner();
|
||||
|
||||
guard.success();
|
||||
log_rpc!(debug "finished rpc processor shutdown");
|
||||
veilid_log!(self debug "finished rpc processor shutdown");
|
||||
}
|
||||
|
||||
//////////////////////////////////////////////////////////////////////
|
||||
@ -316,7 +318,7 @@ impl RPCProcessor {
|
||||
|
||||
// Sender PeerInfo was specified, update our routing table with it
|
||||
if !self.verify_node_info(routing_domain, peer_info.signed_node_info(), &[]) {
|
||||
log_network_result!(debug "Dropping invalid PeerInfo in {:?} for id {}: {:?}", routing_domain, sender_node_id, peer_info);
|
||||
veilid_log!(self debug target:"network_result", "Dropping invalid PeerInfo in {:?} for id {}: {:?}", routing_domain, sender_node_id, peer_info);
|
||||
// Don't punish for this because in the case of hairpin NAT
|
||||
// you can legally get LocalNetwork PeerInfo when you expect PublicInternet PeerInfo
|
||||
//
|
||||
@ -504,7 +506,7 @@ impl RPCProcessor {
|
||||
.await;
|
||||
match &out {
|
||||
Err(e) => {
|
||||
log_rpc!(debug "RPC Lost (id={} {}): {}", id, debug_string, e);
|
||||
veilid_log!(self debug "RPC Lost (id={} {}): {}", id, debug_string, e);
|
||||
self.record_lost_answer(
|
||||
waitable_reply.send_ts,
|
||||
waitable_reply.node_ref.clone(),
|
||||
@ -514,7 +516,7 @@ impl RPCProcessor {
|
||||
);
|
||||
}
|
||||
Ok(TimeoutOr::Timeout) => {
|
||||
log_rpc!(debug "RPC Lost (id={} {}): Timeout", id, debug_string);
|
||||
veilid_log!(self debug "RPC Lost (id={} {}): Timeout", id, debug_string);
|
||||
self.record_lost_answer(
|
||||
waitable_reply.send_ts,
|
||||
waitable_reply.node_ref.clone(),
|
||||
@ -1017,7 +1019,7 @@ impl RPCProcessor {
|
||||
// If we sent to a private route without a safety route
|
||||
// We need to mark our own node info as having been seen so we can optimize sending it
|
||||
if let Err(e) = rss.mark_remote_private_route_seen_our_node_info(rpr_pubkey, recv_ts) {
|
||||
log_rpc!(error "private route missing: {}", e);
|
||||
veilid_log!(self error "private route missing: {}", e);
|
||||
}
|
||||
|
||||
// We can't record local route latency if a remote private route was used because
|
||||
@ -1101,7 +1103,7 @@ impl RPCProcessor {
|
||||
let op_id = operation.op_id();
|
||||
|
||||
// Log rpc send
|
||||
debug!(target: "rpc_message", dir = "send", kind = "question", op_id = op_id.as_u64(), desc = operation.kind().desc(), ?dest);
|
||||
veilid_log!(self debug target: "rpc_message", dir = "send", kind = "question", op_id = op_id.as_u64(), desc = operation.kind().desc(), ?dest);
|
||||
|
||||
// Produce rendered operation
|
||||
let RenderedOperation {
|
||||
@ -1145,7 +1147,7 @@ impl RPCProcessor {
|
||||
);
|
||||
RPCError::network(e)
|
||||
})?;
|
||||
let send_data_method = network_result_value_or_log!( res => [ format!(": node_ref={}, destination_node_ref={}, message.len={}", node_ref, destination_node_ref, message_len) ] {
|
||||
let send_data_method = network_result_value_or_log!(self res => [ format!(": node_ref={}, destination_node_ref={}, message.len={}", node_ref, destination_node_ref, message_len) ] {
|
||||
// If we couldn't send we're still cleaning up
|
||||
self.record_send_failure(RPCKind::Question, send_ts, node_ref.unfiltered(), safety_route, remote_private_route);
|
||||
network_result_raise!(res);
|
||||
@ -1193,7 +1195,7 @@ impl RPCProcessor {
|
||||
let operation = RPCOperation::new_statement(statement, spi);
|
||||
|
||||
// Log rpc send
|
||||
debug!(target: "rpc_message", dir = "send", kind = "statement", op_id = operation.op_id().as_u64(), desc = operation.kind().desc(), ?dest);
|
||||
veilid_log!(self debug target: "rpc_message", dir = "send", kind = "statement", op_id = operation.op_id().as_u64(), desc = operation.kind().desc(), ?dest);
|
||||
|
||||
// Produce rendered operation
|
||||
let RenderedOperation {
|
||||
@ -1230,7 +1232,7 @@ impl RPCProcessor {
|
||||
);
|
||||
RPCError::network(e)
|
||||
})?;
|
||||
let _send_data_method = network_result_value_or_log!( res => [ format!(": node_ref={}, destination_node_ref={}, message.len={}", node_ref, destination_node_ref, message_len) ] {
|
||||
let _send_data_method = network_result_value_or_log!(self res => [ format!(": node_ref={}, destination_node_ref={}, message.len={}", node_ref, destination_node_ref, message_len) ] {
|
||||
// If we couldn't send we're still cleaning up
|
||||
self.record_send_failure(RPCKind::Statement, send_ts, node_ref.unfiltered(), safety_route, remote_private_route);
|
||||
network_result_raise!(res);
|
||||
@ -1263,7 +1265,7 @@ impl RPCProcessor {
|
||||
let operation = RPCOperation::new_answer(&request.operation, answer, spi);
|
||||
|
||||
// Log rpc send
|
||||
debug!(target: "rpc_message", dir = "send", kind = "answer", op_id = operation.op_id().as_u64(), desc = operation.kind().desc(), ?dest);
|
||||
veilid_log!(self debug target: "rpc_message", dir = "send", kind = "answer", op_id = operation.op_id().as_u64(), desc = operation.kind().desc(), ?dest);
|
||||
|
||||
// Produce rendered operation
|
||||
let RenderedOperation {
|
||||
@ -1300,7 +1302,7 @@ impl RPCProcessor {
|
||||
);
|
||||
RPCError::network(e)
|
||||
})?;
|
||||
let _send_data_kind = network_result_value_or_log!( res => [ format!(": node_ref={}, destination_node_ref={}, message.len={}", node_ref, destination_node_ref, message_len) ] {
|
||||
let _send_data_kind = network_result_value_or_log!(self res => [ format!(": node_ref={}, destination_node_ref={}, message.len={}", node_ref, destination_node_ref, message_len) ] {
|
||||
// If we couldn't send we're still cleaning up
|
||||
self.record_send_failure(RPCKind::Answer, send_ts, node_ref.unfiltered(), safety_route, remote_private_route);
|
||||
network_result_raise!(res);
|
||||
@ -1390,7 +1392,7 @@ impl RPCProcessor {
match e {
// Invalid messages that should be punished
RPCError::Protocol(_) | RPCError::InvalidFormat(_) => {
log_rpc!(debug "Invalid RPC Operation: {}", e);
veilid_log!(self debug "Invalid RPC Operation: {}", e);

// Punish nodes that send direct undecodable crap
self.network_manager().address_filter().punish_node_id(
@ -1400,11 +1402,11 @@ impl RPCProcessor {
}
// Ignored messages that should be dropped
RPCError::Ignore(_) | RPCError::Network(_) | RPCError::TryAgain(_) => {
log_rpc!("Dropping RPC Operation: {}", e);
veilid_log!(self trace "Dropping RPC Operation: {}", e);
}
// Internal errors that deserve louder logging
RPCError::Unimplemented(_) | RPCError::Internal(_) => {
log_rpc!(error "Error decoding RPC operation: {}", e);
veilid_log!(self error "Error decoding RPC operation: {}", e);
}
};
return Ok(NetworkResult::invalid_message(e));
|
||||
@ -1418,9 +1420,9 @@ impl RPCProcessor {
|
||||
let sender_peer_info = operation.sender_peer_info();
|
||||
let mut opt_sender_nr: Option<NodeRef> = network_result_try!(self
|
||||
.process_sender_peer_info(routing_domain, sender_node_id, &sender_peer_info)? => {
|
||||
log_network_result!(debug "Sender PeerInfo: {:?}", sender_peer_info);
|
||||
log_network_result!(debug "From Operation: {:?}", operation.kind());
|
||||
log_network_result!(debug "With Detail: {:?}", detail);
|
||||
veilid_log!(self debug target:"network_result", "Sender PeerInfo: {:?}", sender_peer_info);
|
||||
veilid_log!(self debug target:"network_result", "From Operation: {:?}", operation.kind());
|
||||
veilid_log!(self debug target:"network_result", "With Detail: {:?}", detail);
|
||||
});
|
||||
// look up sender node, in case it's different than our peer due to relaying
|
||||
if opt_sender_nr.is_none() {
|
||||
@ -1457,7 +1459,7 @@ impl RPCProcessor {
|
||||
Ok(v) => v,
|
||||
Err(e) => {
|
||||
// Debug on error
|
||||
log_rpc!(debug "Dropping routed RPC: {}", e);
|
||||
veilid_log!(self debug "Dropping routed RPC: {}", e);
|
||||
|
||||
// XXX: Punish routes that send routed undecodable crap
|
||||
// self.network_manager().address_filter().punish_route_id(xxx, PunishmentReason::FailedToDecodeRoutedMessage);
|
||||
@ -1484,7 +1486,7 @@ impl RPCProcessor {
|
||||
}
|
||||
|
||||
// Log rpc receive
|
||||
debug!(target: "rpc_message", dir = "recv", kind = "question", op_id = msg.operation.op_id().as_u64(), desc = msg.operation.kind().desc(), header = ?msg.header, operation = ?msg.operation.kind());
|
||||
veilid_log!(self debug target: "rpc_message", dir = "recv", kind = "question", op_id = msg.operation.op_id().as_u64(), desc = msg.operation.kind().desc(), header = ?msg.header, operation = ?msg.operation.kind());
|
||||
}
|
||||
RPCOperationKind::Statement(_) => {
|
||||
if let Some(sender_nr) = msg.opt_sender_nr.clone() {
|
||||
@ -1492,13 +1494,13 @@ impl RPCProcessor {
|
||||
}
|
||||
|
||||
// Log rpc receive
|
||||
debug!(target: "rpc_message", dir = "recv", kind = "statement", op_id = msg.operation.op_id().as_u64(), desc = msg.operation.kind().desc(), header = ?msg.header, operation = ?msg.operation.kind());
|
||||
veilid_log!(self debug target: "rpc_message", dir = "recv", kind = "statement", op_id = msg.operation.op_id().as_u64(), desc = msg.operation.kind().desc(), header = ?msg.header, operation = ?msg.operation.kind());
|
||||
}
|
||||
RPCOperationKind::Answer(_) => {
|
||||
// Answer stats are processed in wait_for_reply
|
||||
|
||||
// Log rpc receive
|
||||
debug!(target: "rpc_message", dir = "recv", kind = "answer", op_id = msg.operation.op_id().as_u64(), desc = msg.operation.kind().desc(), header = ?msg.header, operation = ?msg.operation.kind());
|
||||
veilid_log!(self debug target: "rpc_message", dir = "recv", kind = "answer", op_id = msg.operation.op_id().as_u64(), desc = msg.operation.kind().desc(), header = ?msg.header, operation = ?msg.operation.kind());
|
||||
}
|
||||
};
|
||||
|
||||
@ -1538,16 +1540,16 @@ impl RPCProcessor {
|
||||
if let Err(e) = self.waiting_rpc_table.complete_op_waiter(op_id, msg) {
|
||||
match e {
|
||||
RPCError::Unimplemented(_) | RPCError::Internal(_) => {
|
||||
log_rpc!(error "Could not complete rpc operation: id = {}: {}", op_id, e);
|
||||
veilid_log!(self error "Could not complete rpc operation: id = {}: {}", op_id, e);
|
||||
}
|
||||
RPCError::InvalidFormat(_)
|
||||
| RPCError::Protocol(_)
|
||||
| RPCError::Network(_)
|
||||
| RPCError::TryAgain(_) => {
|
||||
log_rpc!(debug "Could not complete rpc operation: id = {}: {}", op_id, e);
|
||||
veilid_log!(self debug "Could not complete rpc operation: id = {}: {}", op_id, e);
|
||||
}
|
||||
RPCError::Ignore(_) => {
|
||||
log_rpc!(debug "Answer late: id = {}", op_id);
|
||||
veilid_log!(self debug "Answer late: id = {}", op_id);
|
||||
}
|
||||
};
|
||||
// Don't throw an error here because it's okay if the original operation timed out
|
||||
@ -1568,12 +1570,12 @@ impl RPCProcessor {
|
||||
let rpc_message_span = tracing::trace_span!("rpc message");
|
||||
rpc_message_span.follows_from(prev_span);
|
||||
|
||||
network_result_value_or_log!(match self
|
||||
network_result_value_or_log!(self match self
|
||||
.process_rpc_message(msg).instrument(rpc_message_span)
|
||||
.await
|
||||
{
|
||||
Err(e) => {
|
||||
log_rpc!(error "couldn't process rpc message: {}", e);
|
||||
veilid_log!(self error "couldn't process rpc message: {}", e);
|
||||
continue;
|
||||
}
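The pattern across these hunks is mechanical: the per-facility macros (`log_rpc!`, `log_net!`, `log_dht!`, `log_stor!`) and bare `debug!`/`warn!`/`info!` calls become `veilid_log!(self ...)`, so the emitting component is passed explicitly and the logging facility/target is derived from it. A minimal sketch of that call shape, assuming a hypothetical component trait and a stand-in macro (this is not the actual `veilid_log!` from veilid-core):

```rust
// Illustrative sketch only: a stand-in for the component-aware logging call
// shape; `VeilidComponent`, `facility()`, and the macro body are assumptions,
// not the actual `veilid_log!` implementation.

trait VeilidComponent {
    fn facility(&self) -> &'static str;
}

macro_rules! veilid_log_sketch {
    ($component:expr, $level:ident, $fmt:literal $(, $arg:expr)*) => {
        println!(
            "[{}] {}: {}",
            $component.facility(),
            stringify!($level),
            format!($fmt $(, $arg)*)
        );
    };
}

struct RpcProcessorSketch;

impl VeilidComponent for RpcProcessorSketch {
    fn facility(&self) -> &'static str {
        "rpc"
    }
}

fn main() {
    let rpc = RpcProcessorSketch;
    // Old shape: facility implied by the macro name, no component argument:
    //     log_rpc!(debug "resetting rpc processor state");
    // New shape: the component is passed in and the facility comes from it:
    veilid_log_sketch!(rpc, debug, "resetting rpc processor state");
    veilid_log_sketch!(rpc, error, "couldn't process rpc message: {}", "example error");
}
```

The stand-in only prints; the real macro presumably routes through the tracing layer with the component's facility as the target, which is why call sites that have no `self` pass `registry` or `this` instead.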
|
||||
|
||||
|
@ -133,7 +133,7 @@ impl RPCProcessor {
|
||||
let (message_a, _latency) = match res {
|
||||
TimeoutOr::Timeout => {
|
||||
// No message sent on timeout, but this isn't an error
|
||||
log_rpc!(debug "App call timed out for id {}", op_id);
|
||||
veilid_log!(self debug "App call timed out for id {}", op_id);
|
||||
return Ok(NetworkResult::timeout());
|
||||
}
|
||||
TimeoutOr::Value(v) => v,
|
||||
|
@ -78,7 +78,7 @@ impl RPCProcessor {
|
||||
crypto_kind: vcrypto.kind(),
|
||||
});
|
||||
|
||||
log_dht!(debug "{}", debug_string);
|
||||
veilid_log!(self debug "{}", debug_string);
|
||||
|
||||
let waitable_reply = network_result_try!(
|
||||
self.question(dest.clone(), question, Some(question_context))
|
||||
@ -128,13 +128,13 @@ impl RPCProcessor {
|
||||
dest
|
||||
);
|
||||
|
||||
log_dht!(debug "{}", debug_string_answer);
|
||||
veilid_log!(self debug "{}", debug_string_answer);
|
||||
|
||||
let peer_ids: Vec<String> = peers
|
||||
.iter()
|
||||
.filter_map(|p| p.node_ids().get(key.kind).map(|k| k.to_string()))
|
||||
.collect();
|
||||
log_dht!(debug "Peers: {:#?}", peer_ids);
|
||||
veilid_log!(self debug "Peers: {:#?}", peer_ids);
|
||||
}
|
||||
|
||||
// Validate peers returned are, in fact, closer to the key than the node we sent this to
|
||||
@ -228,7 +228,7 @@ impl RPCProcessor {
|
||||
msg.header.direct_sender_node_id()
|
||||
);
|
||||
|
||||
log_dht!(debug "{}", debug_string);
|
||||
veilid_log!(self debug "{}", debug_string);
|
||||
}
|
||||
|
||||
// See if we would have accepted this as a set
|
||||
@ -278,7 +278,7 @@ impl RPCProcessor {
|
||||
msg.header.direct_sender_node_id()
|
||||
);
|
||||
|
||||
log_dht!(debug "{}", debug_string_answer);
|
||||
veilid_log!(self debug "{}", debug_string_answer);
|
||||
}
|
||||
|
||||
// Make GetValue answer
|
||||
|
@ -81,7 +81,7 @@ impl RPCProcessor {
|
||||
crypto_kind: vcrypto.kind(),
|
||||
});
|
||||
|
||||
log_dht!(debug "{}", debug_string);
|
||||
veilid_log!(self debug "{}", debug_string);
|
||||
|
||||
let waitable_reply = network_result_try!(
|
||||
self.question(dest.clone(), question, Some(question_context))
|
||||
@ -118,13 +118,13 @@ impl RPCProcessor {
|
||||
debug_seqs(&seqs)
|
||||
);
|
||||
|
||||
log_dht!(debug "{}", debug_string_answer);
|
||||
veilid_log!(self debug "{}", debug_string_answer);
|
||||
|
||||
let peer_ids: Vec<String> = peers
|
||||
.iter()
|
||||
.filter_map(|p| p.node_ids().get(key.kind).map(|k| k.to_string()))
|
||||
.collect();
|
||||
log_dht!(debug "Peers: {:#?}", peer_ids);
|
||||
veilid_log!(self debug "Peers: {:#?}", peer_ids);
|
||||
}
|
||||
|
||||
// Validate peers returned are, in fact, closer to the key than the node we sent this to
|
||||
@ -209,7 +209,7 @@ impl RPCProcessor {
|
||||
msg.header.direct_sender_node_id()
|
||||
);
|
||||
|
||||
log_dht!(debug "{}", debug_string);
|
||||
veilid_log!(self debug "{}", debug_string);
|
||||
}
|
||||
|
||||
// See if we would have accepted this as a set
|
||||
@ -247,7 +247,7 @@ impl RPCProcessor {
|
||||
msg.header.direct_sender_node_id()
|
||||
);
|
||||
|
||||
log_dht!(debug "{}", debug_string_answer);
|
||||
veilid_log!(self debug "{}", debug_string_answer);
|
||||
}
|
||||
|
||||
// Make InspectValue answer
|
||||
|
@ -89,7 +89,7 @@ impl RPCProcessor {
|
||||
});
|
||||
|
||||
if debug_target_enabled!("dht") {
|
||||
log_dht!(debug "{}", debug_string);
|
||||
veilid_log!(self debug "{}", debug_string);
|
||||
}
|
||||
|
||||
let waitable_reply = network_result_try!(
|
||||
@ -140,13 +140,13 @@ impl RPCProcessor {
|
||||
dest,
|
||||
);
|
||||
|
||||
log_dht!(debug "{}", debug_string_answer);
|
||||
veilid_log!(self debug "{}", debug_string_answer);
|
||||
|
||||
let peer_ids: Vec<String> = peers
|
||||
.iter()
|
||||
.filter_map(|p| p.node_ids().get(key.kind).map(|k| k.to_string()))
|
||||
.collect();
|
||||
log_dht!(debug "Peers: {:#?}", peer_ids);
|
||||
veilid_log!(self debug "Peers: {:#?}", peer_ids);
|
||||
}
|
||||
|
||||
// Validate peers returned are, in fact, closer to the key than the node we sent this to
|
||||
@ -244,7 +244,7 @@ impl RPCProcessor {
|
||||
msg.header.direct_sender_node_id()
|
||||
);
|
||||
|
||||
log_rpc!(debug "{}", debug_string);
|
||||
veilid_log!(self debug "{}", debug_string);
|
||||
|
||||
// If there are less than 'set_value_count' peers that are closer, then store here too
|
||||
let set_value_count = self
|
||||
@ -296,7 +296,7 @@ impl RPCProcessor {
|
||||
msg.header.direct_sender_node_id()
|
||||
);
|
||||
|
||||
log_dht!(debug "{}", debug_string_answer);
|
||||
veilid_log!(self debug "{}", debug_string_answer);
|
||||
}
|
||||
|
||||
// Make SetValue answer
|
||||
|
@ -140,7 +140,7 @@ impl RPCProcessor {
|
||||
flow: send_data_method.unique_flow.flow,
|
||||
reporting_peer: target.unfiltered(),
|
||||
}) {
|
||||
log_rpc!(debug "Failed to post event: {}", e);
|
||||
veilid_log!(self debug "Failed to post event: {}", e);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -40,7 +40,7 @@ impl RPCProcessor {
|
||||
|
||||
// Send the validate_dial_info request
|
||||
// This can only be sent directly, as relays can not validate dial info
|
||||
network_result_value_or_log!(self.statement(Destination::direct(peer.default_filtered()), statement)
|
||||
network_result_value_or_log!(self self.statement(Destination::direct(peer.default_filtered()), statement)
|
||||
.await? => [ format!(": peer={} statement={:?}", peer, statement) ] {
|
||||
return Ok(false);
|
||||
}
|
||||
@ -61,15 +61,15 @@ impl RPCProcessor {
|
||||
ReceiptEvent::ReturnedPrivate { private_route: _ }
|
||||
| ReceiptEvent::ReturnedInBand { inbound_noderef: _ }
|
||||
| ReceiptEvent::ReturnedSafety => {
|
||||
log_net!(debug "validate_dial_info receipt should be returned out-of-band");
|
||||
veilid_log!(self debug "validate_dial_info receipt should be returned out-of-band");
|
||||
Ok(false)
|
||||
}
|
||||
ReceiptEvent::ReturnedOutOfBand => {
|
||||
log_net!(debug "validate_dial_info receipt returned");
|
||||
veilid_log!(self debug "validate_dial_info receipt returned");
|
||||
Ok(true)
|
||||
}
|
||||
ReceiptEvent::Expired => {
|
||||
log_net!(debug "validate_dial_info receipt expired");
|
||||
veilid_log!(self debug "validate_dial_info receipt expired");
|
||||
Ok(false)
|
||||
}
|
||||
ReceiptEvent::Cancelled => {
|
||||
@ -187,7 +187,7 @@ impl RPCProcessor {
|
||||
|
||||
// Send the validate_dial_info request
|
||||
// This can only be sent directly, as relays can not validate dial info
|
||||
network_result_value_or_log!(self.statement(Destination::direct(peer.default_filtered()), statement)
|
||||
network_result_value_or_log!(self self.statement(Destination::direct(peer.default_filtered()), statement)
|
||||
.await? => [ format!(": peer={} statement={:?}", peer, statement) ] {
|
||||
continue;
|
||||
}
|
||||
|
@ -88,7 +88,7 @@ impl RPCProcessor {
|
||||
msg.header.direct_sender_node_id(),
|
||||
);
|
||||
|
||||
log_dht!(debug "{}", debug_string_stmt);
|
||||
veilid_log!(self debug "{}", debug_string_stmt);
|
||||
}
|
||||
|
||||
// Save the subkey, creating a new record if necessary
|
||||
|
@ -85,7 +85,7 @@ impl RPCProcessor {
|
||||
RPCQuestionDetail::WatchValueQ(Box::new(watch_value_q)),
|
||||
);
|
||||
|
||||
log_dht!(debug "{}", debug_string);
|
||||
veilid_log!(self debug "{}", debug_string);
|
||||
|
||||
let waitable_reply =
|
||||
network_result_try!(self.question(dest.clone(), question, None).await?);
|
||||
@ -122,13 +122,13 @@ impl RPCProcessor {
|
||||
dest
|
||||
);
|
||||
|
||||
log_dht!(debug "{}", debug_string_answer);
|
||||
veilid_log!(self debug "{}", debug_string_answer);
|
||||
|
||||
let peer_ids: Vec<String> = peers
|
||||
.iter()
|
||||
.filter_map(|p| p.node_ids().get(key.kind).map(|k| k.to_string()))
|
||||
.collect();
|
||||
log_dht!(debug "Peers: {:#?}", peer_ids);
|
||||
veilid_log!(self debug "Peers: {:#?}", peer_ids);
|
||||
}
|
||||
|
||||
// Validate accepted requests
|
||||
@ -249,7 +249,7 @@ impl RPCProcessor {
|
||||
watcher
|
||||
);
|
||||
|
||||
log_dht!(debug "{}", debug_string);
|
||||
veilid_log!(self debug "{}", debug_string);
|
||||
}
|
||||
|
||||
// Get the nodes that we know about that are closer to the the key than our own node
|
||||
@ -263,7 +263,7 @@ impl RPCProcessor {
|
||||
let (ret_accepted, ret_expiration, ret_watch_id) =
|
||||
if closer_to_key_peers.len() >= set_value_count {
|
||||
// Not close enough, not accepted
|
||||
log_dht!(debug "Not close enough for watch value");
|
||||
veilid_log!(self debug "Not close enough for watch value");
|
||||
|
||||
(false, 0, watch_id.unwrap_or_default())
|
||||
} else {
|
||||
@ -309,7 +309,7 @@ impl RPCProcessor {
|
||||
msg.header.direct_sender_node_id()
|
||||
);
|
||||
|
||||
log_dht!(debug "{}", debug_string_answer);
|
||||
veilid_log!(self debug "{}", debug_string_answer);
|
||||
}
|
||||
|
||||
// Make WatchValue answer
|
||||
|
@ -1,5 +1,7 @@
|
||||
use super::*;
|
||||
|
||||
impl_veilid_log_facility!("stor");
|
||||
|
||||
/// The context of the outbound_get_value operation
|
||||
struct OutboundGetValueContext {
|
||||
/// The latest value of the subkey, may be the value passed in
|
||||
@ -119,12 +121,12 @@ impl StorageManager {
|
||||
// Keep the value if we got one and it is newer and it passes schema validation
|
||||
let Some(value) = gva.answer.value else {
|
||||
// Return peers if we have some
|
||||
log_network_result!(debug "GetValue returned no value, fanout call returned peers {}", gva.answer.peers.len());
|
||||
veilid_log!(registry debug target:"network_result", "GetValue returned no value, fanout call returned peers {}", gva.answer.peers.len());
|
||||
|
||||
return Ok(NetworkResult::value(FanoutCallOutput{peer_info_list: gva.answer.peers}))
|
||||
};
|
||||
|
||||
log_dht!(debug "GetValue got value back: len={} seq={}", value.value_data().data().len(), value.value_data().seq());
|
||||
veilid_log!(registry debug "GetValue got value back: len={} seq={}", value.value_data().data().len(), value.value_data().seq());
|
||||
|
||||
// Ensure we have a schema and descriptor
|
||||
let (Some(descriptor), Some(schema)) = (&ctx.descriptor, &ctx.schema)
|
||||
@ -184,7 +186,7 @@ impl StorageManager {
|
||||
ctx.send_partial_update = true;
|
||||
}
|
||||
// Return peers if we have some
|
||||
log_network_result!(debug "GetValue fanout call returned peers {}", gva.answer.peers.len());
|
||||
veilid_log!(registry debug target:"network_result", "GetValue fanout call returned peers {}", gva.answer.peers.len());
|
||||
|
||||
Ok(NetworkResult::value(FanoutCallOutput{peer_info_list: gva.answer.peers}))
|
||||
}.instrument(tracing::trace_span!("outbound_get_value fanout routine"))
|
||||
@ -195,6 +197,7 @@ impl StorageManager {
|
||||
let check_done = {
|
||||
let context = context.clone();
|
||||
let out_tx = out_tx.clone();
|
||||
let registry = self.registry();
|
||||
move |_closest_nodes: &[NodeRef]| {
|
||||
let mut ctx = context.lock();
|
||||
|
||||
@ -214,7 +217,7 @@ impl StorageManager {
|
||||
opt_descriptor: ctx.descriptor.clone(),
|
||||
},
|
||||
})) {
|
||||
log_dht!(debug "Sending partial GetValue result failed: {}", e);
|
||||
veilid_log!(registry debug "Sending partial GetValue result failed: {}", e);
|
||||
}
|
||||
}
|
||||
|
||||
@ -257,9 +260,9 @@ impl StorageManager {
|
||||
// Failed
|
||||
TimeoutOr::Value(Err(e)) => {
|
||||
// If we finished with an error, return that
|
||||
log_dht!(debug "GetValue fanout error: {}", e);
|
||||
veilid_log!(registry debug "GetValue fanout error: {}", e);
|
||||
if let Err(e) = out_tx.send(Err(e.into())) {
|
||||
log_dht!(debug "Sending GetValue fanout error failed: {}", e);
|
||||
veilid_log!(registry debug "Sending GetValue fanout error failed: {}", e);
|
||||
}
|
||||
return;
|
||||
}
|
||||
@ -270,7 +273,7 @@ impl StorageManager {
|
||||
kind,
|
||||
value_nodes: ctx.value_nodes.clone(),
|
||||
};
|
||||
log_dht!(debug "GetValue Fanout: {:?}", fanout_result);
|
||||
veilid_log!(registry debug "GetValue Fanout: {:?}", fanout_result);
|
||||
|
||||
if let Err(e) = out_tx.send(Ok(OutboundGetValueResult {
|
||||
fanout_result,
|
||||
@ -279,7 +282,7 @@ impl StorageManager {
|
||||
opt_descriptor: ctx.descriptor.clone(),
|
||||
},
|
||||
})) {
|
||||
log_dht!(debug "Sending GetValue result failed: {}", e);
|
||||
veilid_log!(registry debug "Sending GetValue result failed: {}", e);
|
||||
}
|
||||
}
|
||||
.instrument(tracing::trace_span!("outbound_get_value result")),
|
||||
@ -309,7 +312,7 @@ impl StorageManager {
|
||||
let result = match result {
|
||||
Ok(v) => v,
|
||||
Err(e) => {
|
||||
log_rtab!(debug "Deferred fanout error: {}", e);
|
||||
veilid_log!(this debug "Deferred fanout error: {}", e);
|
||||
return false;
|
||||
}
|
||||
};
|
||||
@ -320,7 +323,7 @@ impl StorageManager {
|
||||
return is_partial;
|
||||
}
|
||||
Err(e) => {
|
||||
log_rtab!(debug "Deferred fanout error: {}", e);
|
||||
veilid_log!(this debug "Deferred fanout error: {}", e);
|
||||
return false;
|
||||
}
|
||||
};
|
||||
|
@ -1,5 +1,7 @@
|
||||
use super::*;
|
||||
|
||||
impl_veilid_log_facility!("stor");
|
||||
|
||||
/// The fully parsed descriptor
|
||||
struct DescriptorInfo {
|
||||
/// The descriptor itself
|
||||
@ -157,7 +159,7 @@ impl StorageManager {
|
||||
|
||||
// Keep the value if we got one and it is newer and it passes schema validation
|
||||
if !answer.seqs.is_empty() {
|
||||
log_dht!(debug "Got seqs back: len={}", answer.seqs.len());
|
||||
veilid_log!(self debug "Got seqs back: len={}", answer.seqs.len());
|
||||
let mut ctx = context.lock();
|
||||
|
||||
// Ensure we have a schema and descriptor etc
|
||||
@ -234,7 +236,7 @@ impl StorageManager {
|
||||
}
|
||||
|
||||
// Return peers if we have some
|
||||
log_network_result!(debug "InspectValue fanout call returned peers {}", answer.peers.len());
|
||||
veilid_log!(registry debug target:"network_result", "InspectValue fanout call returned peers {}", answer.peers.len());
|
||||
|
||||
Ok(NetworkResult::value(FanoutCallOutput { peer_info_list: answer.peers}))
|
||||
}.instrument(tracing::trace_span!("outbound_inspect_value fanout call"))
|
||||
@ -281,7 +283,7 @@ impl StorageManager {
|
||||
// Failed
|
||||
TimeoutOr::Value(Err(e)) => {
|
||||
// If we finished with an error, return that
|
||||
log_dht!(debug "InspectValue Fanout Error: {}", e);
|
||||
veilid_log!(self debug "InspectValue Fanout Error: {}", e);
|
||||
return Err(e.into());
|
||||
}
|
||||
};
|
||||
@ -301,7 +303,7 @@ impl StorageManager {
|
||||
fanout_results.push(fanout_result);
|
||||
}
|
||||
|
||||
log_dht!(debug "InspectValue Fanout ({:?}):\n{}", kind, debug_fanout_results(&fanout_results));
|
||||
veilid_log!(self debug "InspectValue Fanout ({:?}):\n{}", kind, debug_fanout_results(&fanout_results));
|
||||
|
||||
Ok(OutboundInspectValueResult {
|
||||
fanout_results,
|
||||
|
@ -16,6 +16,8 @@ pub use record_store::{WatchParameters, WatchResult};
|
||||
|
||||
pub use types::*;
|
||||
|
||||
impl_veilid_log_facility!("stor");
|
||||
|
||||
/// The maximum size of a single subkey
|
||||
pub(crate) const MAX_SUBKEY_SIZE: usize = ValueData::MAX_LEN;
|
||||
/// The maximum total size of all subkeys of a record
|
||||
@ -212,7 +214,7 @@ impl StorageManager {
|
||||
|
||||
#[instrument(level = "debug", skip_all, err)]
|
||||
async fn init_async(&self) -> EyreResult<()> {
|
||||
log_stor!(debug "startup storage manager");
|
||||
veilid_log!(self debug "startup storage manager");
|
||||
let table_store = self.table_store();
|
||||
let config = self.config();
|
||||
|
||||
@ -231,7 +233,7 @@ impl StorageManager {
|
||||
inner.metadata_db = Some(metadata_db);
|
||||
inner.local_record_store = Some(local_record_store);
|
||||
inner.remote_record_store = Some(remote_record_store);
|
||||
Self::load_metadata(&mut inner).await?;
|
||||
self.load_metadata_inner(&mut inner).await?;
|
||||
}
|
||||
|
||||
// Start deferred results processors
|
||||
@ -251,7 +253,7 @@ impl StorageManager {
|
||||
async move {
|
||||
let this = registry.storage_manager();
|
||||
if let Err(e) = this.tick().await {
|
||||
log_stor!(warn "storage manager tick failed: {}", e);
|
||||
veilid_log!(this warn "storage manager tick failed: {}", e);
|
||||
}
|
||||
}
|
||||
});
|
||||
@ -278,7 +280,7 @@ impl StorageManager {
|
||||
|
||||
#[instrument(level = "debug", skip_all)]
|
||||
async fn terminate_async(&self) {
|
||||
log_stor!(debug "starting storage manager shutdown");
|
||||
veilid_log!(self debug "starting storage manager shutdown");
|
||||
|
||||
// Stop deferred result processor
|
||||
self.deferred_result_processor.terminate().await;
|
||||
@ -290,28 +292,28 @@ impl StorageManager {
|
||||
// Final flush on record stores
|
||||
if let Some(mut local_record_store) = inner.local_record_store.take() {
|
||||
if let Err(e) = local_record_store.flush().await {
|
||||
log_stor!(error "termination local record store tick failed: {}", e);
|
||||
veilid_log!(self error "termination local record store tick failed: {}", e);
|
||||
}
|
||||
}
|
||||
if let Some(mut remote_record_store) = inner.remote_record_store.take() {
|
||||
if let Err(e) = remote_record_store.flush().await {
|
||||
log_stor!(error "termination remote record store tick failed: {}", e);
|
||||
veilid_log!(self error "termination remote record store tick failed: {}", e);
|
||||
}
|
||||
}
|
||||
|
||||
// Save metadata
|
||||
if let Err(e) = Self::save_metadata(&mut inner).await {
|
||||
log_stor!(error "termination metadata save failed: {}", e);
|
||||
if let Err(e) = self.save_metadata_inner(&mut inner).await {
|
||||
veilid_log!(self error "termination metadata save failed: {}", e);
|
||||
}
|
||||
|
||||
// Reset inner state
|
||||
*inner = Self::new_inner();
|
||||
}
|
||||
|
||||
log_stor!(debug "finished storage manager shutdown");
|
||||
veilid_log!(self debug "finished storage manager shutdown");
|
||||
}
|
||||
|
||||
async fn save_metadata(inner: &mut StorageManagerInner) -> EyreResult<()> {
|
||||
async fn save_metadata_inner(&self, inner: &mut StorageManagerInner) -> EyreResult<()> {
|
||||
if let Some(metadata_db) = &inner.metadata_db {
|
||||
let tx = metadata_db.transact();
|
||||
tx.store_json(0, OFFLINE_SUBKEY_WRITES, &inner.offline_subkey_writes)?;
|
||||
@ -320,7 +322,7 @@ impl StorageManager {
|
||||
Ok(())
|
||||
}
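Several `StorageManager` helpers change from associated functions to `&self` methods in this hunk and the ones below (`save_metadata` to `save_metadata_inner`, `load_metadata` to `load_metadata_inner`, and the `open_*_record_inner` / `move_remote_record_to_local_inner` calls), presumably so the new `veilid_log!(self ...)` calls have a component to reference. A minimal sketch of that refactor shape, using a hypothetical manager type rather than the real `StorageManager`:

```rust
// Sketch of the associated-function -> method refactor pattern in this commit;
// `ManagerSketch` and `InnerState` are made-up illustration types.

struct InnerState {
    dirty: bool,
}

struct ManagerSketch {
    name: &'static str,
}

impl ManagerSketch {
    // Before: no access to the component, so logging had to use a global macro.
    fn save_metadata(inner: &mut InnerState) {
        inner.dirty = false;
    }

    // After: `&self` is available, so the call site can hand the component
    // to a component-aware logging macro (here just a println stand-in).
    fn save_metadata_inner(&self, inner: &mut InnerState) {
        println!("[{}] saving metadata", self.name);
        inner.dirty = false;
    }
}

fn main() {
    let mgr = ManagerSketch { name: "stor" };
    let mut inner = InnerState { dirty: true };
    ManagerSketch::save_metadata(&mut inner); // old call shape: Self::save_metadata(&mut inner)
    mgr.save_metadata_inner(&mut inner);      // new call shape: self.save_metadata_inner(&mut inner)
    println!("dirty = {}", inner.dirty);
}
```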
|
||||
|
||||
async fn load_metadata(inner: &mut StorageManagerInner) -> EyreResult<()> {
|
||||
async fn load_metadata_inner(&self, inner: &mut StorageManagerInner) -> EyreResult<()> {
|
||||
if let Some(metadata_db) = &inner.metadata_db {
|
||||
inner.offline_subkey_writes = match metadata_db
|
||||
.load_json(0, OFFLINE_SUBKEY_WRITES)
|
||||
@ -329,7 +331,7 @@ impl StorageManager {
|
||||
Ok(v) => v.unwrap_or_default(),
|
||||
Err(_) => {
|
||||
if let Err(e) = metadata_db.delete(0, OFFLINE_SUBKEY_WRITES).await {
|
||||
log_stor!(debug "offline_subkey_writes format changed, clearing: {}", e);
|
||||
veilid_log!(self debug "offline_subkey_writes format changed, clearing: {}", e);
|
||||
}
|
||||
Default::default()
|
||||
}
|
||||
@ -416,7 +418,7 @@ impl StorageManager {
|
||||
|
||||
// Now that the record is made we should always succeed to open the existing record
|
||||
// The initial writer is the owner of the record
|
||||
Self::open_existing_record_inner(&mut inner, key, Some(owner), safety_selection)
|
||||
self.open_existing_record_inner(&mut inner, key, Some(owner), safety_selection)
|
||||
.await
|
||||
.map(|r| r.unwrap())
|
||||
}
|
||||
@ -432,8 +434,9 @@ impl StorageManager {
|
||||
let mut inner = self.inner.lock().await;
|
||||
|
||||
// See if we have a local record already or not
|
||||
if let Some(res) =
|
||||
Self::open_existing_record_inner(&mut inner, key, writer, safety_selection).await?
|
||||
if let Some(res) = self
|
||||
.open_existing_record_inner(&mut inner, key, writer, safety_selection)
|
||||
.await?
|
||||
{
|
||||
return Ok(res);
|
||||
}
|
||||
@ -477,14 +480,15 @@ impl StorageManager {
|
||||
// because waiting for the outbound_get_value action could result in the key being opened
|
||||
// via some parallel process
|
||||
|
||||
if let Some(res) =
|
||||
Self::open_existing_record_inner(&mut inner, key, writer, safety_selection).await?
|
||||
if let Some(res) = self
|
||||
.open_existing_record_inner(&mut inner, key, writer, safety_selection)
|
||||
.await?
|
||||
{
|
||||
return Ok(res);
|
||||
}
|
||||
|
||||
// Open the new record
|
||||
Self::open_new_record_inner(
|
||||
self.open_new_record_inner(
|
||||
&mut inner,
|
||||
key,
|
||||
writer,
|
||||
@ -522,7 +526,7 @@ impl StorageManager {
|
||||
|
||||
// Send a one-time cancel request for the watch if we have one and we're online
|
||||
if !self.dht_is_online() {
|
||||
log_stor!(debug "skipping last-ditch watch cancel because we are offline");
|
||||
veilid_log!(self debug "skipping last-ditch watch cancel because we are offline");
|
||||
return Ok(());
|
||||
}
|
||||
// Use the safety selection we opened the record with
|
||||
@ -540,7 +544,7 @@ impl StorageManager {
|
||||
{
|
||||
Ok(v) => v,
|
||||
Err(e) => {
|
||||
log_stor!(debug
|
||||
veilid_log!(self debug
|
||||
"close record watch cancel failed: {}", e
|
||||
);
|
||||
None
|
||||
@ -548,12 +552,12 @@ impl StorageManager {
|
||||
};
|
||||
if let Some(owvresult) = opt_owvresult {
|
||||
if owvresult.expiration_ts.as_u64() != 0 {
|
||||
log_stor!(debug
|
||||
veilid_log!(self debug
|
||||
"close record watch cancel should have zero expiration"
|
||||
);
|
||||
}
|
||||
} else {
|
||||
log_stor!(debug "close record watch cancel unsuccessful");
|
||||
veilid_log!(self debug "close record watch cancel unsuccessful");
|
||||
}
|
||||
|
||||
Ok(())
|
||||
@ -722,7 +726,7 @@ impl StorageManager {
|
||||
)?);
|
||||
|
||||
// Write the value locally first
|
||||
log_stor!(debug "Writing subkey locally: {}:{} len={}", key, subkey, signed_value_data.value_data().data().len() );
|
||||
veilid_log!(self debug "Writing subkey locally: {}:{} len={}", key, subkey, signed_value_data.value_data().data().len() );
|
||||
Self::handle_set_local_value_inner(
|
||||
&mut inner,
|
||||
key,
|
||||
@ -733,7 +737,7 @@ impl StorageManager {
|
||||
.await?;
|
||||
|
||||
if !self.dht_is_online() {
|
||||
log_stor!(debug "Writing subkey offline: {}:{} len={}", key, subkey, signed_value_data.value_data().data().len() );
|
||||
veilid_log!(self debug "Writing subkey offline: {}:{} len={}", key, subkey, signed_value_data.value_data().data().len() );
|
||||
// Add to offline writes to flush
|
||||
Self::add_offline_subkey_write_inner(&mut inner, key, subkey, safety_selection);
|
||||
return Ok(None);
|
||||
@ -742,7 +746,7 @@ impl StorageManager {
|
||||
// Drop the lock for network access
|
||||
drop(inner);
|
||||
|
||||
log_stor!(debug "Writing subkey to the network: {}:{} len={}", key, subkey, signed_value_data.value_data().data().len() );
|
||||
veilid_log!(self debug "Writing subkey to the network: {}:{} len={}", key, subkey, signed_value_data.value_data().data().len() );
|
||||
|
||||
// Use the safety selection we opened the record with
|
||||
let res_rx = match self
|
||||
@ -904,7 +908,7 @@ impl StorageManager {
|
||||
if count == 0 {
|
||||
// Expiration returned should be zero if we requested a cancellation
|
||||
if expiration_ts.as_u64() != 0 {
|
||||
log_stor!(debug "got active watch despite asking for a cancellation");
|
||||
veilid_log!(self debug "got active watch despite asking for a cancellation");
|
||||
}
|
||||
return Ok(Timestamp::new(0));
|
||||
}
|
||||
@ -1122,7 +1126,7 @@ impl StorageManager {
|
||||
.await
|
||||
.map_err(VeilidAPIError::from)?;
|
||||
|
||||
network_result_value_or_log!(rpc_processor
|
||||
network_result_value_or_log!(self rpc_processor
|
||||
.rpc_call_value_changed(dest, vc.key, vc.subkeys.clone(), vc.count, vc.watch_id, vc.value.map(|v| (*v).clone()) )
|
||||
.await
|
||||
.map_err(VeilidAPIError::from)? => [format!(": dest={:?} vc={:?}", dest, vc)] {});
|
||||
@ -1163,12 +1167,12 @@ impl StorageManager {
|
||||
.with(|c| c.network.dht.get_value_count as usize);
|
||||
let value_node_count = fanout_result.value_nodes.len();
|
||||
if value_node_count < get_consensus {
|
||||
log_stor!(debug "timeout with insufficient consensus ({}<{}), adding offline subkey: {}:{}",
|
||||
veilid_log!(self debug "timeout with insufficient consensus ({}<{}), adding offline subkey: {}:{}",
|
||||
value_node_count, get_consensus,
|
||||
key, subkey);
|
||||
true
|
||||
} else {
|
||||
log_stor!(debug "timeout with sufficient consensus ({}>={}): set_value {}:{}",
|
||||
veilid_log!(self debug "timeout with sufficient consensus ({}>={}): set_value {}:{}",
|
||||
value_node_count, get_consensus,
|
||||
key, subkey);
|
||||
false
|
||||
@ -1180,12 +1184,12 @@ impl StorageManager {
|
||||
.with(|c| c.network.dht.get_value_count as usize);
|
||||
let value_node_count = fanout_result.value_nodes.len();
|
||||
if value_node_count < get_consensus {
|
||||
log_stor!(debug "exhausted with insufficient consensus ({}<{}), adding offline subkey: {}:{}",
|
||||
veilid_log!(self debug "exhausted with insufficient consensus ({}<{}), adding offline subkey: {}:{}",
|
||||
value_node_count, get_consensus,
|
||||
key, subkey);
|
||||
true
|
||||
} else {
|
||||
log_stor!(debug "exhausted with sufficient consensus ({}>={}): set_value {}:{}",
|
||||
veilid_log!(self debug "exhausted with sufficient consensus ({}>={}): set_value {}:{}",
|
||||
value_node_count, get_consensus,
|
||||
key, subkey);
|
||||
false
|
||||
@ -1261,6 +1265,7 @@ impl StorageManager {
|
||||
|
||||
#[instrument(level = "trace", target = "stor", skip_all, err)]
|
||||
async fn move_remote_record_to_local_inner(
|
||||
&self,
|
||||
inner: &mut StorageManagerInner,
|
||||
key: TypedKey,
|
||||
safety_selection: SafetySelection,
|
||||
@ -1297,12 +1302,12 @@ impl StorageManager {
|
||||
for subkey in remote_record.stored_subkeys().iter() {
|
||||
let Some(get_result) = remote_record_store.get_subkey(key, subkey, false).await? else {
|
||||
// Subkey was missing
|
||||
warn!("Subkey was missing: {} #{}", key, subkey);
|
||||
veilid_log!(self warn "Subkey was missing: {} #{}", key, subkey);
|
||||
continue;
|
||||
};
|
||||
let Some(subkey_data) = get_result.opt_value else {
|
||||
// Subkey was missing
|
||||
warn!("Subkey data was missing: {} #{}", key, subkey);
|
||||
veilid_log!(self warn "Subkey data was missing: {} #{}", key, subkey);
|
||||
continue;
|
||||
};
|
||||
local_record_store
|
||||
@ -1322,6 +1327,7 @@ impl StorageManager {
|
||||
|
||||
#[instrument(level = "trace", target = "stor", skip_all, err)]
|
||||
pub async fn open_existing_record_inner(
|
||||
&self,
|
||||
inner: &mut StorageManagerInner,
|
||||
key: TypedKey,
|
||||
writer: Option<KeyPair>,
|
||||
@ -1347,9 +1353,9 @@ impl StorageManager {
|
||||
None => {
|
||||
// If we don't have a local record yet, check to see if we have a remote record
|
||||
// if so, migrate it to a local record
|
||||
let Some(v) =
|
||||
Self::move_remote_record_to_local_inner(&mut *inner, key, safety_selection)
|
||||
.await?
|
||||
let Some(v) = self
|
||||
.move_remote_record_to_local_inner(&mut *inner, key, safety_selection)
|
||||
.await?
|
||||
else {
|
||||
// No remote record either
|
||||
return Ok(None);
|
||||
@ -1388,6 +1394,7 @@ impl StorageManager {
|
||||
|
||||
#[instrument(level = "trace", target = "stor", skip_all, err)]
|
||||
pub async fn open_new_record_inner(
|
||||
&self,
|
||||
inner: &mut StorageManagerInner,
|
||||
key: TypedKey,
|
||||
writer: Option<KeyPair>,
|
||||
|
@ -1,6 +1,8 @@
use super::*;
use num_traits::{PrimInt, Unsigned};

impl_veilid_log_facility!("stor");

#[derive(ThisError, Debug, Clone, Copy, Eq, PartialEq)]
pub enum LimitError {
#[error("limit overflow")]
@ -15,17 +17,48 @@ pub enum NumericError {
Underflow,
}

#[derive(Debug, Clone)]
#[derive(Clone)]
pub struct LimitedSize<T: PrimInt + Unsigned + fmt::Display + fmt::Debug> {
registry: VeilidComponentRegistry,
description: String,
value: T,
limit: Option<T>,
uncommitted_value: Option<T>,
}

impl<T> VeilidComponentRegistryAccessor for LimitedSize<T>
where
T: PrimInt + Unsigned + fmt::Display + fmt::Debug,
{
fn registry(&self) -> VeilidComponentRegistry {
self.registry.clone()
}
}

impl<T> fmt::Debug for LimitedSize<T>
where
T: PrimInt + Unsigned + fmt::Display + fmt::Debug,
{
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("LimitedSize")
//.field("registry", &self.registry)
.field("description", &self.description)
.field("value", &self.value)
.field("limit", &self.limit)
.field("uncommitted_value", &self.uncommitted_value)
.finish()
}
}

impl<T: PrimInt + Unsigned + fmt::Display + fmt::Debug> LimitedSize<T> {
pub fn new(description: &str, value: T, limit: Option<T>) -> Self {
pub fn new(
registry: VeilidComponentRegistry,
description: &str,
value: T,
limit: Option<T>,
) -> Self {
Self {
registry,
description: description.to_owned(),
value,
limit,
@ -65,7 +98,7 @@ impl<T: PrimInt + Unsigned + fmt::Display + fmt::Debug> LimitedSize<T> {
let current_value = self.current_value();
let max_v = current_value - T::min_value();
if v > max_v {
log_stor!(debug "Numeric underflow ({})", self.description);
veilid_log!(self debug "Numeric underflow ({})", self.description);
v = max_v;
}
let new_value = current_value - v;
@ -88,11 +121,11 @@ impl<T: PrimInt + Unsigned + fmt::Display + fmt::Debug> LimitedSize<T> {
if let Some(uncommitted_value) = self.uncommitted_value {
if let Some(limit) = self.limit {
if uncommitted_value > limit {
log_stor!(debug "Commit over limit failed ({}): {} > {}", self.description, uncommitted_value, limit);
veilid_log!(self debug "Commit over limit failed ({}): {} > {}", self.description, uncommitted_value, limit);
return Err(LimitError::OverLimit);
}
}
log_stor!(debug "Commit ({}): {} => {}", self.description, self.value, uncommitted_value);
veilid_log!(self debug "Commit ({}): {} => {}", self.description, self.value, uncommitted_value);
self.uncommitted_value = None;
self.value = uncommitted_value;
}
@ -101,7 +134,7 @@ impl<T: PrimInt + Unsigned + fmt::Display + fmt::Debug> LimitedSize<T> {

pub fn rollback(&mut self) -> T {
if let Some(uv) = self.uncommitted_value.take() {
log_stor!(debug "Rollback ({}): {} (drop {})", self.description, self.value, uv);
veilid_log!(self debug "Rollback ({}): {} (drop {})", self.description, self.value, uv);
}
self.value
}
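The `LimitedSize` hunks above keep the existing stage/commit/rollback accounting while threading a registry through for logging. Stripped of that plumbing, the accounting pattern is roughly the following sketch (an illustration with assumed names and a fixed `u64` payload, not the veilid-core generic type):

```rust
// Simplified sketch of the stage/commit/rollback accounting pattern used by
// LimitedSize; field and method names mirror the diff, but this is not the
// actual veilid-core implementation.

#[derive(Debug)]
struct LimitedSizeSketch {
    value: u64,
    limit: Option<u64>,
    uncommitted_value: Option<u64>,
}

impl LimitedSizeSketch {
    fn new(value: u64, limit: Option<u64>) -> Self {
        Self { value, limit, uncommitted_value: None }
    }

    fn current_value(&self) -> u64 {
        self.uncommitted_value.unwrap_or(self.value)
    }

    // Stage an addition; saturating arithmetic keeps the sketch simple.
    fn add(&mut self, v: u64) {
        self.uncommitted_value = Some(self.current_value().saturating_add(v));
    }

    // Fold the staged value in, unless it would exceed the limit.
    fn commit(&mut self) -> Result<u64, &'static str> {
        if let Some(uncommitted) = self.uncommitted_value {
            if matches!(self.limit, Some(limit) if uncommitted > limit) {
                return Err("over limit");
            }
            self.uncommitted_value = None;
            self.value = uncommitted;
        }
        Ok(self.value)
    }

    // Drop any staged change and keep the last committed value.
    fn rollback(&mut self) -> u64 {
        self.uncommitted_value = None;
        self.value
    }
}

fn main() {
    let mut total = LimitedSizeSketch::new(0, Some(100));
    total.add(60);
    assert_eq!(total.commit(), Ok(60));
    total.add(60);
    assert_eq!(total.commit(), Err("over limit"));
    assert_eq!(total.rollback(), 60);
}
```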
@ -31,6 +31,8 @@ use record_data::*;
|
||||
|
||||
use hashlink::LruCache;
|
||||
|
||||
impl_veilid_log_facility!("stor");
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
/// A dead record that is yet to be purged from disk and statistics
|
||||
struct DeadRecord<D>
|
||||
@ -50,6 +52,7 @@ pub(super) struct RecordStore<D>
|
||||
where
|
||||
D: fmt::Debug + Clone + Serialize + for<'d> Deserialize<'d>,
|
||||
{
|
||||
registry: VeilidComponentRegistry,
|
||||
name: String,
|
||||
limits: RecordStoreLimits,
|
||||
|
||||
@ -79,6 +82,15 @@ where
|
||||
purge_dead_records_mutex: Arc<AsyncMutex<()>>,
|
||||
}
|
||||
|
||||
impl<D> VeilidComponentRegistryAccessor for RecordStore<D>
|
||||
where
|
||||
D: fmt::Debug + Clone + Serialize + for<'d> Deserialize<'d>,
|
||||
{
|
||||
fn registry(&self) -> VeilidComponentRegistry {
|
||||
self.registry.clone()
|
||||
}
|
||||
}
|
||||
|
||||
impl<D> fmt::Debug for RecordStore<D>
|
||||
where
|
||||
D: fmt::Debug + Clone + Serialize + for<'d> Deserialize<'d>,
|
||||
@ -144,6 +156,7 @@ where
|
||||
let subkey_table = table_store.open(&format!("{}_subkeys", name), 1).await?;
|
||||
|
||||
let mut out = Self {
|
||||
registry: table_store.registry(),
|
||||
name: name.to_owned(),
|
||||
limits,
|
||||
record_table,
|
||||
@ -152,11 +165,13 @@ where
|
||||
subkey_cache: LruCache::new(subkey_cache_size),
|
||||
inspect_cache: InspectCache::new(subkey_cache_size),
|
||||
subkey_cache_total_size: LimitedSize::new(
|
||||
table_store.registry(),
|
||||
"subkey_cache_total_size",
|
||||
0,
|
||||
limit_subkey_cache_total_size,
|
||||
),
|
||||
total_storage_space: LimitedSize::new(
|
||||
table_store.registry(),
|
||||
"total_storage_space",
|
||||
0,
|
||||
limit_max_storage_space,
|
||||
@ -216,7 +231,7 @@ where
|
||||
});
|
||||
}) {
|
||||
// This shouldn't happen, but deduplicate anyway
|
||||
log_stor!(warn "duplicate record in table: {:?}", ri.0);
|
||||
veilid_log!(self warn "duplicate record in table: {:?}", ri.0);
|
||||
dead_records.push(DeadRecord {
|
||||
key: ri.0,
|
||||
record: v,
|
||||
@ -267,7 +282,7 @@ where
|
||||
} else {
|
||||
self.subkey_cache_total_size.rollback();
|
||||
|
||||
log_stor!(error "subkey cache should not be empty, has {} bytes unaccounted for",self.subkey_cache_total_size.get());
|
||||
veilid_log!(self error "subkey cache should not be empty, has {} bytes unaccounted for",self.subkey_cache_total_size.get());
|
||||
|
||||
self.subkey_cache_total_size.set(0);
|
||||
self.subkey_cache_total_size.commit().unwrap();
|
||||
@ -312,22 +327,22 @@ where
|
||||
for dr in dead_records {
|
||||
// Record should already be gone from index
|
||||
if self.record_index.contains_key(&dr.key) {
|
||||
log_stor!(error "dead record found in index: {:?}", dr.key);
|
||||
veilid_log!(self error "dead record found in index: {:?}", dr.key);
|
||||
}
|
||||
|
||||
// Record should have no watches now
|
||||
if self.watched_records.contains_key(&dr.key) {
|
||||
log_stor!(error "dead record found in watches: {:?}", dr.key);
|
||||
veilid_log!(self error "dead record found in watches: {:?}", dr.key);
|
||||
}
|
||||
|
||||
// Record should have no watch changes now
|
||||
if self.changed_watched_values.contains(&dr.key) {
|
||||
log_stor!(error "dead record found in watch changes: {:?}", dr.key);
|
||||
veilid_log!(self error "dead record found in watch changes: {:?}", dr.key);
|
||||
}
|
||||
|
||||
// Delete record
|
||||
if let Err(e) = rt_xact.delete(0, &dr.key.bytes()) {
|
||||
log_stor!(error "record could not be deleted: {}", e);
|
||||
veilid_log!(self error "record could not be deleted: {}", e);
|
||||
}
|
||||
|
||||
// Delete subkeys
|
||||
@ -340,7 +355,7 @@ where
|
||||
};
|
||||
let stkb = stk.bytes();
|
||||
if let Err(e) = st_xact.delete(0, &stkb) {
|
||||
log_stor!(error "subkey could not be deleted: {}", e);
|
||||
veilid_log!(self error "subkey could not be deleted: {}", e);
|
||||
}
|
||||
|
||||
// From cache
|
||||
@ -356,10 +371,10 @@ where
|
||||
}
|
||||
}
|
||||
if let Err(e) = rt_xact.commit().await {
|
||||
log_stor!(error "failed to commit record table transaction: {}", e);
|
||||
veilid_log!(self error "failed to commit record table transaction: {}", e);
|
||||
}
|
||||
if let Err(e) = st_xact.commit().await {
|
||||
log_stor!(error "failed to commit subkey table transaction: {}", e);
|
||||
veilid_log!(self error "failed to commit subkey table transaction: {}", e);
|
||||
}
|
||||
}
|
||||
|
||||
@ -375,12 +390,12 @@ where
|
||||
// Get the changed record and save it to the table
|
||||
if let Some(r) = self.record_index.peek(&rtk) {
|
||||
if let Err(e) = rt_xact.store_json(0, &rtk.bytes(), r) {
|
||||
log_stor!(error "failed to save record: {}", e);
|
||||
veilid_log!(self error "failed to save record: {}", e);
|
||||
}
|
||||
}
|
||||
}
|
||||
if let Err(e) = rt_xact.commit().await {
|
||||
log_stor!(error "failed to commit record table transaction: {}", e);
|
||||
veilid_log!(self error "failed to commit record table transaction: {}", e);
|
||||
}
|
||||
}
|
||||
|
||||
@ -422,7 +437,7 @@ where
|
||||
dead_records.push((k, v));
|
||||
}) {
|
||||
// Shouldn't happen but log it
|
||||
log_stor!(warn "new duplicate record in table: {:?}", rtk);
|
||||
veilid_log!(self warn "new duplicate record in table: {:?}", rtk);
|
||||
self.add_dead_record(rtk, v);
|
||||
}
|
||||
for dr in dead_records {
|
||||
@ -1206,22 +1221,22 @@ where
|
||||
for evci in evcis {
|
||||
// Get the first subkey data
|
||||
let Some(first_subkey) = evci.subkeys.first() else {
|
||||
log_stor!(error "first subkey should exist for value change notification");
|
||||
veilid_log!(self error "first subkey should exist for value change notification");
|
||||
continue;
|
||||
};
|
||||
let get_result = match self.get_subkey(evci.key, first_subkey, false).await {
|
||||
Ok(Some(skr)) => skr,
|
||||
Ok(None) => {
|
||||
log_stor!(error "subkey should have data for value change notification");
|
||||
veilid_log!(self error "subkey should have data for value change notification");
|
||||
continue;
|
||||
}
|
||||
Err(e) => {
|
||||
log_stor!(error "error getting subkey data for value change notification: {}", e);
|
||||
veilid_log!(self error "error getting subkey data for value change notification: {}", e);
|
||||
continue;
|
||||
}
|
||||
};
|
||||
let Some(value) = get_result.opt_value else {
|
||||
log_stor!(error "first subkey should have had value for value change notification");
|
||||
veilid_log!(self error "first subkey should have had value for value change notification");
|
||||
continue;
|
||||
};
|
||||
|
||||
|
@ -1,5 +1,7 @@
|
||||
use super::*;
|
||||
|
||||
impl_veilid_log_facility!("stor");
|
||||
|
||||
/// The context of the outbound_set_value operation
|
||||
struct OutboundSetValueContext {
|
||||
/// The latest value of the subkey, may be the value passed in
|
||||
@ -116,7 +118,7 @@ impl StorageManager {
|
||||
ctx.missed_since_last_set += 1;
|
||||
|
||||
// Return peers if we have some
|
||||
log_network_result!(debug "SetValue missed: {}, fanout call returned peers {}", ctx.missed_since_last_set, sva.answer.peers.len());
|
||||
veilid_log!(registry debug target:"network_result", "SetValue missed: {}, fanout call returned peers {}", ctx.missed_since_last_set, sva.answer.peers.len());
|
||||
return Ok(NetworkResult::value(FanoutCallOutput{peer_info_list:sva.answer.peers}));
|
||||
}
|
||||
|
||||
@ -131,12 +133,12 @@ impl StorageManager {
|
||||
}
|
||||
|
||||
// Return peers if we have some
|
||||
log_network_result!(debug "SetValue returned no value, fanout call returned peers {}", sva.answer.peers.len());
|
||||
veilid_log!(registry debug target:"network_result", "SetValue returned no value, fanout call returned peers {}", sva.answer.peers.len());
|
||||
return Ok(NetworkResult::value(FanoutCallOutput{peer_info_list:sva.answer.peers}));
|
||||
};
|
||||
|
||||
// Keep the value if we got one and it is newer and it passes schema validation
|
||||
log_dht!(debug "SetValue got value back: len={} seq={}", value.value_data().data().len(), value.value_data().seq());
|
||||
veilid_log!(registry debug "SetValue got value back: len={} seq={}", value.value_data().data().len(), value.value_data().seq());
|
||||
|
||||
// Validate with schema
|
||||
if !ctx.schema.check_subkey_value_data(
|
||||
@ -193,6 +195,7 @@ impl StorageManager {
|
||||
let check_done = {
|
||||
let context = context.clone();
|
||||
let out_tx = out_tx.clone();
|
||||
let registry = self.registry();
|
||||
move |_closest_nodes: &[NodeRef]| {
|
||||
let mut ctx = context.lock();
|
||||
|
||||
@ -209,10 +212,10 @@ impl StorageManager {
|
||||
fanout_result,
|
||||
signed_value_data: ctx.value.clone(),
|
||||
};
|
||||
log_dht!(debug "Sending partial SetValue result: {:?}", out);
|
||||
veilid_log!(registry debug "Sending partial SetValue result: {:?}", out);
|
||||
|
||||
if let Err(e) = out_tx.send(Ok(out)) {
|
||||
log_dht!(debug "Sending partial SetValue result failed: {}", e);
|
||||
veilid_log!(registry debug "Sending partial SetValue result failed: {}", e);
|
||||
}
|
||||
}
|
||||
|
||||
@ -261,9 +264,9 @@ impl StorageManager {
|
||||
// Failed
|
||||
TimeoutOr::Value(Err(e)) => {
|
||||
// If we finished with an error, return that
|
||||
log_dht!(debug "SetValue fanout error: {}", e);
|
||||
veilid_log!(registry debug "SetValue fanout error: {}", e);
|
||||
if let Err(e) = out_tx.send(Err(e.into())) {
|
||||
log_dht!(debug "Sending SetValue fanout error failed: {}", e);
|
||||
veilid_log!(registry debug "Sending SetValue fanout error failed: {}", e);
|
||||
}
|
||||
return;
|
||||
}
|
||||
@ -274,13 +277,13 @@ impl StorageManager {
|
||||
kind,
|
||||
value_nodes: ctx.value_nodes.clone(),
|
||||
};
|
||||
log_dht!(debug "SetValue Fanout: {:?}", fanout_result);
|
||||
veilid_log!(registry debug "SetValue Fanout: {:?}", fanout_result);
|
||||
|
||||
if let Err(e) = out_tx.send(Ok(OutboundSetValueResult {
|
||||
fanout_result,
|
||||
signed_value_data: ctx.value.clone(),
|
||||
})) {
|
||||
log_dht!(debug "Sending SetValue result failed: {}", e);
|
||||
veilid_log!(registry debug "Sending SetValue result failed: {}", e);
|
||||
}
|
||||
}
|
||||
.instrument(tracing::trace_span!("outbound_set_value fanout routine")),
|
||||
@ -314,7 +317,7 @@ impl StorageManager {
|
||||
let result = match result {
|
||||
Ok(v) => v,
|
||||
Err(e) => {
|
||||
log_rtab!(debug "Deferred fanout error: {}", e);
|
||||
veilid_log!(registry debug "Deferred fanout error: {}", e);
|
||||
return false;
|
||||
}
|
||||
};
|
||||
@ -326,7 +329,7 @@ impl StorageManager {
|
||||
return is_partial;
|
||||
}
|
||||
Err(e) => {
|
||||
log_rtab!(debug "Deferred fanout error: {}", e);
|
||||
veilid_log!(registry debug "Deferred fanout error: {}", e);
|
||||
return false;
|
||||
}
|
||||
};
|
||||
|
@ -9,7 +9,7 @@ use super::*;
impl StorageManager {
pub(super) fn setup_tasks(&self) {
// Set flush records tick task
log_stor!(debug "starting flush record stores task");
veilid_log!(self debug "starting flush record stores task");
impl_setup_task!(
self,
Self,
@ -18,7 +18,7 @@ impl StorageManager {
);

// Set offline subkey writes tick task
log_stor!(debug "starting offline subkey writes task");
veilid_log!(self debug "starting offline subkey writes task");
impl_setup_task!(
self,
Self,
@ -27,7 +27,7 @@ impl StorageManager {
);

// Set send value changes tick task
log_stor!(debug "starting send value changes task");
veilid_log!(self debug "starting send value changes task");
impl_setup_task!(
self,
Self,
@ -36,7 +36,7 @@ impl StorageManager {
);

// Set check active watches tick task
log_stor!(debug "starting check active watches task");
veilid_log!(self debug "starting check active watches task");
impl_setup_task!(
self,
Self,
@ -45,7 +45,7 @@ impl StorageManager {
);

// Set check watched records tick task
log_stor!(debug "starting checked watched records task");
veilid_log!(self debug "starting checked watched records task");
impl_setup_task!(
self,
Self,
@ -80,25 +80,25 @@ impl StorageManager {

#[instrument(level = "trace", target = "stor", skip_all)]
pub(super) async fn cancel_tasks(&self) {
log_stor!(debug "stopping check watched records task");
veilid_log!(self debug "stopping check watched records task");
if let Err(e) = self.check_watched_records_task.stop().await {
warn!("check_watched_records_task not stopped: {}", e);
veilid_log!(self warn "check_watched_records_task not stopped: {}", e);
}
log_stor!(debug "stopping check active watches task");
veilid_log!(self debug "stopping check active watches task");
if let Err(e) = self.check_active_watches_task.stop().await {
warn!("check_active_watches_task not stopped: {}", e);
veilid_log!(self warn "check_active_watches_task not stopped: {}", e);
}
log_stor!(debug "stopping send value changes task");
veilid_log!(self debug "stopping send value changes task");
if let Err(e) = self.send_value_changes_task.stop().await {
warn!("send_value_changes_task not stopped: {}", e);
veilid_log!(self warn "send_value_changes_task not stopped: {}", e);
}
log_stor!(debug "stopping flush record stores task");
veilid_log!(self debug "stopping flush record stores task");
if let Err(e) = self.flush_record_stores_task.stop().await {
warn!("flush_record_stores_task not stopped: {}", e);
veilid_log!(self warn "flush_record_stores_task not stopped: {}", e);
}
log_stor!(debug "stopping offline subkey writes task");
veilid_log!(self debug "stopping offline subkey writes task");
if let Err(e) = self.offline_subkey_writes_task.stop().await {
warn!("offline_subkey_writes_task not stopped: {}", e);
veilid_log!(self warn "offline_subkey_writes_task not stopped: {}", e);
}
}
}
@ -2,6 +2,8 @@ use super::*;
|
||||
use futures_util::*;
|
||||
use stop_token::future::FutureExt as _;
|
||||
|
||||
impl_veilid_log_facility!("stor");

#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
pub struct OfflineSubkeyWrite {
pub safety_selection: SafetySelection,
@ -50,20 +52,20 @@ impl StorageManager {
Self::handle_get_local_value_inner(&mut inner, key, subkey, true).await
};
let Ok(get_result) = get_result else {
log_stor!(debug "Offline subkey write had no subkey result: {}:{}", key, subkey);
veilid_log!(self debug "Offline subkey write had no subkey result: {}:{}", key, subkey);
// drop this one
return Ok(OfflineSubkeyWriteResult::Dropped);
};
let Some(value) = get_result.opt_value else {
log_stor!(debug "Offline subkey write had no subkey value: {}:{}", key, subkey);
veilid_log!(self debug "Offline subkey write had no subkey value: {}:{}", key, subkey);
// drop this one
return Ok(OfflineSubkeyWriteResult::Dropped);
};
let Some(descriptor) = get_result.opt_descriptor else {
log_stor!(debug "Offline subkey write had no descriptor: {}:{}", key, subkey);
veilid_log!(self debug "Offline subkey write had no descriptor: {}:{}", key, subkey);
return Ok(OfflineSubkeyWriteResult::Dropped);
};
log_stor!(debug "Offline subkey write: {}:{} len={}", key, subkey, value.value_data().data().len());
veilid_log!(self debug "Offline subkey write: {}:{} len={}", key, subkey, value.value_data().data().len());
let osvres = self
.outbound_set_value(key, subkey, safety_selection, value.clone(), descriptor)
.await;
@ -96,16 +98,16 @@ impl StorageManager {
return Ok(OfflineSubkeyWriteResult::Finished(result));
}
Err(e) => {
log_stor!(debug "failed to get offline subkey write result: {}:{} {}", key, subkey, e);
veilid_log!(self debug "failed to get offline subkey write result: {}:{} {}", key, subkey, e);
return Ok(OfflineSubkeyWriteResult::Cancelled);
}
}
}
log_stor!(debug "writing offline subkey did not complete {}:{}", key, subkey);
veilid_log!(self debug "writing offline subkey did not complete {}:{}", key, subkey);
return Ok(OfflineSubkeyWriteResult::Cancelled);
}
Err(e) => {
log_stor!(debug "failed to write offline subkey: {}:{} {}", key, subkey, e);
veilid_log!(self debug "failed to write offline subkey: {}:{} {}", key, subkey, e);
return Ok(OfflineSubkeyWriteResult::Cancelled);
}
}
@ -187,7 +189,7 @@ impl StorageManager {
let mut inner = self.inner.lock().await;

// Debug print the result
log_stor!(debug "Offline write result: {:?}", result);
veilid_log!(self debug "Offline write result: {:?}", result);

// Get the offline subkey write record
match inner.offline_subkey_writes.entry(result.key) {
@ -206,12 +208,12 @@ impl StorageManager {
osw.subkeys.is_empty()
};
if finished {
log_stor!(debug "Offline write finished key {}", result.key);
veilid_log!(self debug "Offline write finished key {}", result.key);
o.remove();
}
}
std::collections::hash_map::Entry::Vacant(_) => {
warn!("offline write work items should always be on offline_subkey_writes entries that exist: ignoring key {}", result.key)
veilid_log!(self warn "offline write work items should always be on offline_subkey_writes entries that exist: ignoring key {}", result.key);
}
}

@ -2,6 +2,8 @@ use super::*;
use futures_util::StreamExt;
use stop_token::future::FutureExt;

impl_veilid_log_facility!("stor");

impl StorageManager {
// Send value change notifications across the network
#[instrument(level = "trace", target = "stor", skip_all, err)]
@ -34,7 +36,7 @@ impl StorageManager {
unord.push(
async move {
if let Err(e) = self.send_value_change(vc).await {
log_stor!(debug "Failed to send value change: {}", e);
veilid_log!(self debug "Failed to send value change: {}", e);
}
}
.in_current_span(),

@ -1,5 +1,7 @@
use super::*;

impl_veilid_log_facility!("stor");

/// The context of the outbound_watch_value operation
struct OutboundWatchValueContext {
/// A successful watch
@ -55,7 +57,7 @@ impl StorageManager {
)?;

if wva.answer.accepted {
log_dht!(debug "WatchValue canceled: id={} expiration_ts={} ({})", wva.answer.watch_id, display_ts(wva.answer.expiration_ts.as_u64()), watch_node);
veilid_log!(self debug "WatchValue canceled: id={} expiration_ts={} ({})", wva.answer.watch_id, display_ts(wva.answer.expiration_ts.as_u64()), watch_node);
Ok(Some(OutboundWatchValueResult {
expiration_ts: wva.answer.expiration_ts,
watch_id: wva.answer.watch_id,
@ -63,7 +65,7 @@ impl StorageManager {
opt_value_changed_route: wva.reply_private_route,
}))
} else {
log_dht!(debug "WatchValue not canceled: id={} ({})", watch_id, watch_node);
veilid_log!(self debug "WatchValue not canceled: id={} ({})", watch_id, watch_node);
Ok(None)
}
}
@ -113,9 +115,9 @@ impl StorageManager {

if wva.answer.accepted {
if watch_id != wva.answer.watch_id {
log_dht!(debug "WatchValue changed: id={}->{} expiration_ts={} ({})", watch_id, wva.answer.watch_id, display_ts(wva.answer.expiration_ts.as_u64()), watch_node);
veilid_log!(self debug "WatchValue changed: id={}->{} expiration_ts={} ({})", watch_id, wva.answer.watch_id, display_ts(wva.answer.expiration_ts.as_u64()), watch_node);
} else {
log_dht!(debug "WatchValue renewed: id={} expiration_ts={} ({})", watch_id, display_ts(wva.answer.expiration_ts.as_u64()), watch_node);
veilid_log!(self debug "WatchValue renewed: id={} expiration_ts={} ({})", watch_id, display_ts(wva.answer.expiration_ts.as_u64()), watch_node);
}

Ok(Some(OutboundWatchValueResult {
@ -125,7 +127,7 @@ impl StorageManager {
opt_value_changed_route: wva.reply_private_route,
}))
} else {
log_dht!(debug "WatchValue change failed: id={} ({})", wva.answer.watch_id, watch_node);
veilid_log!(self debug "WatchValue change failed: id={} ({})", wva.answer.watch_id, watch_node);
Ok(None)
}
}
@ -259,7 +261,7 @@ impl StorageManager {
let mut done = false;
if wva.answer.expiration_ts.as_u64() > 0 {
// If the expiration time is greater than zero this watch is active
log_dht!(debug "Watch created: id={} expiration_ts={} ({})", wva.answer.watch_id, display_ts(wva.answer.expiration_ts.as_u64()), next_node);
veilid_log!(registry debug "Watch created: id={} expiration_ts={} ({})", wva.answer.watch_id, display_ts(wva.answer.expiration_ts.as_u64()), next_node);
done = true;
} else {
// If the returned expiration time is zero, this watch was cancelled or rejected
@ -277,7 +279,7 @@ impl StorageManager {
}

// Return peers if we have some
log_network_result!(debug "WatchValue fanout call returned peers {} ({})", wva.answer.peers.len(), next_node);
veilid_log!(registry debug target:"network_result", "WatchValue fanout call returned peers {} ({})", wva.answer.peers.len(), next_node);

Ok(NetworkResult::value(FanoutCallOutput{peer_info_list: wva.answer.peers}))
}.instrument(tracing::trace_span!("outbound_watch_value call routine"))
@ -316,9 +318,9 @@ impl StorageManager {
// Return the best answer we've got
let ctx = context.lock();
if ctx.opt_watch_value_result.is_some() {
log_dht!(debug "WatchValue Fanout Timeout Success");
veilid_log!(self debug "WatchValue Fanout Timeout Success");
} else {
log_dht!(debug "WatchValue Fanout Timeout Failure");
veilid_log!(self debug "WatchValue Fanout Timeout Failure");
}
Ok(ctx.opt_watch_value_result.clone())
}
@ -327,9 +329,9 @@ impl StorageManager {
// Return the best answer we've got
let ctx = context.lock();
if ctx.opt_watch_value_result.is_some() {
log_dht!(debug "WatchValue Fanout Success");
veilid_log!(self debug "WatchValue Fanout Success");
} else {
log_dht!(debug "WatchValue Fanout Failure");
veilid_log!(self debug "WatchValue Fanout Failure");
}
Ok(ctx.opt_watch_value_result.clone())
}
@ -338,16 +340,16 @@ impl StorageManager {
// Return the best answer we've got
let ctx = context.lock();
if ctx.opt_watch_value_result.is_some() {
log_dht!(debug "WatchValue Fanout Exhausted Success");
veilid_log!(self debug "WatchValue Fanout Exhausted Success");
} else {
log_dht!(debug "WatchValue Fanout Exhausted Failure");
veilid_log!(self debug "WatchValue Fanout Exhausted Failure");
}
Ok(ctx.opt_watch_value_result.clone())
}
// Failed
TimeoutOr::Value(Err(e)) => {
// If we finished with an error, return that
log_dht!(debug "WatchValue Fanout Error: {}", e);
veilid_log!(self debug "WatchValue Fanout Error: {}", e);
Err(e.into())
}
}
@ -436,16 +438,16 @@ impl StorageManager {

if count > active_watch.count {
// If count is greater than our requested count then this is invalid, cancel the watch
log_stor!(debug "watch count went backward: {}: {}/{}", key, count, active_watch.count);
veilid_log!(self debug "watch count went backward: {}: {}/{}", key, count, active_watch.count);
// Force count to zero
count = 0;
opened_record.clear_active_watch();
} else if count == 0 {
// If count is zero, we're done, cancel the watch and the app can renew it if it wants
log_stor!(debug "watch count finished: {}", key);
veilid_log!(self debug "watch count finished: {}", key);
opened_record.clear_active_watch();
} else {
log_stor!(debug
veilid_log!(self debug
"watch count decremented: {}: {}/{}",
key,
count,

@ -16,6 +16,8 @@ use native::*;

use keyvaluedb::*;

impl_veilid_log_facility!("tstore");

const ALL_TABLE_NAMES: &[u8] = b"all_table_names";

/// Description of column
@ -116,7 +118,7 @@ impl TableStore {
}
pub(crate) fn new(registry: VeilidComponentRegistry) -> Self {
let inner = Self::new_inner();
let table_store_driver = TableStoreDriver::new(registry.config());
let table_store_driver = TableStoreDriver::new(registry.clone());

Self {
registry,
@ -318,7 +320,7 @@ impl TableStore {
) -> EyreResult<Vec<u8>> {
// Check if we are to protect the key
if device_encryption_key_password.is_empty() {
log_tstore!(debug "no dek password");
veilid_log!(self debug "no dek password");
// Return the unprotected key bytes
let mut out = Vec::with_capacity(4 + SHARED_SECRET_LENGTH);
out.extend_from_slice(&dek.kind.0);
@ -357,7 +359,7 @@ impl TableStore {
.load_user_secret("device_encryption_key")
.await?;
let Some(dek_bytes) = dek_bytes else {
log_tstore!(debug "no device encryption key");
veilid_log!(self debug "no device encryption key");
return Ok(None);
};

@ -383,7 +385,7 @@ impl TableStore {
.protected_store()
.remove_user_secret("device_encryption_key")
.await?;
log_tstore!(debug "removed device encryption key. existed: {}", existed);
veilid_log!(self debug "removed device encryption key. existed: {}", existed);
return Ok(());
};

@ -395,7 +397,7 @@ impl TableStore {
let device_encryption_key_password =
if let Some(new_device_encryption_key_password) = new_device_encryption_key_password {
// Change password
log_tstore!(debug "changing dek password");
veilid_log!(self debug "changing dek password");
self.config()
.try_with_mut(|c| {
c.protected_store
@ -406,7 +408,7 @@ impl TableStore {
.unwrap()
} else {
// Get device encryption key protection password if we have it
log_tstore!(debug "saving with existing dek password");
veilid_log!(self debug "saving with existing dek password");
self.config()
.with(|c| c.protected_store.device_encryption_key_password.clone())
};
@ -423,7 +425,7 @@ impl TableStore {
.protected_store()
.save_user_secret("device_encryption_key", &dek_bytes)
.await?;
log_tstore!(debug "saving device encryption key. existed: {}", existed);
veilid_log!(self debug "saving device encryption key. existed: {}", existed);
Ok(())
}

@ -482,7 +484,7 @@ impl TableStore {
},
Ok(None) => {
// No table names yet, that's okay
log_tstore!("__veilid_all_tables is empty");
veilid_log!(self trace "__veilid_all_tables is empty");
}
Err(e) => {
error!("could not get __veilid_all_tables: {}", e);
@ -537,7 +539,7 @@ impl TableStore {

#[instrument(level = "trace", target = "tstore", skip_all)]
pub(crate) fn on_table_db_drop(&self, table: String) {
log_rtab!("dropping table db: {}", table);
veilid_log!(self trace "dropping table db: {}", table);
let mut inner = self.inner.lock();
if inner.opened.remove(&table).is_none() {
unreachable!("should have removed an item");
@ -667,7 +669,7 @@ impl TableStore {
let deleted = self.table_store_driver.delete(&table_name).await?;
if !deleted {
// Table missing? Just remove name
warn!(
veilid_log!(self warn
"table existed in name table but not in storage: {} : {}",
name, table_name
);
@ -746,7 +748,7 @@ impl TableStore {
apibail_not_initialized!();
}
}
log_tstore!(debug "TableStore::rename {} -> {}", old_name, new_name);
veilid_log!(self debug "TableStore::rename {} -> {}", old_name, new_name);
self.name_rename(old_name, new_name).await?;
self.flush().await;
Ok(())

@ -4,20 +4,22 @@ use std::path::PathBuf;

#[derive(Clone)]
pub(in crate::table_store) struct TableStoreDriver {
config: VeilidConfig,
registry: VeilidComponentRegistry,
}

impl_veilid_component_registry_accessor!(TableStoreDriver);

impl TableStoreDriver {
pub fn new(config: VeilidConfig) -> Self {
Self { config }
pub fn new(registry: VeilidComponentRegistry) -> Self {
Self { registry }
}

fn get_dbpath(&self, table: &str) -> VeilidAPIResult<PathBuf> {
let c = self.config.get();
let config = self.registry().config();
let c = config.get();
let tablestoredir = c.table_store.directory.clone();
std::fs::create_dir_all(&tablestoredir).map_err(VeilidAPIError::from)?;

let c = self.config.get();
let namespace = c.namespace.clone();
let dbpath: PathBuf = if namespace.is_empty() {
[tablestoredir, String::from(table)].iter().collect()
@ -41,7 +43,7 @@ impl TableStoreDriver {
// Ensure permissions are correct
ensure_file_private_owner(&dbpath).map_err(VeilidAPIError::internal)?;

log_tstore!(
veilid_log!(self trace
"opened table store '{}' at path '{:?}' with {} columns",
table_name,
dbpath,
@ -53,9 +55,13 @@ impl TableStoreDriver {
pub async fn delete(&self, table_name: &str) -> VeilidAPIResult<bool> {
let dbpath = self.get_dbpath(table_name)?;
if !dbpath.exists() {
veilid_log!(self debug "TableStore::delete '{}' at path '{:?}' not deleted", table_name, dbpath);
return Ok(false);
}

veilid_log!(self trace "TableStore::delete '{}' at path '{:?}' deleted", table_name, dbpath);
std::fs::remove_file(dbpath).map_err(VeilidAPIError::from)?;

Ok(true)
}
}

@ -10,6 +10,8 @@ cfg_if! {
}
}

impl_veilid_log_facility!("tstore");

struct CryptInfo {
typed_key: TypedSharedSecret,
}
@ -27,7 +29,6 @@ pub struct TableDBUnlockedInner {
encrypt_info: Option<CryptInfo>,
decrypt_info: Option<CryptInfo>,
}
impl_veilid_component_registry_accessor!(TableDBUnlockedInner);

impl fmt::Debug for TableDBUnlockedInner {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
@ -37,7 +38,7 @@ impl fmt::Debug for TableDBUnlockedInner {

impl Drop for TableDBUnlockedInner {
fn drop(&mut self) {
let table_store = self.table_store();
let table_store = self.registry.table_store();
table_store.on_table_db_drop(self.table.clone());
}
}
@ -48,6 +49,11 @@ pub struct TableDB {
unlocked_inner: Arc<TableDBUnlockedInner>,
}

impl VeilidComponentRegistryAccessor for TableDB {
fn registry(&self) -> VeilidComponentRegistry {
self.unlocked_inner.registry.clone()
}
}
impl TableDB {
pub(super) fn new(
table: String,
@ -69,8 +75,8 @@ impl TableDB {
opened_column_count
},
unlocked_inner: Arc::new(TableDBUnlockedInner {
table,
registry,
table,
database,
encrypt_info,
decrypt_info,
@ -100,10 +106,6 @@ impl TableDB {
Arc::downgrade(&self.unlocked_inner)
}

pub(super) fn crypto(&self) -> VeilidComponentGuard<'_, Crypto> {
self.unlocked_inner.crypto()
}

/// Get the internal name of the table
pub fn table_name(&self) -> String {
self.unlocked_inner.table.clone()
@ -326,6 +328,7 @@ impl TableDB {
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////

struct TableDBTransactionInner {
registry: VeilidComponentRegistry,
dbt: Option<DBTransaction>,
}

@ -342,6 +345,15 @@ impl fmt::Debug for TableDBTransactionInner {
}
}

impl Drop for TableDBTransactionInner {
fn drop(&mut self) {
if self.dbt.is_some() {
let registry = &self.registry;
veilid_log!(registry warn "Dropped transaction without commit or rollback");
}
}
}

/// A TableDB transaction
/// Atomically commits a group of writes or deletes to the TableDB
#[derive(Debug, Clone)]
@ -352,9 +364,13 @@ pub struct TableDBTransaction {

impl TableDBTransaction {
fn new(db: TableDB, dbt: DBTransaction) -> Self {
let registry = db.registry();
Self {
db,
inner: Arc::new(Mutex::new(TableDBTransactionInner { dbt: Some(dbt) })),
inner: Arc::new(Mutex::new(TableDBTransactionInner {
registry,
dbt: Some(dbt),
})),
}
}

@ -425,11 +441,3 @@ impl TableDBTransaction {
Ok(())
}
}

impl Drop for TableDBTransactionInner {
fn drop(&mut self) {
if self.dbt.is_some() {
warn!("Dropped transaction without commit or rollback");
}
}
}

@ -3,16 +3,19 @@ pub use keyvaluedb_web::*;

#[derive(Clone)]
pub(in crate::table_store) struct TableStoreDriver {
config: VeilidConfig,
registry: VeilidComponentRegistry,
}

impl_veilid_component_registry_accessor!(TableStoreDriver);

impl TableStoreDriver {
pub fn new(config: VeilidConfig) -> Self {
Self { config }
pub fn new(registry: VeilidComponentRegistry) -> Self {
Self { registry }
}

fn get_namespaced_table_name(&self, table: &str) -> String {
let c = self.config.get();
let config = self.registry().config();
let c = config.get();
let namespace = c.namespace.clone();
if namespace.is_empty() {
table.to_owned()
@ -26,7 +29,7 @@ impl TableStoreDriver {
let db = Database::open(&namespaced_table_name, column_count, false)
.await
.map_err(VeilidAPIError::generic)?;
log_tstore!(
veilid_log!(self trace
"opened table store '{}' with {} columns",
namespaced_table_name,
column_count
@ -40,9 +43,9 @@ impl TableStoreDriver {
let namespaced_table_name = self.get_namespaced_table_name(table_name);
let out = Database::delete(&namespaced_table_name).await.is_ok();
if out {
log_tstore!("TableStore::delete {} deleted", namespaced_table_name);
veilid_log!(self trace "TableStore::delete {} deleted", namespaced_table_name);
} else {
log_tstore!(debug "TableStore::delete {} not deleted", namespaced_table_name);
veilid_log!(self debug "TableStore::delete {} not deleted", namespaced_table_name);
}
Ok(out)
} else {

@ -20,7 +20,7 @@ pub extern "system" fn Java_com_veilid_veilid_1core_1android_1tests_MainActivity

pub fn veilid_core_setup_android_tests(env: JNIEnv, ctx: JObject) {
// Set up subscriber and layers
let filter = VeilidLayerFilter::new(VeilidConfigLogLevel::Info, &[]);
let filter = VeilidLayerFilter::new(VeilidConfigLogLevel::Info, &[], None);
let layer = paranoid_android::layer("veilid-core");
tracing_subscriber::registry()
.with(layer.with_filter(filter))

@ -16,7 +16,7 @@ pub extern "C" fn run_veilid_core_tests() {

pub fn veilid_core_setup_ios_tests() {
// Set up subscriber and layers
let filter = VeilidLayerFilter::new(VeilidConfigLogLevel::Info, &[]);
let filter = VeilidLayerFilter::new(VeilidConfigLogLevel::Info, &[], None);
tracing_subscriber::registry()
.with(OsLogger::new("com.veilid.veilidcore-tests", "").with_filter(filter))
.init();

@ -9,6 +9,8 @@ use once_cell::sync::Lazy;
use routing_table::*;
use std::fmt::Write;

impl_veilid_log_facility!("veilid_debug");

#[derive(Default)]
pub(crate) struct DebugCache {
pub imported_routes: Vec<RouteId>,
@ -1280,7 +1282,7 @@ impl VeilidAPI {
let blob_data = RouteSpecStore::private_routes_to_blob(&private_routes)
.map_err(VeilidAPIError::internal)?;
let out = BASE64URL_NOPAD.encode(&blob_data);
info!(
veilid_log!(registry info
"Published route {} as {} bytes:\n{}",
route_id.encode(),
blob_data.len(),
@ -2277,7 +2279,7 @@ TableDB Operations:
let Some(private_route) = rss.best_remote_private_route(&prid) else {
// Remove imported route
dc.imported_routes.remove(n);
info!("removed dead imported route {}", n);
veilid_log!(registry info "removed dead imported route {}", n);
return None;
};
Some(private_route)

@ -37,10 +37,10 @@ packages:
dependency: transitive
description:
name: change_case
sha256: "99cfdf2018c627c8a3af5a23ea4c414eb69c75c31322d23b9660ebc3cf30b514"
sha256: e41ef3df58521194ef8d7649928954805aeb08061917cf658322305e61568003
url: "https://pub.dev"
source: hosted
version: "2.1.0"
version: "2.2.0"
characters:
dependency: transitive
description:
@ -53,10 +53,10 @@ packages:
dependency: transitive
description:
name: charcode
sha256: fb98c0f6d12c920a02ee2d998da788bca066ca5f148492b7085ee23372b12306
sha256: fb0f1107cac15a5ea6ef0a6ef71a807b9e4267c713bb93e00e92d737cc8dbd8a
url: "https://pub.dev"
source: hosted
version: "1.3.1"
version: "1.4.0"
clock:
dependency: transitive
description:
@ -93,10 +93,10 @@ packages:
dependency: transitive
description:
name: equatable
sha256: c2b87cb7756efdf69892005af546c56c0b5037f54d2a88269b4f347a505e3ca2
sha256: "567c64b3cb4cf82397aac55f4f0cbd3ca20d77c6c03bedbc4ceaddc08904aef7"
url: "https://pub.dev"
source: hosted
version: "2.0.5"
version: "2.0.7"
fake_async:
dependency: transitive
description:
@ -235,10 +235,10 @@ packages:
dependency: transitive
description:
name: macos_window_utils
sha256: "230be594d26f6dee92c5a1544f4242d25138a5bfb9f185b27f14de3949ef0be8"
sha256: "3534f2af024f2f24112ca28789a44e6750083f8c0065414546c6593ee48a5009"
url: "https://pub.dev"
source: hosted
version: "1.5.0"
version: "1.6.1"
matcher:
dependency: transitive
description:
@ -275,26 +275,26 @@ packages:
dependency: "direct main"
description:
name: path_provider
sha256: fec0d61223fba3154d87759e3cc27fe2c8dc498f6386c6d6fc80d1afdd1bf378
sha256: "50c5dd5b6e1aaf6fb3a78b33f6aa3afca52bf903a8a5298f53101fdaee55bbcd"
url: "https://pub.dev"
source: hosted
version: "2.1.4"
version: "2.1.5"
path_provider_android:
dependency: transitive
description:
name: path_provider_android
sha256: c464428172cb986b758c6d1724c603097febb8fb855aa265aeecc9280c294d4a
sha256: "4adf4fd5423ec60a29506c76581bc05854c55e3a0b72d35bb28d661c9686edf2"
url: "https://pub.dev"
source: hosted
version: "2.2.12"
version: "2.2.15"
path_provider_foundation:
dependency: transitive
description:
name: path_provider_foundation
sha256: f234384a3fdd67f989b4d54a5d73ca2a6c422fa55ae694381ae0f4375cd1ea16
sha256: "4843174df4d288f5e29185bd6e72a6fbdf5a4a4602717eed565497429f179942"
url: "https://pub.dev"
source: hosted
version: "2.4.0"
version: "2.4.1"
path_provider_linux:
dependency: transitive
description:

@ -238,7 +238,7 @@ pub extern "C" fn initialize_veilid_core(platform_config: FfiStr) {
cfg_if! {
if #[cfg(target_os = "android")] {
let filter =
veilid_core::VeilidLayerFilter::new(platform_config.logging.terminal.level, &platform_config.logging.terminal.ignore_log_targets);
veilid_core::VeilidLayerFilter::new(platform_config.logging.terminal.level, &platform_config.logging.terminal.ignore_log_targets, None);
let layer = paranoid_android::layer("veilid-flutter")
.with_ansi(false)
.with_filter(filter.clone());
@ -246,7 +246,7 @@ pub extern "C" fn initialize_veilid_core(platform_config: FfiStr) {
layers.push(layer.boxed());
} else if #[cfg(target_os = "ios")] {
let filter =
veilid_core::VeilidLayerFilter::new(platform_config.logging.terminal.level, &platform_config.logging.terminal.ignore_log_targets);
veilid_core::VeilidLayerFilter::new(platform_config.logging.terminal.level, &platform_config.logging.terminal.ignore_log_targets, None);
let layer = tracing_subscriber::fmt::Layer::new()
.compact()
.with_ansi(false)
@ -256,7 +256,7 @@ pub extern "C" fn initialize_veilid_core(platform_config: FfiStr) {
layers.push(layer.boxed());
} else {
let filter =
veilid_core::VeilidLayerFilter::new(platform_config.logging.terminal.level, &platform_config.logging.terminal.ignore_log_targets);
veilid_core::VeilidLayerFilter::new(platform_config.logging.terminal.level, &platform_config.logging.terminal.ignore_log_targets, None);
let layer = tracing_subscriber::fmt::Layer::new()
.compact()
.with_writer(std::io::stdout)
@ -309,6 +309,7 @@ pub extern "C" fn initialize_veilid_core(platform_config: FfiStr) {
let filter = veilid_core::VeilidLayerFilter::new(
platform_config.logging.otlp.level,
&platform_config.logging.otlp.ignore_log_targets,
None,
);
let layer = tracing_opentelemetry::layer()
.with_tracer(tracer)
@ -325,6 +326,7 @@ pub extern "C" fn initialize_veilid_core(platform_config: FfiStr) {
.iter()
.map(|&x| x.to_string())
.collect::<Vec<_>>(),
None,
);
let (flame_layer, guard) =
FlameLayer::with_file(&platform_config.logging.flame.path).unwrap();
@ -347,6 +349,7 @@ pub extern "C" fn initialize_veilid_core(platform_config: FfiStr) {
let filter = veilid_core::VeilidLayerFilter::new(
platform_config.logging.api.level,
&platform_config.logging.api.ignore_log_targets,
None,
);
let layer = veilid_core::ApiTracingLayer::init().with_filter(filter.clone());
filters.insert("api", filter);

@ -22,10 +22,6 @@ use crate::settings::*;
use clap::{Args, Parser};
use server::*;
use settings::LogLevel;
use std::collections::HashMap;
use std::ffi::OsString;
use std::path::Path;
use std::str::FromStr;
use tools::*;
use veilid_core::{TypedKeyGroup, TypedSecretGroup};
use veilid_logs::*;
@ -85,11 +81,12 @@ pub struct CmdlineArgs {
otlp: Option<String>,

/// Turn on flamegraph tracing (experimental)
#[cfg(feature = "flame")]
#[arg(long, hide = true, value_name = "PATH", num_args=0..=1, require_equals=true, default_missing_value = "")]
flame: Option<OsString>,

/// Turn on perfetto tracing (experimental)
#[cfg(unix)]
#[cfg(all(unix, feature = "perfetto"))]
#[arg(long, hide = true, value_name = "PATH", num_args=0..=1, require_equals=true, default_missing_value = "")]
perfetto: Option<OsString>,

@ -31,6 +31,8 @@ lazy_static! {
};
}

pub const PROGRAM_NAME: &str = "veilid-server";

pub fn load_default_config() -> EyreResult<config::Config> {
#[cfg(not(feature = "geolocation"))]
let privacy_section = "";
@ -1259,12 +1261,8 @@ impl Settings {
let inner = inner.read();

let out: ConfigCallbackReturn = match key.as_str() {
"program_name" => Ok(Box::new("veilid-server".to_owned())),
"namespace" => Ok(Box::new(if subnode == 0 {
"".to_owned()
} else {
format!("subnode{}", subnode)
})),
"program_name" => Ok(Box::new(PROGRAM_NAME.to_owned())),
"namespace" => Ok(Box::new(subnode_namespace(subnode))),
"capabilities.disable" => {
let mut caps = Vec::<FourCC>::new();
for c in &inner.core.capabilities.disable {
@ -1697,6 +1695,14 @@ impl Settings {
}
}

pub fn subnode_namespace(subnode_index: u16) -> String {
if subnode_index == 0 {
"".to_owned()
} else {
format!("subnode{}", subnode_index)
}
}

#[cfg(test)]
mod tests {
use super::*;

@ -2,6 +2,10 @@ pub use cfg_if::*;
pub use color_eyre::eyre::{bail, eyre, Result as EyreResult, WrapErr};
pub use core::future::Future;
pub use parking_lot::*;
pub use std::collections::{HashMap, HashSet};
pub use std::ffi::OsString;
pub use std::path::Path;
pub use std::str::FromStr;
pub use tracing::*;

cfg_if! {

@ -22,6 +22,7 @@ use tracing_flame::FlameLayer;
use tracing_perfetto::PerfettoLayer;
use tracing_subscriber::prelude::*;
use tracing_subscriber::*;
use veilid_core::VEILID_LOG_KEY_FIELD;

struct VeilidLogsInner {
_file_guard: Option<non_blocking::WorkerGuard>,
@ -35,6 +36,20 @@ pub struct VeilidLogs {
inner: Arc<Mutex<VeilidLogsInner>>,
}

fn make_primary_log_key_filter(
subnode_index: u16,
empty_namespace_enabled: bool,
) -> veilid_core::VeilidLayerLogKeyFilter {
let namespace = subnode_namespace(subnode_index);
let log_key = veilid_core::VeilidLayerFilter::make_veilid_log_key(PROGRAM_NAME, &namespace);
Arc::new(move |ns| {
if ns.is_empty() {
return empty_namespace_enabled;
}
ns == log_key
})
}

impl VeilidLogs {
pub fn setup(settings: Settings) -> EyreResult<VeilidLogs> {
let settingsr = settings.read();
@ -49,11 +64,15 @@ impl VeilidLogs {
// XXX:
//layers.push(tracing_error::ErrorLayer::default().boxed());

let mut fields_to_strip = HashSet::<&'static str>::new();
fields_to_strip.insert(VEILID_LOG_KEY_FIELD);

#[cfg(feature = "rt-tokio")]
if settingsr.logging.console.enabled {
let filter = veilid_core::VeilidLayerFilter::new_no_default(
veilid_core::VeilidConfigLogLevel::Trace,
&[],
None,
);

let layer = ConsoleLayer::builder()
@ -81,13 +100,19 @@ impl VeilidLogs {
let filter = veilid_core::VeilidLayerFilter::new(
convert_loglevel(settingsr.logging.terminal.level),
&settingsr.logging.terminal.ignore_log_targets,
Some(make_primary_log_key_filter(
settingsr.testing.subnode_index,
true,
)),
);
let layer = fmt::Layer::new()
.compact()
.map_fmt_fields(|f| veilid_core::FmtStripFields::new(f, fields_to_strip.clone()))
.with_timer(timer)
.with_ansi(true)
.with_writer(std::io::stdout)
.with_filter(filter.clone());

filters.insert("terminal", filter);
layers.push(layer.boxed());
}
@ -103,6 +128,7 @@ impl VeilidLogs {
.iter()
.map(|&x| x.to_string())
.collect::<Vec<_>>(),
None,
);
let (flame_layer, guard) = FlameLayer::with_file(&settingsr.logging.flame.path)?;
flame_guard = Some(guard);
@ -126,6 +152,7 @@ impl VeilidLogs {
.iter()
.map(|&x| x.to_string())
.collect::<Vec<_>>(),
None,
);
let perfetto_layer = PerfettoLayer::new(std::sync::Mutex::new(std::fs::File::create(
&settingsr.logging.perfetto.path,
@ -182,6 +209,7 @@ impl VeilidLogs {
let filter = veilid_core::VeilidLayerFilter::new(
convert_loglevel(settingsr.logging.otlp.level),
&settingsr.logging.otlp.ignore_log_targets,
None,
);
let layer = tracing_opentelemetry::layer()
.with_tracer(tracer)
@ -218,6 +246,10 @@ impl VeilidLogs {
let filter = veilid_core::VeilidLayerFilter::new(
convert_loglevel(settingsr.logging.file.level),
&settingsr.logging.file.ignore_log_targets,
Some(make_primary_log_key_filter(
settingsr.testing.subnode_index,
true,
)),
);
let layer = fmt::Layer::new()
.compact()
@ -233,6 +265,7 @@ impl VeilidLogs {
let filter = veilid_core::VeilidLayerFilter::new(
convert_loglevel(settingsr.logging.api.level),
&settingsr.logging.api.ignore_log_targets,
None,
);
let layer = veilid_core::ApiTracingLayer::init().with_filter(filter.clone());
filters.insert("api", filter);
@ -246,6 +279,10 @@ impl VeilidLogs {
let filter = veilid_core::VeilidLayerFilter::new(
convert_loglevel(settingsr.logging.system.level),
&settingsr.logging.system.ignore_log_targets,
Some(make_primary_log_key_filter(
settingsr.testing.subnode_index,
true,
)),
);
let layer = tracing_journald::layer().wrap_err("failed to set up journald logging")?
.with_filter(filter.clone());

@ -386,10 +386,6 @@ impl NetworkInterfaces {

// See if our best addresses have changed
if old_stable_addresses != inner.interface_address_cache {
debug!(
"Network interface addresses changed: \nFrom: {:?}\n To: {:?}\n",
old_stable_addresses, inner.interface_address_cache
);
return Ok(true);
}
}