Mirror of https://gitlab.com/veilid/veilid.git (synced 2024-12-24 14:59:31 -05:00)

commit 14cd561e09
Merge branch 'improved-address-detection' into 'main'

Improved Address Detection

See merge request veilid/veilid!322
CHANGELOG.md | 33

@@ -1,3 +1,36 @@
**Changed in Veilid 0.3.5** _WIP_

- Dialinfo detection issues:
  - Added a publish() as well as a commit() for the routing domain editor (sketched below)
  - Our peer info is now only published after we're sure we are done editing it (at the end of the public address detection task)
  - Publish also happens after relay selection
  - Publish happens if the relay's peerinfo has changed
  - Publish does nothing if the peerinfo hasn't changed
  - PeerInfo -> Arc<PeerInfo> everywhere, to minimize deep clones and ensure read-only PeerInfo
  - Routing domain editing is now more atomic
  - When a node selects a relay, it now immediately protects its connections
  - Made the dial info port (for port-restricted NAT) more resilient to changes in case there are multiple mappings
  - Relays that drop protected connections are deprioritized for relay selection (table saturation detection)
  - Removed clear_network_callback in do_public_dial_info_check; it was a kludge
  - Raised the bar for dialinfo changes when it's just the port that changed
  - Pinging a node on the same network works again
  - resolve_node() never returns a dead node, even when we want to try to communicate with it again
  - Removed 'bad public address' detection, as it wasn't working anyway
  - Added separate parallelism lanes for relay keepalive pings and peer liveness check pings, as the former are higher priority
  - Changed send_data to always check the cache for a contact method first instead of going with filtered active flows first; this avoids choosing UDP when a preferable TCP connection could be made
  - Nodes that are not relay capable now drop relayed packets
- DHT issues:
  - Made setvalue more likely to succeed by accepting a getvalue consensus if a full setvalue consensus is not reached
  - Offline subkey writes were cleared too fast; they should be thought of as 'subkeys not yet synchronized'
  - If set_value is partial / in-flight, it should still be in offline_subkey_writes
  - Make an inflight_subkey_writes list (and probably some bit for 'written_while_inflight') so we don't clear the offline_subkey_writes until they're really written
- API additions:
  - VeilidConfigInner::new parameterization for easier config from Rust apps
  - Removed veilid-server specific paths from veilid-core defaults
  - Lots more stats about node performance in PeerStats

**Changed in Veilid 0.3.4**

- Crates updates
  - Update crates to newer versions
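The first dialinfo bullet is the core of the fix: edits to a routing domain accumulate locally, and the resulting PeerInfo is only pushed to the network once, when it has actually changed. A minimal standalone sketch of that commit/publish split, using hypothetical types rather than the veilid-core API:

use std::sync::Arc;

#[derive(Clone, PartialEq, Debug, Default)]
struct PeerInfo {
    addresses: Vec<String>,
}

#[derive(Default)]
struct RoutingDomainEditor {
    committed: Arc<PeerInfo>,
    pending: PeerInfo,
    published: Option<Arc<PeerInfo>>,
}

impl RoutingDomainEditor {
    fn add_address(&mut self, addr: &str) {
        self.pending.addresses.push(addr.to_string());
    }
    /// Apply pending edits locally; a cheap Arc swap, no network traffic
    fn commit(&mut self) {
        self.committed = Arc::new(self.pending.clone());
    }
    /// Push the committed PeerInfo to the network, but only if it changed
    fn publish(&mut self) {
        if self.published.as_deref() == Some(&*self.committed) {
            return; // nothing changed since the last publish
        }
        self.published = Some(self.committed.clone());
        println!("publishing peer info: {:?}", self.committed);
    }
}

fn main() {
    let mut editor = RoutingDomainEditor::default();
    editor.add_address("203.0.113.7:5150");
    editor.commit(); // may be called many times while editing
    editor.publish(); // publishes once
    editor.publish(); // no-op: peer info unchanged
}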
Cargo.lock (generated) | 4

@@ -5955,9 +5955,9 @@ dependencies = [

 [[package]]
 name = "veilid-hashlink"
-version = "0.1.0"
+version = "0.1.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6a3dabbda02cfe176635dcaa18a021416ff2eb4d0b47a913e3fdc7f62049d7b1"
+checksum = "2070d1d09dad90091d23e49743408f82f8874994dec5ae0a8d3689b061bba426"
 dependencies = [
  "hashbrown 0.14.5",
  "serde",
@@ -143,7 +143,7 @@ impl CommandProcessor {
        disable [flag]            unset a flag
            valid flags in include:
                app_messages
-    Server Debug Commands:
+    Core Debug Commands:
        {}
 "#,
             indent_all_by(4, out)
@@ -88,7 +88,7 @@ enumset = { version = "1.1.3", features = ["serde"] }
 keyvaluedb = "0.1.2"
 range-set-blaze = "0.1.16"
 weak-table = "0.3.2"
-hashlink = { package = "veilid-hashlink", version = "0.1.0", features = [
+hashlink = { package = "veilid-hashlink", version = "0.1.1", features = [
     "serde_impl",
 ] }
@@ -97,6 +97,7 @@ use enumset::*;
 use eyre::{bail, eyre, Report as EyreReport, Result as EyreResult, WrapErr};
+#[allow(unused_imports)]
 use futures_util::stream::{FuturesOrdered, FuturesUnordered};
 use indent::*;
 use parking_lot::*;
 use schemars::{schema_for, JsonSchema};
 use serde::*;
veilid-core/src/network_manager/address_check.rs (new file) | 313

@@ -0,0 +1,313 @@
/// Address checker - keep track of how other nodes are seeing our node's address on a per-protocol basis
/// Used to determine if our address has changed and if we should re-publish new PeerInfo
use super::*;

/// Number of 'existing dialinfo inconsistent' results in the cache during inbound-capable to trigger detection
pub const ADDRESS_INCONSISTENCY_DETECTION_COUNT: usize = 5;

/// Number of consistent results in the cache during outbound-only to trigger detection
pub const ADDRESS_CONSISTENCY_DETECTION_COUNT: usize = 5;

/// Length of consistent/inconsistent result cache for detection
pub const ADDRESS_CHECK_CACHE_SIZE: usize = 10;

// pub const ADDRESS_CHECK_PEER_COUNT: usize = 256;
// /// Frequency of address checks
// pub const PUBLIC_ADDRESS_CHECK_TASK_INTERVAL_SECS: u32 = 60;
// /// Duration we leave nodes in the inconsistencies table
// pub const PUBLIC_ADDRESS_INCONSISTENCY_TIMEOUT_US: TimestampDuration =
//     TimestampDuration::new(300_000_000u64); // 5 minutes
// /// How long we punish nodes for lying about our address
// pub const PUBLIC_ADDRESS_INCONSISTENCY_PUNISHMENT_TIMEOUT_US: TimestampDuration =
//     TimestampDuration::new(3_600_000_000_u64); // 60 minutes

/// Address checker config
pub(crate) struct AddressCheckConfig {
    pub(crate) detect_address_changes: bool,
    pub(crate) ip6_prefix_size: usize,
}

#[derive(Copy, Clone, Debug, PartialEq, Eq, Ord, PartialOrd, Hash)]
struct AddressCheckCacheKey(RoutingDomain, ProtocolType, AddressType);

/// Address checker - keep track of how other nodes are seeing our node's address on a per-protocol basis
/// Used to determine if our address has changed and if we should re-publish new PeerInfo
pub(crate) struct AddressCheck {
    config: AddressCheckConfig,
    net: Network,
    current_network_class: BTreeMap<RoutingDomain, NetworkClass>,
    current_addresses: BTreeMap<AddressCheckCacheKey, HashSet<SocketAddress>>,
    // Used by InboundCapable to determine if we have changed our address or need to re-do our network class
    address_inconsistency_table: BTreeMap<AddressCheckCacheKey, usize>,
    // Used by OutboundOnly to determine if we should re-do our network class
    address_consistency_table: BTreeMap<AddressCheckCacheKey, LruCache<IpAddr, SocketAddress>>,
}

impl AddressCheck {
    pub fn new(config: AddressCheckConfig, net: Network) -> Self {
        Self {
            config,
            net,
            current_network_class: BTreeMap::new(),
            current_addresses: BTreeMap::new(),
            address_inconsistency_table: BTreeMap::new(),
            address_consistency_table: BTreeMap::new(),
        }
    }

    /// Accept a report of any peerinfo that has changed
    pub fn report_peer_info_change(&mut self, peer_info: Arc<PeerInfo>) {
        let routing_domain = peer_info.routing_domain();
        let network_class = peer_info.signed_node_info().node_info().network_class();

        self.current_network_class
            .insert(routing_domain, network_class);
        for protocol_type in ProtocolTypeSet::all() {
            for address_type in AddressTypeSet::all() {
                let acck = AddressCheckCacheKey(routing_domain, protocol_type, address_type);

                // Clear our current addresses so we can rebuild them for this routing domain
                self.current_addresses.remove(&acck);

                // Clear our history as well now so we start fresh when we get a new peer info
                self.address_inconsistency_table.remove(&acck);
                self.address_consistency_table.remove(&acck);
            }
        }

        for did in peer_info
            .signed_node_info()
            .node_info()
            .dial_info_detail_list()
        {
            // Strip port from direct and mapped addresses
            // as the incoming dialinfo may not match the outbound
            // connections' NAT mapping. In this case we only check for IP address changes.
            let socket_address =
                if did.class == DialInfoClass::Direct || did.class == DialInfoClass::Mapped {
                    did.dial_info.socket_address().with_port(0)
                } else {
                    did.dial_info.socket_address()
                };

            let address_type = did.dial_info.address_type();
            let protocol_type = did.dial_info.protocol_type();
            let acck = AddressCheckCacheKey(routing_domain, protocol_type, address_type);

            self.current_addresses
                .entry(acck)
                .or_default()
                .insert(socket_address);
        }
    }

    /// Accept a report of our address as seen by the other end of a flow, such
    /// as the StatusA response from a StatusQ
    pub fn report_socket_address_change(
        &mut self,
        routing_domain: RoutingDomain, // the routing domain used by this flow
        socket_address: SocketAddress, // the socket address as seen by the remote peer
        old_socket_address: Option<SocketAddress>, // the socket address previously for this peer
        flow: Flow,                    // the flow used
        reporting_peer: NodeRef,       // the peer's noderef reporting the socket address
    ) {
        // Don't accept any reports if we're already in the middle of a public dial info check
        if self.net.needs_public_dial_info_check() {
            return;
        }

        // Ignore the LocalNetwork routing domain because we know if our local addresses change
        // from our interfaces
        if matches!(routing_domain, RoutingDomain::LocalNetwork) {
            return;
        }

        // Ignore flows that do not start from our listening port (unbound connections etc),
        // because a router is going to map these differently
        let Some(pla) = self
            .net
            .get_preferred_local_address_by_key(flow.protocol_type(), flow.address_type())
        else {
            return;
        };
        let Some(local) = flow.local() else {
            return;
        };
        if local.port() != pla.port() {
            log_network_result!(debug "ignoring address report because local port did not match listener: {} != {}", local.port(), pla.port());
            return;
        }

        // Get the ip(block) this report is coming from
        let reporting_ipblock =
            ip_to_ipblock(self.config.ip6_prefix_size, flow.remote_address().ip_addr());

        // Reject public address reports from nodes that we know are behind symmetric nat or
        // nodes that must be using a relay for everything
        let Some(reporting_node_info) = reporting_peer.node_info(routing_domain) else {
            return;
        };
        if reporting_node_info.network_class() != NetworkClass::InboundCapable {
            return;
        }

        // If the socket address reported is the same as the reporter, then this is coming through a relay
        // or it should be ignored due to local proximity (nodes on the same network block should not be trusted as
        // public ip address reporters, only disinterested parties)
        if reporting_ipblock == ip_to_ipblock(self.config.ip6_prefix_size, socket_address.ip_addr())
        {
            return;
        }

        // Get current network class / dial info
        // If we haven't gotten our own network class yet we're done for now
        let Some(network_class) = self.current_network_class.get(&routing_domain) else {
            return;
        };

        // Process the state of the address checker and see if we need to
        // perform a full address check for this routing domain
        let needs_address_detection = match network_class {
            NetworkClass::InboundCapable => self.detect_for_inbound_capable(
                routing_domain,
                socket_address,
                old_socket_address,
                flow,
                reporting_peer,
            ),
            NetworkClass::OutboundOnly => self.detect_for_outbound_only(
                routing_domain,
                socket_address,
                flow,
                reporting_ipblock,
            ),
            NetworkClass::WebApp | NetworkClass::Invalid => {
                return;
            }
        };

        if needs_address_detection {
            if self.config.detect_address_changes {
                // Reset the address check cache now so we can start detecting fresh
                info!(
                    "{:?} address has changed, detecting dial info",
                    routing_domain
                );

                // Re-detect the public dialinfo
                self.net.set_needs_public_dial_info_check(None);
            } else {
                warn!(
                    "{:?} address may have changed. Restarting the server may be required.",
                    routing_domain
                );
            }
        }
    }

    fn matches_current_address(
        &self,
        acckey: AddressCheckCacheKey,
        socket_address: SocketAddress,
    ) -> bool {
        self.current_addresses
            .get(&acckey)
            .map(|current_addresses| {
                current_addresses.contains(&socket_address)
                    || current_addresses.contains(&socket_address.with_port(0))
            })
            .unwrap_or(false)
    }

    // If we are inbound capable, but start to see places where our sender info used to match our dial info
    // but no longer matches our dial info (count up the number of changes -away- from our dial info)
    // then trigger a detection of dial info and network class
    fn detect_for_inbound_capable(
        &mut self,
        routing_domain: RoutingDomain, // the routing domain used by this flow
        socket_address: SocketAddress, // the socket address as seen by the remote peer
        old_socket_address: Option<SocketAddress>, // the socket address previously for this peer
        flow: Flow,                    // the flow used
        reporting_peer: NodeRef,       // the peer's noderef reporting the socket address
    ) -> bool {
        let acckey =
            AddressCheckCacheKey(routing_domain, flow.protocol_type(), flow.address_type());

        // Check the current socket address and see if it matches our current dial info
        let new_matches_current = self.matches_current_address(acckey, socket_address);

        // If we have something that matches our current dial info at all, consider it a validation
        if new_matches_current {
            self.address_inconsistency_table
                .entry(acckey)
                .and_modify(|ait| {
                    if *ait != 0 {
                        log_net!(debug "Resetting address inconsistency for {:?} due to match on flow {:?} from {}", acckey, flow, reporting_peer);
                    }
                    *ait = 0;
                })
                .or_insert(0);
            return false;
        }

        // See if we have a case of switching away from our dial info
        let old_matches_current = old_socket_address
            .map(|osa| self.matches_current_address(acckey, osa))
            .unwrap_or(false);

        if old_matches_current {
            let val = *self
                .address_inconsistency_table
                .entry(acckey)
                .and_modify(|ait| {
                    *ait += 1;
                })
                .or_insert(1);
            log_net!(debug "Adding address inconsistency ({}) for {:?} due to address {} on flow {:?} from {}", val, acckey, socket_address, flow, reporting_peer);
            return val >= ADDRESS_INCONSISTENCY_DETECTION_COUNT;
        }

        false
    }

    // If we are currently outbound only, we don't have any public dial info,
    // but if we are starting to see a consistent socket address from multiple reporting peers
    // then we may have become inbound capable, so zap the network class so we can re-detect it and any public dial info.
    // LRU the addresses we're seeing and if they all match (same ip only?) then trigger
    fn detect_for_outbound_only(
        &mut self,
        routing_domain: RoutingDomain, // the routing domain used by this flow
        socket_address: SocketAddress, // the socket address as seen by the remote peer
        flow: Flow,                    // the flow used
        reporting_ipblock: IpAddr,     // the IP block this report came from
    ) -> bool {
        let acckey =
            AddressCheckCacheKey(routing_domain, flow.protocol_type(), flow.address_type());

        // Add the currently seen socket address into the consistency table
        let cache = self
            .address_consistency_table
            .entry(acckey)
            .and_modify(|act| {
                act.insert(reporting_ipblock, socket_address);
            })
            .or_insert_with(|| {
                let mut lruc = LruCache::new(ADDRESS_CHECK_CACHE_SIZE);
                lruc.insert(reporting_ipblock, socket_address);
                lruc
            });

        // If we have at least N consistencies then trigger a detect
        let mut consistencies = HashMap::<SocketAddress, usize>::new();
        for (_k, v) in cache.iter() {
            let count = *consistencies.entry(*v).and_modify(|e| *e += 1).or_insert(1);
            if count >= ADDRESS_CONSISTENCY_DETECTION_COUNT {
                log_net!(debug "Address consistency detected for {:?}: {}", acckey, v);
                return true;
            }
        }

        false
    }
}
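For outbound-only nodes the interesting branch is detect_for_outbound_only: it keys a small cache by the reporter's IP block (so one network can't stuff the sample) and triggers re-detection once enough distinct reporters agree on one external address. A self-contained sketch of that counting logic, using std maps in place of veilid's LruCache and types (simplified model, not the actual API):

use std::collections::HashMap;
use std::net::{IpAddr, SocketAddr};

const ADDRESS_CONSISTENCY_DETECTION_COUNT: usize = 5;

/// One entry per reporting IP block; a repeat report from the same
/// block just overwrites its previous observation.
#[derive(Default)]
struct ConsistencyTable {
    observations: HashMap<IpAddr, SocketAddr>,
}

impl ConsistencyTable {
    /// Record what one reporter saw and return true if enough distinct
    /// reporters now agree on a single external socket address.
    fn report(&mut self, reporting_ipblock: IpAddr, seen_address: SocketAddr) -> bool {
        self.observations.insert(reporting_ipblock, seen_address);

        let mut counts = HashMap::<SocketAddr, usize>::new();
        for addr in self.observations.values() {
            let count = counts.entry(*addr).and_modify(|c| *c += 1).or_insert(1);
            if *count >= ADDRESS_CONSISTENCY_DETECTION_COUNT {
                return true; // consistent address: re-detect network class
            }
        }
        false
    }
}

fn main() {
    let mut table = ConsistencyTable::default();
    let seen: SocketAddr = "203.0.113.7:5150".parse().unwrap();
    for i in 1..=5u8 {
        let reporter: IpAddr = format!("198.51.100.{i}").parse().unwrap();
        let triggered = table.report(reporter, seen);
        println!("report {i}: triggered={triggered}");
    }
}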
@@ -1,7 +1,6 @@
 use super::*;
-use alloc::collections::btree_map::Entry;

 // XXX: Move to config eventually?
 const PUNISHMENT_DURATION_MIN: usize = 60;
 const MAX_PUNISHMENTS_BY_NODE_ID: usize = 65536;
 const DIAL_INFO_FAILURE_DURATION_MIN: usize = 10;
@@ -4,6 +4,9 @@ use connection_table::*;
 use network_connection::*;
 use stop_token::future::FutureExt;

+const PROTECTED_CONNECTION_DROP_SPAN: TimestampDuration = TimestampDuration::new_secs(10);
+const PROTECTED_CONNECTION_DROP_COUNT: usize = 3;
+
 ///////////////////////////////////////////////////////////
 // Connection manager

@@ -38,13 +41,21 @@ impl Drop for ConnectionRefScope {
     }
 }

+#[derive(Debug)]
+struct ProtectedAddress {
+    node_ref: NodeRef,
+    span_start_ts: Timestamp,
+    drops_in_span: usize,
+}
+
 #[derive(Debug)]
 struct ConnectionManagerInner {
     next_id: NetworkConnectionId,
     sender: flume::Sender<ConnectionManagerEvent>,
     async_processor_jh: Option<MustJoinHandle<()>>,
     stop_source: Option<StopSource>,
-    protected_addresses: HashMap<SocketAddress, NodeRef>,
+    protected_addresses: HashMap<SocketAddress, ProtectedAddress>,
+    reconnection_processor: DeferredStreamProcessor,
 }

 struct ConnectionManagerArc {

@@ -74,6 +85,7 @@ impl ConnectionManager {
         stop_source: StopSource,
         sender: flume::Sender<ConnectionManagerEvent>,
         async_processor_jh: MustJoinHandle<()>,
+        reconnection_processor: DeferredStreamProcessor,
     ) -> ConnectionManagerInner {
         ConnectionManagerInner {
             next_id: 0.into(),

@@ -81,6 +93,7 @@ impl ConnectionManager {
             sender,
             async_processor_jh: Some(async_processor_jh),
             protected_addresses: HashMap::new(),
+            reconnection_processor,
         }
     }
     fn new_arc(network_manager: NetworkManager) -> ConnectionManagerArc {

@@ -123,11 +136,6 @@ impl ConnectionManager {

         log_net!(debug "startup connection manager");

-        let mut inner = self.arc.inner.lock();
-        if inner.is_some() {
-            panic!("shouldn't start connection manager twice without shutting it down first");
-        }
-
         // Create channel for async_processor to receive notifications of networking events
         let (sender, receiver) = flume::unbounded();

@@ -140,8 +148,21 @@ impl ConnectionManager {
             self.clone().async_processor(stop_source.token(), receiver),
         );

+        // Spawn the reconnection processor
+        let mut reconnection_processor = DeferredStreamProcessor::new();
+        reconnection_processor.init().await;
+
         // Store in the inner object
-        *inner = Some(Self::new_inner(stop_source, sender, async_processor));
+        let mut inner = self.arc.inner.lock();
+        if inner.is_some() {
+            panic!("shouldn't start connection manager twice without shutting it down first");
+        }
+        *inner = Some(Self::new_inner(
+            stop_source,
+            sender,
+            async_processor,
+            reconnection_processor,
+        ));

         guard.success();

@@ -165,7 +186,9 @@ impl ConnectionManager {
             }
         };

+        // Stop the reconnection processor
+        log_net!(debug "stopping reconnection processor task");
+        inner.reconnection_processor.terminate().await;
         // Stop all the connections and the async processor
         log_net!(debug "stopping async processor task");
         drop(inner.stop_source.take());

@@ -191,7 +214,7 @@ impl ConnectionManager {
         inner
             .protected_addresses
             .get(conn.flow().remote_address())
-            .cloned()
+            .map(|x| x.node_ref.clone())
     }

     // Update connection protections if things change, like a node becomes a relay

@@ -205,8 +228,12 @@ impl ConnectionManager {
             return;
         };

-        // Get addresses for relays in all routing domains
-        inner.protected_addresses.clear();
+        // Protect addresses for relays in all routing domains
+        let mut dead_addresses = inner
+            .protected_addresses
+            .keys()
+            .cloned()
+            .collect::<HashSet<_>>();
         for routing_domain in RoutingDomainSet::all() {
             let Some(relay_node) = self
                 .network_manager()

@@ -218,12 +245,28 @@ impl ConnectionManager {
             for did in relay_node.dial_info_details() {
                 // SocketAddress are distinct per routing domain, so they should not collide
                 // and two nodes should never have the same SocketAddress
+                let protected_address = did.dial_info.socket_address();
+
+                // Update the protection, and note that the protected address is not dead
+                dead_addresses.remove(&protected_address);
                 inner
                     .protected_addresses
-                    .insert(did.dial_info.socket_address(), relay_node.unfiltered());
+                    .entry(protected_address)
+                    .and_modify(|pa| pa.node_ref = relay_node.unfiltered())
+                    .or_insert_with(|| ProtectedAddress {
+                        node_ref: relay_node.unfiltered(),
+                        span_start_ts: Timestamp::now(),
+                        drops_in_span: 0usize,
+                    });
             }
         }

+        // Remove protected addresses that are no longer associated with a protected noderef
+        for dead_address in dead_addresses {
+            inner.protected_addresses.remove(&dead_address);
+        }
+
         // For all connections, register the protection
         self.arc
             .connection_table
             .with_all_connections_mut(|conn| {

@@ -248,6 +291,7 @@ impl ConnectionManager {
         &self,
         inner: &mut ConnectionManagerInner,
         prot_conn: ProtocolNetworkConnection,
+        opt_dial_info: Option<DialInfo>,
     ) -> EyreResult<NetworkResult<ConnectionHandle>> {
         // Get next connection id to use
         let id = inner.next_id;

@@ -264,7 +308,13 @@ impl ConnectionManager {
             None => bail!("not creating connection because we are stopping"),
         };

-        let mut conn = NetworkConnection::from_protocol(self.clone(), stop_token, prot_conn, id);
+        let mut conn = NetworkConnection::from_protocol(
+            self.clone(),
+            stop_token,
+            prot_conn,
+            id,
+            opt_dial_info,
+        );
         let handle = conn.get_handle();

         // See if this should be a protected connection

@@ -281,6 +331,7 @@ impl ConnectionManager {
             Ok(Some(conn)) => {
                 // Connection added and a different one LRU'd out
                 // Send it to be terminated
+                #[cfg(feature = "verbose-tracing")]
                 log_net!(debug "== LRU kill connection due to limit: {:?}", conn.debug_print(Timestamp::now()));
                 let _ = inner.sender.send(ConnectionManagerEvent::Dead(conn));
             }

@@ -438,7 +489,7 @@ impl ConnectionManager {
             }
         };

-        self.on_new_protocol_network_connection(inner, prot_conn)
+        self.on_new_protocol_network_connection(inner, prot_conn, Some(dial_info))
     }

     ///////////////////////////////////////////////////////////////////////////////////////////////////////

@@ -472,7 +523,7 @@ impl ConnectionManager {
                     // We don't care if this fails, since nobody here asked for the inbound connection.
                     // If it does, we just drop the connection

-                    let _ = self.on_new_protocol_network_connection(inner, prot_conn);
+                    let _ = self.on_new_protocol_network_connection(inner, prot_conn, None);
                 }
                 None => {
                     // If this somehow happens, we're shutting down

@@ -559,14 +610,81 @@ impl ConnectionManager {
         // Inform the processor of the event
         if let Some(conn) = conn {
             // If the connection closed while it was protected, report it on the node the connection was established on
-            // In-use connections will already get reported because they will cause a 'question_lost' stat on the remote node
+            // In-use connections will already get reported because they will cause a 'lost_answer' stat on the remote node
             if let Some(protect_nr) = conn.protected_node_ref() {
+                // Find the protected address and increase our drop count
+                if let Some(inner) = self.arc.inner.lock().as_mut() {
+                    for pa in inner.protected_addresses.values_mut() {
+                        if pa.node_ref.same_entry(&protect_nr) {
+                            // See if we've had more than the threshold number of drops in the last span
+                            let cur_ts = Timestamp::now();
+                            let duration = cur_ts.saturating_sub(pa.span_start_ts);
+
+                            let mut reconnect = true;
+
+                            if duration < PROTECTED_CONNECTION_DROP_SPAN {
+                                pa.drops_in_span += 1;
+                                log_net!(debug "== Protected connection dropped (count={}): {} -> {} for node {}", pa.drops_in_span, conn.connection_id(), conn.debug_print(Timestamp::now()), protect_nr);
+
+                                if pa.drops_in_span >= PROTECTED_CONNECTION_DROP_COUNT {
+                                    // Consider this a failure to send if we've dropped the connection too many times in a single timespan
+                                    protect_nr.report_protected_connection_dropped();
+                                    reconnect = false;
+
+                                    // Reset the drop counter
+                                    pa.drops_in_span = 0;
+                                    pa.span_start_ts = cur_ts;
+                                }
+                            } else {
+                                // Otherwise, just reset the drop detection span
+                                pa.drops_in_span = 1;
+                                pa.span_start_ts = cur_ts;
+
+                                log_net!(debug "== Protected connection dropped (count={}): {} -> {} for node {}", pa.drops_in_span, conn.connection_id(), conn.debug_print(Timestamp::now()), protect_nr);
+                            }
+
+                            // Reconnect the protected connection immediately
+                            if reconnect {
+                                if let Some(dial_info) = conn.dial_info() {
+                                    self.spawn_reconnector_inner(inner, dial_info);
+                                } else {
+                                    log_net!(debug "Can't reconnect to accepted protected connection: {} -> {} for node {}", conn.connection_id(), conn.debug_print(Timestamp::now()), protect_nr);
+                                }
+                            }
+
+                            break;
+                        }
+                    }
+                }
             }
             let _ = sender.send_async(ConnectionManagerEvent::Dead(conn)).await;
         }
     }

+    fn spawn_reconnector_inner(&self, inner: &mut ConnectionManagerInner, dial_info: DialInfo) {
+        let this = self.clone();
+        inner.reconnection_processor.add(
+            Box::pin(futures_util::stream::once(async { dial_info })),
+            move |dial_info| {
+                let this = this.clone();
+                Box::pin(async move {
+                    match this.get_or_create_connection(dial_info.clone()).await {
+                        Ok(NetworkResult::Value(conn)) => {
+                            log_net!(debug "Reconnection successful to {}: {:?}", dial_info, conn);
+                        }
+                        Ok(res) => {
+                            log_net!(debug "Reconnection unsuccessful to {}: {:?}", dial_info, res);
+                        }
+                        Err(e) => {
+                            log_net!(debug "Reconnection error to {}: {}", dial_info, e);
+                        }
+                    }
+                    false
+                })
+            },
+        );
+    }
+
     pub async fn debug_print(&self) -> String {
         //let inner = self.arc.inner.lock();
         format!(
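The drop handling above rate-limits flapping relays: drops inside a 10-second span are counted, and only when the count reaches the threshold is the relay reported (and reconnection skipped). A standalone sketch of that windowed counter, with std::time standing in for veilid's Timestamp types (illustrative, not the actual API):

use std::time::{Duration, Instant};

const PROTECTED_CONNECTION_DROP_SPAN: Duration = Duration::from_secs(10);
const PROTECTED_CONNECTION_DROP_COUNT: usize = 3;

struct ProtectedAddress {
    span_start: Instant,
    drops_in_span: usize,
}

enum DropAction {
    /// Still under the threshold: reconnect immediately
    Reconnect,
    /// Too many drops in one span: report the relay and back off
    ReportAndBackOff,
}

impl ProtectedAddress {
    fn on_connection_dropped(&mut self) -> DropAction {
        let now = Instant::now();
        if now.duration_since(self.span_start) < PROTECTED_CONNECTION_DROP_SPAN {
            self.drops_in_span += 1;
            if self.drops_in_span >= PROTECTED_CONNECTION_DROP_COUNT {
                // Reset the counter and treat this as a failure to send
                self.drops_in_span = 0;
                self.span_start = now;
                return DropAction::ReportAndBackOff;
            }
        } else {
            // A new span starts with this drop
            self.drops_in_span = 1;
            self.span_start = now;
        }
        DropAction::Reconnect
    }
}

fn main() {
    let mut pa = ProtectedAddress { span_start: Instant::now(), drops_in_span: 0 };
    for n in 1..=3 {
        match pa.on_connection_dropped() {
            DropAction::Reconnect => println!("drop {n}: reconnect"),
            DropAction::ReportAndBackOff => println!("drop {n}: report relay, back off"),
        }
    }
}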
@@ -47,9 +47,16 @@ impl NetworkManager {
             return Ok(Vec::new());
         });

-        let bootstrap_peerinfo: Vec<PeerInfo> =
-            deserialize_json(std::str::from_utf8(&out_data).wrap_err("bad utf8 in boot peerinfo")?)
-                .wrap_err("failed to deserialize boot peerinfo")?;
+        let bootstrap_peerinfo_str =
+            std::str::from_utf8(&out_data).wrap_err("bad utf8 in boot peerinfo")?;
+
+        let bootstrap_peerinfo: Vec<PeerInfo> = match deserialize_json(bootstrap_peerinfo_str) {
+            Ok(v) => v,
+            Err(e) => {
+                error!("{}", e);
+                return Err(e).wrap_err("failed to deserialize peerinfo");
+            }
+        };

         Ok(bootstrap_peerinfo.into_iter().map(Arc::new).collect())
     }
@@ -5,6 +5,7 @@ mod native;
 #[cfg(target_arch = "wasm32")]
 mod wasm;

+mod address_check;
 mod address_filter;
 mod connection_handle;
 mod connection_manager;

@@ -30,6 +31,7 @@ pub(crate) use stats::*;
 pub use types::*;

 ////////////////////////////////////////////////////////////////////////////////////////
+use address_check::*;
 use address_filter::*;
 use connection_handle::*;
 use crypto::*;

@@ -54,16 +56,9 @@ pub const IPADDR_TABLE_SIZE: usize = 1024;
 pub const IPADDR_MAX_INACTIVE_DURATION_US: TimestampDuration =
     TimestampDuration::new(300_000_000u64); // 5 minutes
 pub const NODE_CONTACT_METHOD_CACHE_SIZE: usize = 1024;
-pub const PUBLIC_ADDRESS_CHANGE_CONSISTENCY_DETECTION_COUNT: usize = 3; // Number of consistent results in the cache during outbound-only to trigger detection
-pub const PUBLIC_ADDRESS_CHANGE_INCONSISTENCY_DETECTION_COUNT: usize = 7; // Number of inconsistent results in the cache during inbound-capable to trigger detection
-pub const PUBLIC_ADDRESS_CHECK_CACHE_SIZE: usize = 10; // Length of consistent/inconsistent result cache for detection
-pub const PUBLIC_ADDRESS_CHECK_TASK_INTERVAL_SECS: u32 = 60;
-pub const PUBLIC_ADDRESS_INCONSISTENCY_TIMEOUT_US: TimestampDuration =
-    TimestampDuration::new(300_000_000u64); // 5 minutes
-pub const PUBLIC_ADDRESS_INCONSISTENCY_PUNISHMENT_TIMEOUT_US: TimestampDuration =
-    TimestampDuration::new(3_600_000_000_u64); // 60 minutes
 pub const ADDRESS_FILTER_TASK_INTERVAL_SECS: u32 = 60;
 pub const BOOT_MAGIC: &[u8; 4] = b"BOOT";
+pub const HOLE_PUNCH_DELAY_MS: u32 = 100;

 // Things we get when we start up and go away when we shut down
 // Routing table is not in here because we want it to survive a network shutdown/startup restart

@@ -117,9 +112,6 @@ struct NodeContactMethodCacheKey {
     target_node_ref_sequencing: Sequencing,
 }

-#[derive(Copy, Clone, Debug, PartialEq, Eq, Ord, PartialOrd, Hash)]
-struct PublicAddressCheckCacheKey(ProtocolType, AddressType);
-
 enum SendDataToExistingFlowResult {
     Sent(UniqueFlow),
     NotSent(Vec<u8>),

@@ -137,10 +129,7 @@ struct NetworkManagerInner {
     stats: NetworkManagerStats,
     client_allowlist: LruCache<TypedKey, ClientAllowlistEntry>,
     node_contact_method_cache: LruCache<NodeContactMethodCacheKey, NodeContactMethod>,
-    public_internet_address_check_cache:
-        BTreeMap<PublicAddressCheckCacheKey, LruCache<IpAddr, SocketAddress>>,
-    public_internet_address_inconsistencies_table:
-        BTreeMap<PublicAddressCheckCacheKey, HashMap<IpAddr, Timestamp>>,
+    address_check: Option<AddressCheck>,
 }

 struct NetworkManagerUnlockedInner {

@@ -158,7 +147,6 @@ struct NetworkManagerUnlockedInner {
     update_callback: RwLock<Option<UpdateCallback>>,
     // Background processes
     rolling_transfers_task: TickTask<EyreReport>,
-    public_internet_address_check_task: TickTask<EyreReport>,
     address_filter_task: TickTask<EyreReport>,
     // Network Key
     network_key: Option<SharedSecret>,

@@ -178,8 +166,7 @@ impl NetworkManager {
             stats: NetworkManagerStats::default(),
             client_allowlist: LruCache::new_unbounded(),
             node_contact_method_cache: LruCache::new(NODE_CONTACT_METHOD_CACHE_SIZE),
-            public_internet_address_check_cache: BTreeMap::new(),
-            public_internet_address_inconsistencies_table: BTreeMap::new(),
+            address_check: None,
         }
     }
     fn new_unlocked_inner(

@@ -205,10 +192,6 @@ impl NetworkManager {
                 "rolling_transfers_task",
                 ROLLING_TRANSFERS_INTERVAL_SECS,
             ),
-            public_internet_address_check_task: TickTask::new(
-                "public_address_check_task",
-                PUBLIC_ADDRESS_CHECK_TASK_INTERVAL_SECS,
-            ),
             address_filter_task: TickTask::new(
                 "address_filter_task",
                 ADDRESS_FILTER_TASK_INTERVAL_SECS,

@@ -437,6 +420,20 @@ impl NetworkManager {
                 return Ok(StartupDisposition::BindRetry);
             }
         }

+        let (detect_address_changes, ip6_prefix_size) = self.with_config(|c| {
+            (
+                c.network.detect_address_changes,
+                c.network.max_connections_per_ip6_prefix_size as usize,
+            )
+        });
+        let address_check_config = AddressCheckConfig {
+            detect_address_changes,
+            ip6_prefix_size,
+        };
+        let address_check = AddressCheck::new(address_check_config, net.clone());
+        self.inner.lock().address_check = Some(address_check);

         rpc_processor.startup().await?;
         receipt_manager.startup().await?;

@@ -474,18 +471,22 @@ impl NetworkManager {
         // Cancel all tasks
         self.cancel_tasks().await;

+        // Shutdown address check
+        self.inner.lock().address_check = Option::<AddressCheck>::None;

         // Shutdown network components if they started up
         log_net!(debug "shutting down network components");

         {
             let components = self.unlocked_inner.components.read().clone();
             if let Some(components) = components {
                 components.net.shutdown().await;
                 components.rpc_processor.shutdown().await;
                 components.receipt_manager.shutdown().await;
                 components.connection_manager.shutdown().await;
-
-                *self.unlocked_inner.components.write() = None;
             }
         }
+        *self.unlocked_inner.components.write() = None;

         // reset the state
         log_net!(debug "resetting network manager state");

@@ -835,7 +836,8 @@ impl NetworkManager {
                 .await?
         );

-        // XXX: do we need a delay here? or another hole punch packet?
+        // Add small delay to encourage packets to be delivered in order
+        sleep(HOLE_PUNCH_DELAY_MS).await;

         // Set the hole punch as our 'last connection' to ensure we return the receipt over the direct hole punch
         peer_nr.set_last_flow(unique_flow.flow, Timestamp::now());

@@ -1074,19 +1076,25 @@ impl NetworkManager {
         let routing_table = self.routing_table();
         let rpc = self.rpc_processor();

-        // Peek at header and see if we need to relay this
-        // If the recipient id is not our node id, then it needs relaying
+        // See if this sender is punished, if so, ignore the packet
         let sender_id = envelope.get_sender_typed_id();
+        if self.address_filter().is_node_id_punished(sender_id) {
+            return Ok(false);
+        }
+
+        // Peek at header and see if we need to relay this
+        // If the recipient id is not our node id, then it needs relaying
         let recipient_id = envelope.get_recipient_typed_id();
         if !routing_table.matches_own_node_id(&[recipient_id]) {
             // See if the source node is allowed to resolve nodes
             // This is a costly operation, so only outbound-relay permitted
             // nodes are allowed to do this, for example PWA users

             // xxx: eventually allow recipient_id to be in allowlist?
             // xxx: to enable cross-routing domain relaying? or rather
             // xxx: that 'localnetwork' routing domain nodes could be allowed to
             // xxx: full relay as well as client_allowlist nodes...

             let some_relay_nr = if self.check_client_allowlist(sender_id) {
                 // Full relay allowed, do a full resolve_node
                 match rpc

@@ -1095,7 +1103,7 @@ impl NetworkManager {
                 {
                     Ok(v) => v.map(|nr| nr.default_filtered()),
                     Err(e) => {
-                        log_net!(debug "failed to resolve recipient node for relay, dropping outbound relayed packet: {}" ,e);
+                        log_net!(debug "failed to resolve recipient node for relay, dropping relayed envelope: {}" ,e);
                         return Ok(false);
                     }
                 }

@@ -1103,6 +1111,19 @@ impl NetworkManager {
             // If this is not a node in the client allowlist, only allow inbound relay
             // which only performs a lightweight lookup before passing the packet back out

+            // If our node has the relay capability disabled, we should not be asked to relay
+            if self.with_config(|c| c.capabilities.disable.contains(&CAP_RELAY)) {
+                log_net!(debug "node has relay capability disabled, dropping relayed envelope from {} to {}", sender_id, recipient_id);
+                return Ok(false);
+            }
+
+            // If our own node requires a relay, we should not be asked to relay
+            // on behalf of other nodes, just drop relayed packets if we can't relay
+            if routing_table.relay_node(routing_domain).is_some() {
+                log_net!(debug "node requires a relay itself, dropping relayed envelope from {} to {}", sender_id, recipient_id);
+                return Ok(false);
+            }
+
             // See if we have the node in our routing table
             // We should, because relays are chosen by nodes that have established connectivity and
             // should be mutually in each others routing tables. The node needing the relay will be

@@ -1110,7 +1131,7 @@ impl NetworkManager {
             match routing_table.lookup_node_ref(recipient_id) {
                 Ok(v) => v.map(|nr| nr.default_filtered()),
                 Err(e) => {
-                    log_net!(debug "failed to look up recipient node for relay, dropping outbound relayed packet: {}" ,e);
+                    log_net!(debug "failed to look up recipient node for relay, dropping relayed envelope: {}" ,e);
                     return Ok(false);
                 }
             }

@@ -1196,4 +1217,48 @@ impl NetworkManager {
     pub fn restart_network(&self) {
         self.net().restart_network();
     }

+    // If some other subsystem believes our dial info is no longer valid, this will trigger
+    // a re-check of the dial info and network class
+    pub fn set_needs_dial_info_check(&self, routing_domain: RoutingDomain) {
+        match routing_domain {
+            RoutingDomain::LocalNetwork => {
+                // nothing here yet
+            }
+            RoutingDomain::PublicInternet => {
+                self.net().set_needs_public_dial_info_check(None);
+            }
+        }
+    }
+
+    // Report peer info changes
+    pub fn report_peer_info_change(&mut self, peer_info: Arc<PeerInfo>) {
+        let mut inner = self.inner.lock();
+        if let Some(address_check) = inner.address_check.as_mut() {
+            address_check.report_peer_info_change(peer_info);
+        }
+    }
+
+    // Determine if our IP address has changed
+    // this means we should recreate our public dial info if it is not static and rediscover it
+    // Wait until we have received confirmation from N different peers
+    pub fn report_socket_address_change(
+        &self,
+        routing_domain: RoutingDomain, // the routing domain this flow is over
+        socket_address: SocketAddress, // the socket address as seen by the remote peer
+        old_socket_address: Option<SocketAddress>, // the socket address previously for this peer
+        flow: Flow,                    // the flow used
+        reporting_peer: NodeRef,       // the peer's noderef reporting the socket address
+    ) {
+        let mut inner = self.inner.lock();
+        if let Some(address_check) = inner.address_check.as_mut() {
+            address_check.report_socket_address_change(
+                routing_domain,
+                socket_address,
+                old_socket_address,
+                flow,
+                reporting_peer,
+            );
+        }
+    }
 }
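Note that AddressCheck keys its tables by the reporter's IP block rather than the exact address, via ip_to_ipblock with the configured max_connections_per_ip6_prefix_size, so every host in one delegated IPv6 prefix counts as a single reporter. A plausible sketch of that masking helper (an assumption about its behavior, not the veilid-core source):

use std::net::{IpAddr, Ipv6Addr};

/// Collapse an IP address to its "block": IPv4 addresses stand alone,
/// while IPv6 addresses are masked down to the given prefix length,
/// so every host in one delegated prefix maps to the same key.
fn ip_to_ipblock(ip6_prefix_size: usize, addr: IpAddr) -> IpAddr {
    match addr {
        IpAddr::V4(v4) => IpAddr::V4(v4),
        IpAddr::V6(v6) => {
            let prefix = ip6_prefix_size.min(128) as u32;
            let bits = u128::from_be_bytes(v6.octets());
            // Keep only the top `prefix` bits; a prefix of 0 masks everything
            let mask = if prefix == 0 { 0 } else { u128::MAX << (128 - prefix) };
            IpAddr::V6(Ipv6Addr::from((bits & mask).to_be_bytes()))
        }
    }
}

fn main() {
    let a: IpAddr = "2001:db8:1234:5600:aaaa::1".parse().unwrap();
    let b: IpAddr = "2001:db8:1234:56ff::2".parse().unwrap();
    // With a /56 prefix, both addresses fall into the same block
    assert_eq!(ip_to_ipblock(56, a), ip_to_ipblock(56, b));
    println!("{} and {} share block {}", a, b, ip_to_ipblock(56, a));
}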
@@ -6,6 +6,7 @@ use futures_util::stream::FuturesUnordered;
 const PORT_MAP_VALIDATE_TRY_COUNT: usize = 3;
 const PORT_MAP_VALIDATE_DELAY_MS: u32 = 500;
 const PORT_MAP_TRY_COUNT: usize = 3;
+const EXTERNAL_INFO_VALIDATIONS: usize = 5;

 // Detection result of dial info detection futures
 #[derive(Clone, Debug)]

@@ -17,9 +18,16 @@ pub enum DetectedDialInfo {
 // Detection result of external address
 #[derive(Clone, Debug)]
 pub struct DetectionResult {
+    pub config: DiscoveryContextConfig,
     pub ddi: DetectedDialInfo,
     pub external_address_types: AddressTypeSet,
-    pub local_port: u16,
 }

+#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
+pub struct DiscoveryContextConfig {
+    pub protocol_type: ProtocolType,
+    pub address_type: AddressType,
+    pub port: u16,
+}

 // Result of checking external address

@@ -31,23 +39,16 @@ struct ExternalInfo {
 }

 struct DiscoveryContextInner {
-    // first node contacted
-    external_1: Option<ExternalInfo>,
-    // second node contacted
-    external_2: Option<ExternalInfo>,
+    external_info: Vec<ExternalInfo>,
 }

 struct DiscoveryContextUnlockedInner {
     routing_table: RoutingTable,
     net: Network,
-    clear_network_callback: ClearNetworkCallback,
+    config: DiscoveryContextConfig,

     // per-protocol
     intf_addrs: Vec<SocketAddress>,
-    existing_external_address: Option<SocketAddress>,
-    protocol_type: ProtocolType,
-    address_type: AddressType,
-    port: u16,
 }

 #[derive(Clone)]

@@ -56,53 +57,23 @@ pub(super) struct DiscoveryContext {
     inner: Arc<Mutex<DiscoveryContextInner>>,
 }

-pub(super) type ClearNetworkCallback = Arc<dyn Fn() -> SendPinBoxFuture<()> + Send + Sync>;
-
 impl DiscoveryContext {
-    pub fn new(
-        routing_table: RoutingTable,
-        net: Network,
-        protocol_type: ProtocolType,
-        address_type: AddressType,
-        port: u16,
-        clear_network_callback: ClearNetworkCallback,
-    ) -> Self {
-        let intf_addrs =
-            Self::get_local_addresses(routing_table.clone(), protocol_type, address_type);
-
-        // Get the existing external address to check to see if it has changed
-        let existing_dial_info = routing_table.all_filtered_dial_info_details(
-            RoutingDomain::PublicInternet.into(),
-            &DialInfoFilter::default()
-                .with_address_type(address_type)
-                .with_protocol_type(protocol_type),
-        );
-        let existing_external_address = if existing_dial_info.len() == 1 {
-            Some(
-                existing_dial_info
-                    .first()
-                    .unwrap()
-                    .dial_info
-                    .socket_address(),
-            )
-        } else {
-            None
-        };
+    pub fn new(routing_table: RoutingTable, net: Network, config: DiscoveryContextConfig) -> Self {
+        let intf_addrs = Self::get_local_addresses(
+            routing_table.clone(),
+            config.protocol_type,
+            config.address_type,
+        );

         Self {
             unlocked_inner: Arc::new(DiscoveryContextUnlockedInner {
                 routing_table,
                 net,
-                clear_network_callback,
+                config,
                 intf_addrs,
-                existing_external_address,
-                protocol_type,
-                address_type,
-                port,
             }),
             inner: Arc::new(Mutex::new(DiscoveryContextInner {
-                external_1: None,
-                external_2: None,
+                external_info: Vec::new(),
             })),
         }
     }

@@ -153,12 +124,12 @@ impl DiscoveryContext {
             }
         );

-        log_net!(
+        log_network_result!(
             debug "request_public_address {:?}: Value({:?})",
             node_ref,
             res.answer
         );
-        res.answer.map(|si| si.socket_address)
+        res.answer.opt_sender_info.map(|si| si.socket_address)
     }

     // find fast peers with a particular address type, and ask them to tell us what our external address is

@@ -171,8 +142,10 @@ impl DiscoveryContext {
             c.network.dht.max_find_node_count as usize
         };
         let routing_domain = RoutingDomain::PublicInternet;
-        let protocol_type = self.unlocked_inner.protocol_type;
-        let address_type = self.unlocked_inner.address_type;
+
+        let protocol_type = self.unlocked_inner.config.protocol_type;
+        let address_type = self.unlocked_inner.config.address_type;
+        let port = self.unlocked_inner.config.port;

         // Build a filter that matches our protocol and address type
         // and excludes relayed nodes so we can get an accurate external address

@@ -228,7 +201,6 @@ impl DiscoveryContext {
         }

         // For each peer, ask them for our public address, filtering on desired dial info
-        let mut unord = FuturesUnordered::new();

         let get_public_address_func = |node: NodeRef| {
             let this = self.clone();

@@ -242,7 +214,7 @@ impl DiscoveryContext {
                 let dial_info = this
                     .unlocked_inner
                     .net
-                    .make_dial_info(address, this.unlocked_inner.protocol_type);
+                    .make_dial_info(address, protocol_type);
                 return Some(ExternalInfo {
                     dial_info,
                     address,

@@ -254,46 +226,65 @@ impl DiscoveryContext {
         };

         let mut external_address_infos = Vec::new();

+        let mut unord = FuturesUnordered::new();
-        for node in nodes.iter().take(nodes.len() - 1).cloned() {
+        for node in nodes.iter().cloned() {
             let gpa_future = get_public_address_func(node);
             unord.push(gpa_future);

-            // Always process two at a time so we get both addresses in parallel if possible
-            if unord.len() == 2 {
+            // Always process N at a time so we get all addresses in parallel if possible
+            if unord.len() == EXTERNAL_INFO_VALIDATIONS {
                 // Process one
                 if let Some(Some(ei)) = unord.next().in_current_span().await {
                     external_address_infos.push(ei);
-                    if external_address_infos.len() == 2 {
+                    if external_address_infos.len() == EXTERNAL_INFO_VALIDATIONS {
                         break;
                     }
                 }
             }
         }
         // Finish whatever is left if we need to
-        if external_address_infos.len() < 2 {
+        if external_address_infos.len() < EXTERNAL_INFO_VALIDATIONS {
             while let Some(res) = unord.next().in_current_span().await {
                 if let Some(ei) = res {
                     external_address_infos.push(ei);
-                    if external_address_infos.len() == 2 {
+                    if external_address_infos.len() == EXTERNAL_INFO_VALIDATIONS {
                         break;
                     }
                 }
             }
         }
-        if external_address_infos.len() < 2 {
+        if external_address_infos.len() < EXTERNAL_INFO_VALIDATIONS {
             log_net!(debug "not enough peers responded with an external address for type {:?}:{:?}",
                 protocol_type,
                 address_type);
             return false;
         }

+        // Try to make the preferential port come first
+        external_address_infos.sort_by(|a, b| {
+            let acmp = a.address.ip_addr().cmp(&b.address.ip_addr());
+            if acmp != cmp::Ordering::Equal {
+                return acmp;
+            }
+            if a.address.port() == b.address.port() {
+                return cmp::Ordering::Equal;
+            }
+            if a.address.port() == port {
+                return cmp::Ordering::Less;
+            }
+            if b.address.port() == port {
+                return cmp::Ordering::Greater;
+            }
+            a.address.port().cmp(&b.address.port())
+        });

         {
             let mut inner = self.inner.lock();
-            inner.external_1 = Some(external_address_infos[0].clone());
-            log_net!(debug "external_1: {:?}", inner.external_1);
-            inner.external_2 = Some(external_address_infos[1].clone());
-            log_net!(debug "external_2: {:?}", inner.external_2);
+            inner.external_info = external_address_infos;
+            log_net!(debug "External Addresses: ({:?}:{:?})[{}]",
+                protocol_type,
+                address_type,
+                inner.external_info.iter().map(|x| format!("{} <- {}", x.address, x.node)).collect::<Vec<_>>().join(", "));
         }

         true
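The sort added above groups samples by external IP and, within one IP, floats the entry whose external port equals our local port to the front, so a manually forwarded or stable NAT mapping wins over ephemeral ports. The same comparator over std::net::SocketAddr, as a sketch:

use std::cmp::Ordering;
use std::net::SocketAddr;

/// Order samples by IP, then put the address whose port matches our
/// preferred (local) port first, then by ascending port.
fn sort_preferring_port(samples: &mut [SocketAddr], preferred_port: u16) {
    samples.sort_by(|a, b| {
        let ip_cmp = a.ip().cmp(&b.ip());
        if ip_cmp != Ordering::Equal {
            return ip_cmp;
        }
        if a.port() == b.port() {
            return Ordering::Equal;
        }
        if a.port() == preferred_port {
            return Ordering::Less;
        }
        if b.port() == preferred_port {
            return Ordering::Greater;
        }
        a.port().cmp(&b.port())
    });
}

fn main() {
    let mut samples: Vec<SocketAddr> =
        ["203.0.113.7:40001", "203.0.113.7:5150", "203.0.113.7:39990"]
            .iter()
            .map(|s| s.parse().unwrap())
            .collect();
    sort_preferring_port(&mut samples, 5150);
    // 5150 (our listening port) sorts first within its IP
    assert_eq!(samples[0].port(), 5150);
    println!("{samples:?}");
}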
@@ -323,11 +314,12 @@ impl DiscoveryContext {

     #[instrument(level = "trace", skip(self), ret)]
     async fn try_upnp_port_mapping(&self) -> Option<DialInfo> {
-        let protocol_type = self.unlocked_inner.protocol_type;
+        let protocol_type = self.unlocked_inner.config.protocol_type;
+        let address_type = self.unlocked_inner.config.address_type;
+        let local_port = self.unlocked_inner.config.port;

         let low_level_protocol_type = protocol_type.low_level_protocol_type();
-        let address_type = self.unlocked_inner.address_type;
-        let local_port = self.unlocked_inner.port;
-        let external_1 = self.inner.lock().external_1.as_ref().unwrap().clone();
+        let external_1 = self.inner.lock().external_info.first().unwrap().clone();

         let igd_manager = self.unlocked_inner.net.unlocked_inner.igd_manager.clone();
         let mut tries = 0;

@@ -410,7 +402,7 @@ impl DiscoveryContext {
         &self,
         unord: &mut FuturesUnordered<SendPinBoxFuture<Option<DetectionResult>>>,
     ) {
-        let external_1 = self.inner.lock().external_1.as_ref().unwrap().clone();
+        let external_1 = self.inner.lock().external_info.first().cloned().unwrap();

         let this = self.clone();
         let do_no_nat_fut: SendPinBoxFuture<Option<DetectionResult>> = Box::pin(async move {

@@ -421,22 +413,22 @@ impl DiscoveryContext {
             {
                 // Add public dial info with Direct dialinfo class
                 Some(DetectionResult {
+                    config: this.unlocked_inner.config,
                     ddi: DetectedDialInfo::Detected(DialInfoDetail {
                         dial_info: external_1.dial_info.clone(),
                         class: DialInfoClass::Direct,
                     }),
                     external_address_types: AddressTypeSet::only(external_1.address.address_type()),
-                    local_port: this.unlocked_inner.port,
                 })
             } else {
                 // Add public dial info with Blocked dialinfo class
                 Some(DetectionResult {
+                    config: this.unlocked_inner.config,
                     ddi: DetectedDialInfo::Detected(DialInfoDetail {
                         dial_info: external_1.dial_info.clone(),
                         class: DialInfoClass::Blocked,
                     }),
                     external_address_types: AddressTypeSet::only(external_1.address.address_type()),
-                    local_port: this.unlocked_inner.port,
                 })
             }
         });

@@ -449,28 +441,69 @@ impl DiscoveryContext {
         &self,
         unord: &mut FuturesUnordered<SendPinBoxFuture<Option<DetectionResult>>>,
     ) {
-        // Get the external dial info for our use here
-        let (external_1, external_2) = {
+        let external_info = {
             let inner = self.inner.lock();
-            (
-                inner.external_1.as_ref().unwrap().clone(),
-                inner.external_2.as_ref().unwrap().clone(),
-            )
+            inner.external_info.clone()
         };
+        let local_port = self.unlocked_inner.config.port;

-        // If we have two different external address/port combinations, then this is a symmetric NAT
-        if external_2.address != external_1.address {
+        // Get the external dial info histogram for our use here
+        let mut external_info_addr_port_hist = HashMap::<SocketAddress, usize>::new();
+        let mut external_info_addr_hist = HashMap::<Address, usize>::new();
+        for ei in &external_info {
+            external_info_addr_port_hist
+                .entry(ei.address)
+                .and_modify(|n| *n += 1)
+                .or_insert(1);
+            external_info_addr_hist
+                .entry(ei.address.address())
+                .and_modify(|n| *n += 1)
+                .or_insert(1);
+        }
+
+        // If we have two different external addresses, then this is a symmetric NAT
+        // If just the port differs, and one is the preferential port we still accept
+        // this as an inbound capable dialinfo for holepunch
+        let different_addresses = external_info_addr_hist.len() > 1;
+        let mut best_external_info = None;
+        let mut local_port_matching_external_info = None;
+        let mut external_address_types = AddressTypeSet::new();
+
+        // Get the most popular external port from our sampling
+        // There will always be a best external info
+        let mut best_ei_address = None;
+        let mut best_ei_cnt = 0;
+        for eiph in &external_info_addr_port_hist {
+            if *eiph.1 > best_ei_cnt {
+                best_ei_address = Some(*eiph.0);
+                best_ei_cnt = *eiph.1;
+            }
+        }
+        // In preference order, pick out the best external address and if we have one the one that
+        // matches our local port number (may be the same)
+        for ei in &external_info {
+            if ei.address.port() == local_port && local_port_matching_external_info.is_none() {
+                local_port_matching_external_info = Some(ei.clone());
+            }
+            if best_ei_address.unwrap() == ei.address && best_external_info.is_none() {
+                best_external_info = Some(ei.clone());
+            }
+            external_address_types |= ei.address.address_type();
+        }
+
+        // There is a popular port on the best external info (more than one external address sample with same port)
+        let same_address_has_popular_port = !different_addresses && best_ei_cnt > 1;
+
+        // If we have different addresses in our samples, or no single address has a popular port
+        // then we consider this a symmetric NAT
+        if different_addresses || !same_address_has_popular_port {
             let this = self.clone();
             let do_symmetric_nat_fut: SendPinBoxFuture<Option<DetectionResult>> =
                 Box::pin(async move {
                     Some(DetectionResult {
+                        config: this.unlocked_inner.config,
                         ddi: DetectedDialInfo::SymmetricNAT,
-                        external_address_types: AddressTypeSet::only(
-                            external_1.address.address_type(),
-                        ) | AddressTypeSet::only(
-                            external_2.address.address_type(),
-                        ),
-                        local_port: this.unlocked_inner.port,
+                        external_address_types,
                     })
                 });
             unord.push(do_symmetric_nat_fut);
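Under the old two-sample scheme any port mismatch looked symmetric; with N samples the histogram logic above can distinguish "same IP, one popular port" (still cone-like and hole-punchable) from genuinely divergent mappings. A self-contained sketch of that classification over std::net::SocketAddr (a simplified model of the shown logic, not the veilid-core types):

use std::collections::HashMap;
use std::net::{IpAddr, SocketAddr};

#[derive(Debug, PartialEq)]
enum NatClass {
    /// Multiple external IPs, or no port seen twice on one IP
    Symmetric,
    /// One external IP with a popular (repeated) port: treat as inbound capable
    ConeLike(SocketAddr),
}

fn classify(samples: &[SocketAddr]) -> NatClass {
    let mut addr_port_hist = HashMap::<SocketAddr, usize>::new();
    let mut addr_hist = HashMap::<IpAddr, usize>::new();
    for s in samples {
        *addr_port_hist.entry(*s).or_insert(0) += 1;
        *addr_hist.entry(s.ip()).or_insert(0) += 1;
    }

    let different_addresses = addr_hist.len() > 1;

    // Most popular (address, port) pair across the samples
    let (best, best_cnt) = addr_port_hist
        .iter()
        .max_by_key(|(_, cnt)| **cnt)
        .map(|(sa, cnt)| (*sa, *cnt))
        .expect("at least one sample");

    let same_address_has_popular_port = !different_addresses && best_cnt > 1;
    if different_addresses || !same_address_has_popular_port {
        NatClass::Symmetric
    } else {
        NatClass::ConeLike(best)
    }
}

fn main() {
    let stable: Vec<SocketAddr> = ["203.0.113.7:5150", "203.0.113.7:5150", "203.0.113.7:40001"]
        .iter().map(|s| s.parse().unwrap()).collect();
    assert_eq!(classify(&stable), NatClass::ConeLike("203.0.113.7:5150".parse().unwrap()));

    let random_ports: Vec<SocketAddr> = ["203.0.113.7:40001", "203.0.113.7:40002", "203.0.113.7:40003"]
        .iter().map(|s| s.parse().unwrap()).collect();
    assert_eq!(classify(&random_ports), NatClass::Symmetric);
}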
@ -478,11 +511,12 @@ impl DiscoveryContext {
|
||||
}
|
||||
|
||||
// Manual Mapping Detection
|
||||
// If we have no external address that matches our local port, then lets try that port
|
||||
// on our best external address and see if there's a port forward someone added manually
|
||||
///////////
|
||||
let this = self.clone();
|
||||
let local_port = self.unlocked_inner.port;
|
||||
if external_1.dial_info.port() != local_port {
|
||||
let c_external_1 = external_1.clone();
|
||||
if local_port_matching_external_info.is_none() && best_external_info.is_some() {
|
||||
let c_external_1 = best_external_info.as_ref().unwrap().clone();
|
||||
let c_this = this.clone();
|
||||
let do_manual_map_fut: SendPinBoxFuture<Option<DetectionResult>> =
|
||||
Box::pin(async move {
|
||||
@ -501,6 +535,7 @@ impl DiscoveryContext {
|
||||
{
|
||||
// Add public dial info with Direct dialinfo class
|
||||
return Some(DetectionResult {
|
||||
config: c_this.unlocked_inner.config,
|
||||
ddi: DetectedDialInfo::Detected(DialInfoDetail {
|
||||
dial_info: external_1_dial_info_with_local_port,
|
||||
class: DialInfoClass::Direct,
|
||||
@ -508,7 +543,6 @@ impl DiscoveryContext {
|
||||
external_address_types: AddressTypeSet::only(
|
||||
c_external_1.address.address_type(),
|
||||
),
|
||||
local_port: c_this.unlocked_inner.port,
|
||||
});
|
||||
}
|
||||
|
||||
@ -534,7 +568,7 @@ impl DiscoveryContext {
|
||||
let mut ord = FuturesOrdered::new();
|
||||
|
||||
let c_this = this.clone();
|
||||
let c_external_1 = external_1.clone();
|
||||
let c_external_1 = external_info.first().cloned().unwrap();
|
||||
let do_full_cone_fut: SendPinBoxFuture<Option<DetectionResult>> =
|
||||
Box::pin(async move {
|
||||
// Let's see what kind of NAT we have
|
||||
@ -551,6 +585,7 @@ impl DiscoveryContext {
|
||||
// Add public dial info with full cone NAT network class
|
||||
|
||||
return Some(DetectionResult {
|
||||
config: c_this.unlocked_inner.config,
|
||||
ddi: DetectedDialInfo::Detected(DialInfoDetail {
|
||||
dial_info: c_external_1.dial_info,
|
||||
class: DialInfoClass::FullConeNAT,
|
||||
@ -558,7 +593,6 @@ impl DiscoveryContext {
|
||||
external_address_types: AddressTypeSet::only(
|
||||
c_external_1.address.address_type(),
|
||||
),
|
||||
local_port: c_this.unlocked_inner.port,
|
||||
});
|
||||
}
|
||||
None
|
||||
@ -566,8 +600,8 @@ impl DiscoveryContext {
|
||||
ord.push_back(do_full_cone_fut);
|
||||
|
||||
let c_this = this.clone();
|
||||
let c_external_1 = external_1.clone();
|
||||
let c_external_2 = external_2.clone();
|
||||
let c_external_1 = external_info.first().cloned().unwrap();
|
||||
let c_external_2 = external_info.get(1).cloned().unwrap();
|
||||
let do_restricted_cone_fut: SendPinBoxFuture<Option<DetectionResult>> =
|
||||
Box::pin(async move {
|
||||
// We are restricted, determine what kind of restriction
|
||||
@ -586,6 +620,7 @@ impl DiscoveryContext {
|
||||
{
|
||||
// Got a reply from a non-default port, which means we're only address restricted
|
||||
return Some(DetectionResult {
|
||||
config: c_this.unlocked_inner.config,
|
||||
ddi: DetectedDialInfo::Detected(DialInfoDetail {
|
||||
dial_info: c_external_1.dial_info.clone(),
|
||||
class: DialInfoClass::AddressRestrictedNAT,
|
||||
@ -593,11 +628,11 @@ impl DiscoveryContext {
|
||||
external_address_types: AddressTypeSet::only(
|
||||
c_external_1.address.address_type(),
|
||||
),
|
||||
local_port: c_this.unlocked_inner.port,
|
||||
});
|
||||
}
|
||||
// Didn't get a reply from a non-default port, which means we are also port restricted
|
||||
Some(DetectionResult {
|
||||
config: c_this.unlocked_inner.config,
|
||||
ddi: DetectedDialInfo::Detected(DialInfoDetail {
|
||||
dial_info: c_external_1.dial_info.clone(),
|
||||
class: DialInfoClass::PortRestrictedNAT,
|
||||
@ -605,7 +640,6 @@ impl DiscoveryContext {
|
||||
external_address_types: AddressTypeSet::only(
|
||||
c_external_1.address.address_type(),
|
||||
),
|
||||
local_port: c_this.unlocked_inner.port,
|
||||
})
|
||||
});
|
||||
ord.push_back(do_restricted_cone_fut);
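The probes above go into a FuturesOrdered so results are taken in priority order (full cone before restricted cone) even if they complete out of order. A small sketch, assuming the futures 0.3 crate:

use futures::executor::block_on;
use futures::stream::{FuturesOrdered, StreamExt};
use std::future::Future;
use std::pin::Pin;

type BoxedProbe = Pin<Box<dyn Future<Output = &'static str>>>;

fn main() {
    block_on(async {
        let mut ord = FuturesOrdered::new();
        // Push probes in priority order; results come back in this order.
        ord.push_back(Box::pin(async { "full cone probe" }) as BoxedProbe);
        ord.push_back(Box::pin(async { "restricted cone probe" }) as BoxedProbe);
        while let Some(result) = ord.next().await {
            println!("completed: {result}");
        }
    });
}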
@@ -656,30 +690,6 @@ impl DiscoveryContext {
return;
}

// Did external address change from the last time we made dialinfo?
// Disregard port for this because we only need to know if the ip address has changed
// If the port has changed it will change only for this protocol and will be overwritten individually by each protocol discover()
let some_clear_network_callback = {
let inner = self.inner.lock();
let ext_1 = inner.external_1.as_ref().unwrap().address.address();
let ext_2 = inner.external_2.as_ref().unwrap().address.address();
if (ext_1 != ext_2)
|| Some(ext_1)
!= self
.unlocked_inner
.existing_external_address
.map(|ea| ea.address())
{
// External address was not found, or has changed, go ahead and clear the network so we can do better
Some(self.unlocked_inner.clear_network_callback.clone())
} else {
None
}
};
if let Some(clear_network_callback) = some_clear_network_callback {
clear_network_callback().in_current_span().await;
}

// UPNP Automatic Mapping
///////////
if enable_upnp {
@@ -691,6 +701,7 @@ impl DiscoveryContext {
if let Some(external_mapped_dial_info) = this.try_upnp_port_mapping().await {
// Got a port mapping, let's use it
return Some(DetectionResult {
config: this.unlocked_inner.config,
ddi: DetectedDialInfo::Detected(DialInfoDetail {
dial_info: external_mapped_dial_info.clone(),
class: DialInfoClass::Mapped,
@@ -698,7 +709,6 @@ impl DiscoveryContext {
external_address_types: AddressTypeSet::only(
external_mapped_dial_info.address_type(),
),
local_port: this.unlocked_inner.port,
});
}
None
@@ -710,9 +720,20 @@ impl DiscoveryContext {
///////////

// If our local interface list contains external_1 then there is no NAT in place
let external_1 = self.inner.lock().external_1.as_ref().unwrap().clone();
let local_address_in_external_info = self
.inner
.lock()
.external_info
.iter()
.find_map(|ei| {
self.unlocked_inner
.intf_addrs
.contains(&ei.address)
.then_some(true)
})
.unwrap_or_default();

if self.unlocked_inner.intf_addrs.contains(&external_1.address) {
if local_address_in_external_info {
self.protocol_process_no_nat(unord).await;
} else {
self.protocol_process_nat(unord).await;
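A sketch of the no-NAT shortcut above with plain std types: if any externally observed address is bound on a local interface, no NAT sits between this node and the internet (intf_addrs and external_info here stand in for the fields used above):

use std::net::{IpAddr, SocketAddr};

// No NAT: some externally observed address is one of our own interfaces.
fn is_behind_nat(intf_addrs: &[IpAddr], external_info: &[SocketAddr]) -> bool {
    !external_info.iter().any(|ei| intf_addrs.contains(&ei.ip()))
}

fn main() {
    let intf_addrs: Vec<IpAddr> = vec!["192.0.2.10".parse().unwrap()];
    let observed: Vec<SocketAddr> = vec!["192.0.2.10:5150".parse().unwrap()];
    assert!(!is_behind_nat(&intf_addrs, &observed));
}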
@@ -30,6 +30,10 @@ use std::path::{Path, PathBuf};

/////////////////////////////////////////////////////////////////

pub const UPDATE_NETWORK_CLASS_TASK_TICK_PERIOD_SECS: u32 = 1;
pub const NETWORK_INTERFACES_TASK_TICK_PERIOD_SECS: u32 = 1;
pub const UPNP_TASK_TICK_PERIOD_SECS: u32 = 1;

pub const PEEK_DETECT_LEN: usize = 64;

cfg_if! {
@@ -168,9 +172,15 @@ impl Network {
routing_table,
connection_manager,
interfaces: NetworkInterfaces::new(),
update_network_class_task: TickTask::new("update_network_class_task", 1),
network_interfaces_task: TickTask::new("network_interfaces_task", 1),
upnp_task: TickTask::new("upnp_task", 1),
update_network_class_task: TickTask::new(
"update_network_class_task",
UPDATE_NETWORK_CLASS_TASK_TICK_PERIOD_SECS,
),
network_interfaces_task: TickTask::new(
"network_interfaces_task",
NETWORK_INTERFACES_TASK_TICK_PERIOD_SECS,
),
upnp_task: TickTask::new("upnp_task", UPNP_TASK_TICK_PERIOD_SECS),
network_task_lock: AsyncMutex::new(()),
igd_manager: igd_manager::IGDManager::new(config.clone()),
}
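The change above replaces magic tick periods with named constants. A standalone sketch of the same pattern using a plain (non-async) loop; the real TickTask is cooperatively scheduled, so this is only an analogy:

use std::time::{Duration, Instant};

// Periods live in one place instead of magic numbers at each call site.
const UPDATE_NETWORK_CLASS_TICK: Duration = Duration::from_secs(1);

fn main() {
    let start = Instant::now();
    let mut next_tick = start;
    while start.elapsed() < Duration::from_secs(3) {
        if Instant::now() >= next_tick {
            println!("tick at {:?}", start.elapsed());
            next_tick += UPDATE_NETWORK_CLASS_TICK;
        }
        std::thread::sleep(Duration::from_millis(50));
    }
}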
@@ -738,22 +748,6 @@ impl Network {
self.register_all_dial_info(&mut editor_public_internet, &mut editor_local_network)
.await?;

// Set network class statically if we have static public dialinfo
let detect_address_changes = {
let c = self.config.get();
c.network.detect_address_changes
};
if !detect_address_changes {
let inner = self.inner.lock();
if !inner.static_public_dial_info.is_empty() {
editor_public_internet.set_network_class(Some(NetworkClass::InboundCapable));
}
}

// Set network class statically for local network routing domain until
// we can do some reachability analysis eventually
editor_local_network.set_network_class(Some(NetworkClass::InboundCapable));

// Commit routing domain edits
if editor_public_internet.commit(true).await {
editor_public_internet.publish();
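A toy sketch of the commit/publish split used above: commit applies edits and reports whether anything changed, and publish only runs after a real change (the Editor type here is invented for illustration):

// Toy editor: commit() applies edits and says whether anything changed;
// publish() pushes new peer info out only after a real change.
struct Editor {
    dirty: bool,
}

impl Editor {
    fn commit(&mut self) -> bool {
        std::mem::take(&mut self.dirty)
    }
    fn publish(&self) {
        println!("publishing updated peer info");
    }
}

fn main() {
    let mut editor = Editor { dirty: true };
    if editor.commit() {
        editor.publish();
    }
    // Nothing changed since the last commit, so nothing is re-published.
    assert!(!editor.commit());
}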
@@ -234,7 +234,6 @@ impl Network {

// Register the public address
editor_public_internet.add_dial_info(pdi.clone(), DialInfoClass::Direct);
editor_public_internet.set_network_class(Some(NetworkClass::InboundCapable));

// See if this public address is also a local interface address we haven't registered yet
if self.is_stable_interface_address(pdi_addr.ip()) {
@@ -242,7 +241,6 @@ impl Network {
DialInfo::udp_from_socketaddr(pdi_addr),
DialInfoClass::Direct,
);
editor_local_network.set_network_class(Some(NetworkClass::InboundCapable));
}
}
}
@@ -253,7 +251,6 @@ impl Network {
// if no other public address is specified
if !detect_address_changes && public_address.is_none() && di.address().is_global() {
editor_public_internet.add_dial_info(di.clone(), DialInfoClass::Direct);
editor_public_internet.set_network_class(Some(NetworkClass::InboundCapable));
}

// Register interface dial info as well since the address is on the local interface

@@ -3,6 +3,8 @@ use super::*;
use futures_util::stream::FuturesUnordered;
use stop_token::future::FutureExt as StopTokenFutureExt;

type InboundProtocolMap = HashMap<(AddressType, LowLevelProtocolType, u16), Vec<ProtocolType>>;

impl Network {
#[instrument(parent = None, level = "trace", skip(self), err)]
pub async fn update_network_class_task_routine(
@@ -14,99 +16,65 @@ impl Network {
let _guard = self.unlocked_inner.network_task_lock.lock().await;

// Do the public dial info check
let out = self.do_public_dial_info_check(stop_token, l, t).await;
let finished = self.do_public_dial_info_check(stop_token, l, t).await?;

// Done with public dial info check
{
if finished {
let mut inner = self.inner.lock();
inner.needs_public_dial_info_check = false;
inner.public_dial_info_check_punishment = None;
}

out
Ok(())
}
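A sketch of the new finished-flag contract above: the routine returns Ok(false) to request another tick, and needs_public_dial_info_check is only cleared once it returns Ok(true). The check function here stands in for do_public_dial_info_check:

// check() stands in for the real detection pass; Ok(false) means
// "not finished, run the tick again".
fn check(attempt: u32) -> Result<bool, String> {
    Ok(attempt >= 2) // pretend the third attempt succeeds
}

fn main() -> Result<(), String> {
    let mut needs_check = true;
    for attempt in 0..5 {
        if !needs_check {
            break;
        }
        if check(attempt)? {
            needs_check = false; // only cleared once discovery finished
        }
    }
    assert!(!needs_check);
    Ok(())
}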
#[instrument(level = "trace", skip(self), err)]
pub async fn update_with_detected_dial_info(&self, ddi: DetectedDialInfo) -> EyreResult<()> {
let existing_network_class = self
.routing_table()
.get_network_class(RoutingDomain::PublicInternet)
.unwrap_or_default();

match ddi {
DetectedDialInfo::SymmetricNAT => {
// If we get any symmetric nat dialinfo, this whole network class is outbound only,
// and all dial info should be treated as invalid
if !matches!(existing_network_class, NetworkClass::OutboundOnly) {
let mut editor = self.routing_table().edit_public_internet_routing_domain();

editor.clear_dial_info_details(None, None);
editor.set_network_class(Some(NetworkClass::OutboundOnly));
editor.commit(true).await;
}
}
DetectedDialInfo::Detected(did) => {
// get existing dial info into table by protocol/address type
let mut existing_dial_info =
BTreeMap::<(ProtocolType, AddressType), DialInfoDetail>::new();
for did in self.routing_table().all_filtered_dial_info_details(
RoutingDomain::PublicInternet.into(),
&DialInfoFilter::all(),
#[instrument(level = "trace", skip(self, editor))]
fn process_detected_dial_info(
&self,
editor: &mut RoutingDomainEditorPublicInternet,
ddi: DetectedDialInfo,
) {
// Only need to keep one per pt/at pair, since they will all have the same dialinfoclass
existing_dial_info.insert(
(did.dial_info.protocol_type(), did.dial_info.address_type()),
did,
);
}
// We got a dial info, upgrade everything unless we are fixed to outbound only due to a symmetric nat
if !matches!(existing_network_class, NetworkClass::OutboundOnly) {
// Get existing dial info for protocol/address type combination
let pt = did.dial_info.protocol_type();
let at = did.dial_info.address_type();

// See what operations to perform with this dialinfo
let mut clear = false;
let mut add = false;

if let Some(edi) = existing_dial_info.get(&(pt, at)) {
// Is the dial info class better than our existing dial info?
// Or is the new dial info the same class, but different? Only change if things are different.
if did.class < edi.class
|| (did.class == edi.class && did.dial_info != edi.dial_info)
{
// Better or same dial info class was found, clear existing dialinfo for this pt/at pair
// Only keep one dial info per protocol/address type combination
clear = true;
add = true;
}
// Otherwise, don't upgrade, don't add, this is worse than what we have already
} else {
// No existing dial info of this type, so accept it; no need to upgrade, but add it
add = true;
}

if clear || add {
let mut editor = self.routing_table().edit_public_internet_routing_domain();

if clear {
editor.clear_dial_info_details(
Some(did.dial_info.address_type()),
Some(did.dial_info.protocol_type()),
);
}

if add {
match ddi {
DetectedDialInfo::SymmetricNAT => {}
DetectedDialInfo::Detected(did) => {
// We got a dialinfo, add it and tag us as inbound capable
editor.add_dial_info(did.dial_info.clone(), did.class);
}
}
}

editor.set_network_class(Some(NetworkClass::InboundCapable));
editor.commit(true).await;
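The clear/add decision above hinges on DialInfoClass deriving Ord so that more directly reachable classes compare less than more restricted ones. An illustrative sketch (variant set abbreviated from the real enum):

// Abbreviated from the real enum; derive(Ord) orders by declaration,
// so "better" (more reachable) classes compare less than "worse" ones.
#[derive(Debug, PartialEq, Eq, PartialOrd, Ord)]
enum DialInfoClass {
    Direct,
    Mapped,
    FullConeNAT,
    AddressRestrictedNAT,
    PortRestrictedNAT,
}

fn main() {
    let existing = DialInfoClass::PortRestrictedNAT;
    let detected = DialInfoClass::FullConeNAT;
    // did.class < edi.class: the detected info upgrades the table entry.
    assert!(detected < existing);
}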
#[instrument(level = "trace", skip(self, editor))]
fn update_with_detection_result(
&self,
editor: &mut RoutingDomainEditorPublicInternet,
inbound_protocol_map: &InboundProtocolMap,
dr: DetectionResult,
) {
// Found some new dial info for this protocol/address combination
self.process_detected_dial_info(editor, dr.ddi.clone());

// Add additional dialinfo for protocols on the same port
match &dr.ddi {
DetectedDialInfo::SymmetricNAT => {}
DetectedDialInfo::Detected(did) => {
let ipmkey = (
did.dial_info.address_type(),
did.dial_info.protocol_type().low_level_protocol_type(),
dr.config.port,
);
if let Some(ipm) = inbound_protocol_map.get(&ipmkey) {
for additional_pt in ipm.iter().skip(1) {
// Make dialinfo for additional protocol type
let additional_ddi = DetectedDialInfo::Detected(DialInfoDetail {
dial_info: self
.make_dial_info(did.dial_info.socket_address(), *additional_pt),
class: did.class,
});
// Add additional dialinfo
self.process_detected_dial_info(editor, additional_ddi);
}
}
}
}
Ok(())
}
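A sketch of the InboundProtocolMap idea with simplified key types: protocols sharing one low-level transport and port are grouped, so dial info detected for the first is cloned for the rest:

use std::collections::HashMap;

#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
enum LowLevel {
    Udp,
    Tcp,
}

fn main() {
    // Key: (low-level transport, port) -> protocols sharing that listener.
    let mut inbound_protocol_map: HashMap<(LowLevel, u16), Vec<&str>> = HashMap::new();
    inbound_protocol_map
        .entry((LowLevel::Tcp, 5150))
        .or_default()
        .extend(["TCP", "WS"]);
    inbound_protocol_map
        .entry((LowLevel::Udp, 5150))
        .or_default()
        .push("UDP");

    // Detection ran once for the first protocol on the key; every other
    // protocol on the same (transport, port) reuses the detected address.
    for additional in inbound_protocol_map[&(LowLevel::Tcp, 5150)].iter().skip(1) {
        println!("cloning detected dial info for {additional}");
    }
}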
#[instrument(level = "trace", skip(self), err)]
@@ -115,7 +83,7 @@ impl Network {
stop_token: StopToken,
_l: Timestamp,
_t: Timestamp,
) -> EyreResult<()> {
) -> EyreResult<bool> {
// Figure out if we can optimize TCP/WS checking since they are often on the same port
let (protocol_config, inbound_protocol_map) = {
let mut inner = self.inner.lock();
@@ -166,7 +134,7 @@ impl Network {
.into_iter()
.collect();

// Set most permissive network config
// Set most permissive network config and start from scratch
let mut editor = self.routing_table().edit_public_internet_routing_domain();
editor.setup_network(
protocol_config.outbound,
@@ -174,47 +142,26 @@ impl Network {
protocol_config.family_global,
protocol_config.public_internet_capabilities.clone(),
);
editor.commit(true).await;

// Create a callback to clear the network if we need to 'start over'
let this = self.clone();
let clear_network_callback: ClearNetworkCallback = Arc::new(move || {
let this = this.clone();
Box::pin(async move {
// Ensure we only do this once per network class discovery
{
let mut inner = this.inner.lock();
if inner.network_already_cleared {
return;
}
inner.network_already_cleared = true;
}
let mut editor = this.routing_table().edit_public_internet_routing_domain();
editor.clear_dial_info_details(None, None);
editor.set_network_class(None);
editor.commit(true).await;
})
});

// Process all protocol and address combinations
let mut unord = FuturesUnordered::new();

for ((at, _llpt, port), protocols) in &inbound_protocol_map {
let first_pt = protocols.first().unwrap();

let discovery_context = DiscoveryContext::new(
self.routing_table(),
self.clone(),
*first_pt,
*at,
*port,
clear_network_callback.clone(),
);
let mut context_configs = HashSet::new();
for ((address_type, _llpt, port), protocols) in inbound_protocol_map.clone() {
let protocol_type = *protocols.first().unwrap();
let dcc = DiscoveryContextConfig {
protocol_type,
address_type,
port,
};
context_configs.insert(dcc);
let discovery_context = DiscoveryContext::new(self.routing_table(), self.clone(), dcc);
discovery_context.discover(&mut unord).await;
}

// Wait for all discovery futures to complete and apply discoverycontexts
let mut all_address_types = AddressTypeSet::new();
let mut external_address_types = AddressTypeSet::new();
loop {
match unord
.next()
@@ -223,37 +170,17 @@ impl Network {
.await
{
Ok(Some(Some(dr))) => {
// Found some new dial info for this protocol/address combination
self.update_with_detected_dial_info(dr.ddi.clone()).await?;
// Got something for this config
context_configs.remove(&dr.config);

// Add the external address kinds to the set we've seen
all_address_types |= dr.external_address_types;
external_address_types |= dr.external_address_types;

// Add additional dialinfo for protocols on the same port
if let DetectedDialInfo::Detected(did) = &dr.ddi {
let ipmkey = (
did.dial_info.address_type(),
did.dial_info.protocol_type().low_level_protocol_type(),
dr.local_port,
);
if let Some(ipm) = inbound_protocol_map.get(&ipmkey) {
for additional_pt in ipm.iter().skip(1) {
// Make dialinfo for additional protocol type
let additional_ddi = DetectedDialInfo::Detected(DialInfoDetail {
dial_info: self.make_dial_info(
did.dial_info.socket_address(),
*additional_pt,
),
class: did.class,
});
// Add additional dialinfo
self.update_with_detected_dial_info(additional_ddi).await?;
}
}
}
// Import the dialinfo
self.update_with_detection_result(&mut editor, &inbound_protocol_map, dr);
}
Ok(Some(None)) => {
// Found no new dial info for this protocol/address combination
// Found no dial info for this protocol/address combination
}
Ok(None) => {
// All done, normally
@@ -261,20 +188,34 @@ impl Network {
}
Err(_) => {
// Stop token, exit early without error propagation
return Ok(());
return Ok(true);
}
}
}

// See if any discovery contexts did not complete for a particular
// protocol type even though their external address type was seen.
let mut success = true;
for cc in &context_configs {
if external_address_types.contains(cc.address_type) {
success = false;
break;
}
}

if !success {
log_net!(debug "Network class discovery failed, trying again, needed {:?}", context_configs);
return Ok(false);
}
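A standalone sketch of the completion check above: every planned config goes into a set, each detection result removes its config, and discovery only succeeds when nothing with a supported external address type remains:

use std::collections::HashSet;

fn main() {
    // Planned (protocol, address type, port) combinations.
    let mut context_configs: HashSet<(&str, &str, u16)> = HashSet::new();
    context_configs.insert(("UDP", "IPV4", 5150));
    context_configs.insert(("TCP", "IPV4", 5150));

    // A detection result arrived for UDP/IPV4:5150.
    context_configs.remove(&("UDP", "IPV4", 5150));

    // TCP/IPV4 never completed even though IPV4 was seen externally,
    // so the whole discovery pass reports failure and is retried.
    let external_address_types = ["IPV4"];
    let success = !context_configs
        .iter()
        .any(|cc| external_address_types.contains(&cc.1));
    assert!(!success);
}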
// All done

log_net!(debug "Network class discovery finished with address_types {:?}", all_address_types);
log_net!(debug "Network class discovery finished with address_types {:?}", external_address_types);

// Set the address types we've seen
editor.setup_network(
protocol_config.outbound,
protocol_config.inbound,
all_address_types,
external_address_types,
protocol_config.public_internet_capabilities,
);
if editor.commit(true).await {
@@ -298,7 +239,7 @@ impl Network {
}
}

Ok(())
Ok(true)
}

/// Make a dialinfo from an address and protocol type

@@ -82,16 +82,29 @@ pub struct NetworkConnectionStats {
last_message_recv_time: Option<Timestamp>,
}

/// Represents a connection in the connection table for connection-oriented protocols
#[derive(Debug)]
pub(in crate::network_manager) struct NetworkConnection {
/// A unique id for this connection
connection_id: NetworkConnectionId,
/// The dial info used to make this connection if it was made with 'connect'
/// None if the connection was 'accepted'
opt_dial_info: Option<DialInfo>,
/// The network flow 5-tuple this connection is over
flow: Flow,
/// Each connection has a processor and this is the task we wait for to ensure it exits cleanly
processor: Option<MustJoinHandle<()>>,
/// When this connection was connected or accepted
established_time: Timestamp,
/// Statistics about network traffic
stats: Arc<Mutex<NetworkConnectionStats>>,
/// To send data out this connection, it is placed in this channel
sender: flume::Sender<(Option<Id>, Vec<u8>)>,
/// Drop this when we want to drop the connection
stop_source: Option<StopSource>,
/// The node we are responsible for protecting the connection for if it is protected
protected_nr: Option<NodeRef>,
/// The number of references to the network connection that exist (handles)
ref_count: usize,
}

@@ -110,6 +123,7 @@ impl NetworkConnection {
Self {
connection_id: id,
opt_dial_info: None,
flow,
processor: None,
established_time: Timestamp::now(),
@@ -129,6 +143,7 @@ impl NetworkConnection {
manager_stop_token: StopToken,
protocol_connection: ProtocolNetworkConnection,
connection_id: NetworkConnectionId,
opt_dial_info: Option<DialInfo>,
) -> Self {
// Get flow
let flow = protocol_connection.flow();
@@ -164,6 +179,7 @@ impl NetworkConnection {
// Return the connection
Self {
connection_id,
opt_dial_info,
flow,
processor: Some(processor),
established_time: Timestamp::now(),
@@ -183,6 +199,10 @@ impl NetworkConnection {
self.flow
}

pub fn dial_info(&self) -> Option<DialInfo> {
self.opt_dial_info.clone()
}

#[expect(dead_code)]
pub fn unique_flow(&self) -> UniqueFlow {
UniqueFlow {
@@ -448,32 +468,28 @@ impl NetworkConnection {

pub fn debug_print(&self, cur_ts: Timestamp) -> String {
format!(
"{} <- {} | {} | est {} sent {} rcvd {} refcount {}{}",
self.flow.remote_address(),
self.flow
.local()
.map(|x| x.to_string())
.unwrap_or("---".to_owned()),
"{} | {} | est {} sent {} rcvd {} refcount {}{}",
self.flow,
self.connection_id.as_u64(),
debug_duration(
display_duration(
cur_ts
.as_u64()
.saturating_sub(self.established_time.as_u64())
),
self.stats()
.last_message_sent_time
.map(|ts| debug_duration(cur_ts.as_u64().saturating_sub(ts.as_u64())))
.map(|ts| display_duration(cur_ts.as_u64().saturating_sub(ts.as_u64())))
.unwrap_or("---".to_owned()),
self.stats()
.last_message_recv_time
.map(|ts| debug_duration(cur_ts.as_u64().saturating_sub(ts.as_u64())))
.map(|ts| display_duration(cur_ts.as_u64().saturating_sub(ts.as_u64())))
.unwrap_or("---".to_owned()),
self.ref_count,
if let Some(pnr) = &self.protected_nr {
format!(" PROTECTED:{}", pnr)
} else {
"".to_owned()
}
},
)
}
}
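A sketch of a display_duration-style helper as used in debug_print above (the exact name and format in Veilid may differ): render a microsecond timestamp difference as a human-readable duration:

// Render a microsecond difference as a coarse human-readable duration.
fn display_duration(us: u64) -> String {
    let secs = us / 1_000_000;
    let (h, m, s) = (secs / 3600, (secs % 3600) / 60, secs % 60);
    format!("{h}h{m:02}m{s:02}s")
}

fn main() {
    let established = 1_000_000u64; // microsecond timestamps
    let cur_ts = established + 3_661_000_000;
    assert_eq!(display_duration(cur_ts.saturating_sub(established)), "1h01m01s");
}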
@@ -74,6 +74,7 @@ impl fmt::Debug for ReceiptRecordCallbackType {
}
}

#[derive(Debug)]
struct ReceiptRecord {
expiration_ts: Timestamp,
receipt: Receipt,
@@ -82,18 +83,6 @@ struct ReceiptRecord {
receipt_callback: ReceiptRecordCallbackType,
}

impl fmt::Debug for ReceiptRecord {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("ReceiptRecord")
.field("expiration_ts", &self.expiration_ts)
.field("receipt", &self.receipt)
.field("expected_returns", &self.expected_returns)
.field("returns_so_far", &self.returns_so_far)
.field("receipt_callback", &self.receipt_callback)
.finish()
}
}

impl ReceiptRecord {
#[expect(dead_code)]
pub fn new(

@@ -1,70 +1,26 @@
use super::*;
use stop_token::future::FutureExt as _;

// global debugging statistics for hole punch success
static HOLE_PUNCH_SUCCESS: AtomicUsize = AtomicUsize::new(0);
static HOLE_PUNCH_FAILURE: AtomicUsize = AtomicUsize::new(0);
static REVERSE_CONNECT_SUCCESS: AtomicUsize = AtomicUsize::new(0);
static REVERSE_CONNECT_FAILURE: AtomicUsize = AtomicUsize::new(0);

impl NetworkManager {
/// Send raw data to a node
///
/// We may not have dial info for a node, but have an existing flow for it
/// because an inbound flow happened first, and no FindNodeQ has happened to that
/// node yet to discover its dial info. The existing flow should be tried first
/// in this case, if it matches the node ref's filters and no more permissive flow
/// could be established.
///
/// Sending to a node requires determining a NetworkClass compatible contact method
/// between the source and destination node
/// Sending to a node requires determining a NodeContactMethod.
/// NodeContactMethod is how to reach a node given the context of our current node, which may
/// include information about the existing connections and network state of our node.
/// NodeContactMethod calculation requires first calculating the per-RoutingDomain ContactMethod
/// between the source and destination PeerInfo, which is a stateless operation.
#[instrument(level = "trace", target = "net", skip_all, err)]
pub(crate) async fn send_data(
&self,
destination_node_ref: FilteredNodeRef,
data: Vec<u8>,
) -> EyreResult<NetworkResult<SendDataMethod>> {
// First try to send data to the last flow we've seen this peer on
let data = if let Some(flow) = destination_node_ref.last_flow() {
#[cfg(feature = "verbose-tracing")]
log_net!(debug
"send_data: trying last flow ({:?}) for {:?}",
flow,
destination_node_ref
);

match self.net().send_data_to_existing_flow(flow, data).await? {
SendDataToExistingFlowResult::Sent(unique_flow) => {
// Update timestamp for this last flow since we just sent to it
destination_node_ref.set_last_flow(unique_flow.flow, Timestamp::now());

#[cfg(feature = "verbose-tracing")]
log_net!(debug
"send_data: sent to last flow ({:?}) for {:?}",
unique_flow,
destination_node_ref
);

return Ok(NetworkResult::value(SendDataMethod {
opt_relayed_contact_method: None,
contact_method: NodeContactMethod::Existing,
unique_flow,
}));
}
SendDataToExistingFlowResult::NotSent(data) => {
// Couldn't send data to existing flow
// so pass the data back out
#[cfg(feature = "verbose-tracing")]
log_net!(debug
"send_data: did not send to last flow ({:?}) for {:?}",
flow,
destination_node_ref
);
data
}
}
} else {
// No last connection
data
};
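A sketch of the "try the last flow first" shape of send_data above: the failed path hands the payload back so the fallback contact-method path can reuse it without copying (types here are invented for illustration):

enum SendResult {
    Sent,
    NotSent(Vec<u8>), // hand the data back for the fallback path
}

fn send_on_existing_flow(flow_healthy: bool, data: Vec<u8>) -> SendResult {
    if flow_healthy {
        SendResult::Sent
    } else {
        SendResult::NotSent(data)
    }
}

fn main() {
    let data = b"hello".to_vec();
    match send_on_existing_flow(false, data) {
        SendResult::Sent => println!("sent on last flow"),
        SendResult::NotSent(data) => {
            // No usable existing flow: compute a contact method next.
            println!("falling back to contact method for {} bytes", data.len());
        }
    }
}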
// No existing connection was found or usable, so we proceed to see how to make a new one

// Get the best way to contact this node
let possibly_relayed_contact_method =
self.get_node_contact_method(destination_node_ref.clone())?;
@@ -135,10 +91,23 @@ impl NetworkManager {
.await?;
if matches!(nres, NetworkResult::Timeout) {
// Failed to holepunch, fallback to inbound relay
log_network_result!(debug "Reverse connection failed to {}, falling back to inbound relay via {}", target_node_ref, relay_nr);
let success = REVERSE_CONNECT_SUCCESS.load(Ordering::Acquire);
let failure = REVERSE_CONNECT_FAILURE.fetch_add(1, Ordering::AcqRel) + 1;
let rate = (success as f64 * 100.0) / ((success + failure) as f64);

log_network_result!(debug "Reverse connection failed ({:.2}% success) to {}, falling back to inbound relay via {}", rate, target_node_ref, relay_nr);
network_result_try!(this.try_possibly_relayed_contact_method(NodeContactMethod::InboundRelay(relay_nr), destination_node_ref, data).await?)
} else {
log_network_result!(debug "Reverse connection successful to {} via {}", target_node_ref, relay_nr);
if let NetworkResult::Value(sdm) = &nres {
if matches!(sdm.contact_method, NodeContactMethod::SignalReverse(_,_)) {
let success = REVERSE_CONNECT_SUCCESS.fetch_add(1, Ordering::AcqRel) + 1;
let failure = REVERSE_CONNECT_FAILURE.load(Ordering::Acquire);
let rate = (success as f64 * 100.0) / ((success + failure) as f64);

log_network_result!(debug "Reverse connection successful ({:.2}% success) to {} via {}", rate, target_node_ref, relay_nr);
}
}
network_result_try!(nres)
}
}
@@ -148,10 +117,22 @@ impl NetworkManager {
.await?;
if matches!(nres, NetworkResult::Timeout) {
// Failed to holepunch, fallback to inbound relay
log_network_result!(debug "Hole punch failed to {}, falling back to inbound relay via {}", target_node_ref, relay_nr);
let success = HOLE_PUNCH_SUCCESS.load(Ordering::Acquire);
let failure = HOLE_PUNCH_FAILURE.fetch_add(1, Ordering::AcqRel) + 1;
let rate = (success as f64 * 100.0) / ((success + failure) as f64);

log_network_result!(debug "Hole punch failed ({:.2}% success) to {}, falling back to inbound relay via {}", rate, target_node_ref, relay_nr);
network_result_try!(this.try_possibly_relayed_contact_method(NodeContactMethod::InboundRelay(relay_nr), destination_node_ref, data).await?)
} else {
log_network_result!(debug "Hole punch successful to {} via {}", target_node_ref, relay_nr);
if let NetworkResult::Value(sdm) = &nres {
if matches!(sdm.contact_method, NodeContactMethod::SignalHolePunch(_,_)) {
let success = HOLE_PUNCH_SUCCESS.fetch_add(1, Ordering::AcqRel) + 1;
let failure = HOLE_PUNCH_FAILURE.load(Ordering::Acquire);
let rate = (success as f64 * 100.0) / ((success + failure) as f64);

log_network_result!(debug "Hole punch successful ({:.2}% success) to {} via {}", rate, target_node_ref, relay_nr);
}
}
network_result_try!(nres)
}
}
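A standalone sketch of the success-rate bookkeeping above: lock-free atomic tallies feeding the percentage shown in the log lines:

use std::sync::atomic::{AtomicUsize, Ordering};

// Global counters in the style of the diff above: lock-free tallies of
// hole punch outcomes for the debug success-rate log line.
static HOLE_PUNCH_SUCCESS: AtomicUsize = AtomicUsize::new(0);
static HOLE_PUNCH_FAILURE: AtomicUsize = AtomicUsize::new(0);

fn record(success: bool) -> f64 {
    let (s, f) = if success {
        (
            HOLE_PUNCH_SUCCESS.fetch_add(1, Ordering::AcqRel) + 1,
            HOLE_PUNCH_FAILURE.load(Ordering::Acquire),
        )
    } else {
        (
            HOLE_PUNCH_SUCCESS.load(Ordering::Acquire),
            HOLE_PUNCH_FAILURE.fetch_add(1, Ordering::AcqRel) + 1,
        )
    };
    (s as f64 * 100.0) / ((s + f) as f64)
}

fn main() {
    record(true);
    record(true);
    let rate = record(false);
    println!("hole punch success rate: {rate:.2}%"); // 66.67%
}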
@@ -415,11 +396,40 @@ impl NetworkManager {
}
};

// Node A is our own node
// Peer A is our own node
// Use whatever node info we've calculated so far
let peer_a = routing_table.get_current_peer_info(routing_domain);
let own_node_info_ts = peer_a.signed_node_info().timestamp();

// Node B is the target node
// Peer B is the target node, get just the timestamp for the cache check
let target_node_info_ts = match target_node_ref.operate(|_rti, e| {
e.signed_node_info(routing_domain)
.map(|sni| sni.timestamp())
}) {
Some(ts) => ts,
None => {
log_net!(
"no node info for node {:?} in {:?}",
target_node_ref,
routing_domain
);
return Ok(NodeContactMethod::Unreachable);
}
};

// Get cache key
let mut ncm_key = NodeContactMethodCacheKey {
node_ids: target_node_ref.node_ids(),
own_node_info_ts,
target_node_info_ts,
target_node_ref_filter: target_node_ref.filter(),
target_node_ref_sequencing: target_node_ref.sequencing(),
};
if let Some(ncm) = self.inner.lock().node_contact_method_cache.get(&ncm_key) {
return Ok(ncm.clone());
}

// Peer B is the target node, get the whole peer info now
let peer_b = match target_node_ref.make_peer_info(routing_domain) {
Some(pi) => Arc::new(pi),
None => {
@@ -427,18 +437,8 @@ impl NetworkManager {
return Ok(NodeContactMethod::Unreachable);
}
};

// Get cache key
let ncm_key = NodeContactMethodCacheKey {
node_ids: target_node_ref.node_ids(),
own_node_info_ts: peer_a.signed_node_info().timestamp(),
target_node_info_ts: peer_b.signed_node_info().timestamp(),
target_node_ref_filter: target_node_ref.filter(),
target_node_ref_sequencing: target_node_ref.sequencing(),
};
if let Some(ncm) = self.inner.lock().node_contact_method_cache.get(&ncm_key) {
return Ok(ncm.clone());
}
// Update the key's timestamp to ensure we avoid any race conditions
ncm_key.target_node_info_ts = peer_b.signed_node_info().timestamp();
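A sketch of why the cache key carries both peers' node-info timestamps: any republished node info changes the key, so stale contact methods fall out of the cache naturally (fields simplified from the diff):

use std::collections::HashMap;

#[derive(Clone, PartialEq, Eq, Hash)]
struct NodeContactMethodCacheKey {
    node_id: u64,
    own_node_info_ts: u64,
    target_node_info_ts: u64,
}

fn main() {
    let mut cache: HashMap<NodeContactMethodCacheKey, &str> = HashMap::new();
    let mut key = NodeContactMethodCacheKey {
        node_id: 42,
        own_node_info_ts: 100,
        target_node_info_ts: 200,
    };
    cache.insert(key.clone(), "SignalReverse");

    // The target republished its node info: the timestamp moves, the old
    // entry no longer matches, and the contact method is recomputed.
    key.target_node_info_ts = 201;
    assert!(cache.get(&key).is_none());
}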
// Dial info filter comes from the target node ref but must be filtered by this node's outbound capabilities
let dial_info_filter = target_node_ref.dial_info_filter().filtered(
@@ -513,9 +513,42 @@ impl NetworkManager {
if !target_node_ref.node_ids().contains(&target_key) {
bail!("signalreverse target noderef didn't match target key: {:?} != {} for relay {}", target_node_ref, target_key, relay_key );
}
// Set sequencing requirement for the relay
relay_nr.set_sequencing(sequencing);
let target_node_ref =

// Tighten sequencing for the target to the best reverse connection flow we can get
let tighten = peer_a
.signed_node_info()
.node_info()
.filtered_dial_info_details(DialInfoDetail::NO_SORT, |did| {
did.matches_filter(&dial_info_filter)
})
.iter()
.find_map(|did| {
if peer_b
.signed_node_info()
.node_info()
.address_types()
.contains(did.dial_info.address_type())
&& peer_b
.signed_node_info()
.node_info()
.outbound_protocols()
.contains(did.dial_info.protocol_type())
&& did.dial_info.protocol_type().is_ordered()
{
Some(true)
} else {
None
}
})
.unwrap_or(false);

let mut target_node_ref =
target_node_ref.filtered_clone(NodeRefFilter::from(dial_info_filter));
if tighten {
target_node_ref.set_sequencing(Sequencing::EnsureOrdered);
}
NodeContactMethod::SignalReverse(relay_nr, target_node_ref)
}
ContactMethod::SignalHolePunch(relay_key, target_key) => {
@@ -531,6 +564,7 @@ impl NetworkManager {
if !target_node_ref.node_ids().contains(&target_key) {
bail!("signalholepunch target noderef didn't match target key: {:?} != {} for relay {}", target_node_ref, target_key, relay_key );
}
// Set sequencing requirement for the relay
relay_nr.set_sequencing(sequencing);

// if any other protocol were possible here we could update this and do_hole_punch
@@ -749,10 +783,13 @@ impl NetworkManager {
// punch should come through and create a real 'last connection' for us if this succeeds
network_result_try!(
self.net()
.send_data_to_dial_info(hole_punch_did.dial_info, Vec::new())
.send_data_to_dial_info(hole_punch_did.dial_info.clone(), Vec::new())
.await?
);

// Add small delay to encourage packets to be delivered in order
sleep(HOLE_PUNCH_DELAY_MS).await;

// Issue the signal
let rpc = self.rpc_processor();
network_result_try!(rpc
@@ -766,6 +803,13 @@ impl NetworkManager {
.await
.wrap_err("failed to send signal")?);

// Another hole punch after the signal for UDP redundancy
network_result_try!(
self.net()
.send_data_to_dial_info(hole_punch_did.dial_info, Vec::new())
.await?
);

// Wait for the return receipt
let inbound_nr = match eventual_value
.timeout_at(stop_token)

@@ -1,15 +0,0 @@
use super::*;

impl NetworkManager {
// Determine if a local IP address has changed
// this means we should restart the low level network and recreate all of our dial info
// Wait until we have received confirmation from N different peers
pub fn report_local_network_socket_address(
&self,
_socket_address: SocketAddress,
_flow: Flow,
_reporting_peer: NodeRef,
) {
// XXX: Nothing here yet.
}
}

@@ -1,5 +1,3 @@
pub mod local_network_address_check;
pub mod public_internet_address_check;
pub mod rolling_transfers;

use super::*;
@@ -20,20 +18,6 @@ impl NetworkManager {
});
}

// Set public internet address check task
{
let this = self.clone();
self.unlocked_inner
.public_internet_address_check_task
.set_routine(move |s, l, t| {
Box::pin(this.clone().public_internet_address_check_task_routine(
s,
Timestamp::new(l),
Timestamp::new(t),
))
});
}

// Set address filter task
{
let this = self.clone();

@@ -1,287 +0,0 @@
use super::*;

impl NetworkManager {
// Clean up the public address check tables, removing entries that have timed out
#[instrument(parent = None, level = "trace", skip_all, err)]
pub(crate) async fn public_internet_address_check_task_routine(
self,
_stop_token: StopToken,
_last_ts: Timestamp,
cur_ts: Timestamp,
) -> EyreResult<()> {
// go through public_address_inconsistencies_table and time out things that have expired
let mut inner = self.inner.lock();
for pait_v in inner
.public_internet_address_inconsistencies_table
.values_mut()
{
pait_v.retain(|_addr, exp_ts| {
// Keep it if it's in the future
*exp_ts > cur_ts
});
}
Ok(())
}

// Determine if a global IP address has changed
// this means we should recreate our public dial info if it is not static and rediscover it
// Wait until we have received confirmation from N different peers
pub fn report_public_internet_socket_address(
&self,
socket_address: SocketAddress, // the socket address as seen by the remote peer
flow: Flow, // the flow used
reporting_peer: NodeRef, // the peer's noderef reporting the socket address
) {
log_network_result!("report_public_internet_socket_address:\nsocket_address: {:#?}\nflow: {:#?}\nreporting_peer: {:#?}", socket_address, flow, reporting_peer);

// Ignore these reports if we are currently detecting public dial info
let net = self.net();
if net.needs_public_dial_info_check() {
return;
}

// Ignore flows that do not start from our listening port (unbound connections etc),
// because a router is going to map these differently
let Some(pla) =
net.get_preferred_local_address_by_key(flow.protocol_type(), flow.address_type())
else {
return;
};
let Some(local) = flow.local() else {
return;
};
if local.port() != pla.port() {
log_network_result!(debug "ignoring public internet address report because local port did not match listener: {} != {}", local.port(), pla.port());
return;
}

// Get our current published peer info
let routing_table = self.routing_table();
let Some(published_peer_info) =
routing_table.get_published_peer_info(RoutingDomain::PublicInternet)
else {
return;
};

// If we are a webapp we should skip this completely
// because we will never get inbound dialinfo directly on our public ip address
// If we have an invalid network class, this is not necessary yet
let public_internet_network_class = published_peer_info
.signed_node_info()
.node_info()
.network_class();
if matches!(public_internet_network_class, NetworkClass::WebApp) {
return;
}

let (detect_address_changes, ip6_prefix_size) = self.with_config(|c| {
(
c.network.detect_address_changes,
c.network.max_connections_per_ip6_prefix_size as usize,
)
});

// Get the ip(block) this report is coming from
let reporting_ipblock = ip_to_ipblock(ip6_prefix_size, flow.remote_address().ip_addr());

// Reject public address reports from nodes that we know are behind symmetric nat or
// nodes that must be using a relay for everything
let Some(node_info) = reporting_peer.node_info(RoutingDomain::PublicInternet) else {
return;
};
if node_info.network_class() != NetworkClass::InboundCapable {
return;
}

// If the socket address reported is the same as the reporter, then this is coming through a relay
// or it should be ignored due to local proximity (nodes on the same network block should not be trusted as
// public ip address reporters, only disinterested parties)
if reporting_ipblock == ip_to_ipblock(ip6_prefix_size, socket_address.ip_addr()) {
return;
}
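A sketch of an ip_to_ipblock-style helper with std types: collapse IPv6 addresses to their routing prefix so one subscriber network counts as a single reporter (assumes 0 < prefix_bits <= 128; the real helper's signature may differ):

use std::net::{IpAddr, Ipv6Addr};

// Collapse an IPv6 address to its routing prefix (0 < prefix_bits <= 128).
fn ip_to_ipblock(prefix_bits: u32, addr: IpAddr) -> IpAddr {
    match addr {
        IpAddr::V4(_) => addr, // IPv4 left as-is in this sketch
        IpAddr::V6(v6) => {
            let masked = u128::from(v6) & (u128::MAX << (128 - prefix_bits));
            IpAddr::V6(Ipv6Addr::from(masked))
        }
    }
}

fn main() {
    let a: IpAddr = "2001:db8::1".parse().unwrap();
    let b: IpAddr = "2001:db8::2".parse().unwrap();
    // Two hosts in one /56 count as a single reporting block.
    assert_eq!(ip_to_ipblock(56, a), ip_to_ipblock(56, b));
}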
// Check if the public address report is coming from a node/block that gives an 'inconsistent' location,
// meaning that the node may not be useful for public address detection
// This is done on a per address/protocol basis

let mut inner = self.inner.lock();
let inner = &mut *inner;

let addr_proto_type_key =
PublicAddressCheckCacheKey(flow.protocol_type(), flow.address_type());
if inner
.public_internet_address_inconsistencies_table
.get(&addr_proto_type_key)
.map(|pait| pait.contains_key(&reporting_ipblock))
.unwrap_or(false)
{
return;
}

// Insert this new public address into the lru cache for the address check
// if we've seen this address before, it brings it to the front
let pacc = inner
.public_internet_address_check_cache
.entry(addr_proto_type_key)
.or_insert_with(|| LruCache::new(PUBLIC_ADDRESS_CHECK_CACHE_SIZE));
pacc.insert(reporting_ipblock, socket_address);

// Determine if our external address has likely changed
let mut bad_public_internet_address_detection_punishment: Option<
Box<dyn FnOnce() + Send + 'static>,
> = None;

let needs_public_internet_address_detection = if matches!(
public_internet_network_class,
NetworkClass::InboundCapable
) {
// Get the dial info filter for this connection so we can check if we have any public dialinfo that may have changed
let dial_info_filter = flow.make_dial_info_filter();

// Get current external ip/port from registered global dialinfo
let current_addresses: BTreeSet<SocketAddress> = published_peer_info
.signed_node_info()
.node_info()
.filtered_dial_info_details(DialInfoDetail::NO_SORT, |did| {
did.matches_filter(&dial_info_filter)
})
.iter()
.map(|did| {
// Strip port from direct and mapped addresses
// as the incoming dialinfo may not match the outbound
// connections' NAT mapping. In this case we only check for IP address changes.
if did.class == DialInfoClass::Direct || did.class == DialInfoClass::Mapped {
did.dial_info.socket_address().with_port(0)
} else {
did.dial_info.socket_address()
}
})
.collect();

// If we are inbound capable, but start to see inconsistent socket addresses from multiple reporting peers,
// then we zap the network class and re-detect it

// Keep list of the origin ip blocks of inconsistent public address reports
let mut inconsistencies = Vec::new();

// Iteration goes from most recent to least recent node/address pair
for (reporting_ip_block, a) in pacc {
// If this address is not one of our current addresses (inconsistent)
// and we haven't already denylisted the reporting source,
// Also check address with port zero in the event we are only checking changes to ip addresses
if !current_addresses.contains(a)
&& !current_addresses.contains(&a.with_port(0))
&& !inner
.public_internet_address_inconsistencies_table
.get(&addr_proto_type_key)
.map(|pait| pait.contains_key(reporting_ip_block))
.unwrap_or(false)
{
// Record the origin of the inconsistency
log_network_result!(debug "inconsistency added from {:?}: reported {:?} with current_addresses = {:?}", reporting_ip_block, a, current_addresses);

inconsistencies.push(*reporting_ip_block);
}
}

// If we have enough inconsistencies to consider changing our public dial info,
// add them to our denylist (throttling) and go ahead and check for new
// public dialinfo
let inconsistent =
if inconsistencies.len() >= PUBLIC_ADDRESS_CHANGE_INCONSISTENCY_DETECTION_COUNT {
let exp_ts = Timestamp::now() + PUBLIC_ADDRESS_INCONSISTENCY_TIMEOUT_US;
let pait = inner
.public_internet_address_inconsistencies_table
.entry(addr_proto_type_key)
.or_default();
for i in &inconsistencies {
pait.insert(*i, exp_ts);
}

// Run this routine if the inconsistent nodes turn out to be lying
let this = self.clone();
bad_public_internet_address_detection_punishment = Some(Box::new(move || {
// xxx does this even work??

let mut inner = this.inner.lock();
let pait = inner
.public_internet_address_inconsistencies_table
.entry(addr_proto_type_key)
.or_default();
let exp_ts =
Timestamp::now() + PUBLIC_ADDRESS_INCONSISTENCY_PUNISHMENT_TIMEOUT_US;
for i in inconsistencies {
pait.insert(i, exp_ts);
}
}));

true
} else {
false
};
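A standalone sketch of the inconsistency throttle above: once enough distinct reporter blocks disagree with our published addresses, they are denylisted for a timeout so the same reporters cannot immediately retrigger detection (constants and key types here are illustrative):

use std::collections::HashMap;
use std::time::{Duration, Instant};

const INCONSISTENCY_COUNT: usize = 3;
const INCONSISTENCY_TIMEOUT: Duration = Duration::from_secs(300);

fn main() {
    let inconsistencies = vec!["blockA", "blockB", "blockC"];
    let mut denylist: HashMap<&str, Instant> = HashMap::new();

    if inconsistencies.len() >= INCONSISTENCY_COUNT {
        let exp_ts = Instant::now() + INCONSISTENCY_TIMEOUT;
        for block in &inconsistencies {
            // Denylisted blocks are ignored until exp_ts passes.
            denylist.insert(*block, exp_ts);
        }
        println!("address change suspected; re-detecting public dial info");
    }
    assert_eq!(denylist.len(), 3);
}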
// // debug code
// if inconsistent {
// log_net!("report_public_internet_socket_address: {:#?}\ncurrent_addresses: {:#?}\ninconsistencies: {}", inner
// .public_address_check_cache, current_addresses, inconsistencies);
// }

inconsistent
} else if matches!(public_internet_network_class, NetworkClass::OutboundOnly) {
// If we are currently outbound only, we don't have any public dial info,
// but if we are starting to see a consistent socket address from multiple reporting peers
// then we may become inbound capable, so zap the network class so we can re-detect it and any public dial info

let mut consistencies = 0;
let mut consistent = false;
let mut current_address = Option::<SocketAddress>::None;

// Iteration goes from most recent to least recent node/address pair
for (_, a) in pacc {
if let Some(current_address) = current_address {
if current_address == *a {
consistencies += 1;
if consistencies >= PUBLIC_ADDRESS_CHANGE_CONSISTENCY_DETECTION_COUNT {
consistent = true;
break;
}
}
} else {
current_address = Some(*a);
}
}
consistent
} else {
// If we are a webapp we never do this.
// If we have invalid network class, then public address detection is already going to happen via the network_class_discovery task

// we should have checked for this condition earlier at the top of this function
unreachable!();
};

if needs_public_internet_address_detection {
if detect_address_changes {
// Reset the address check cache now so we can start detecting fresh
info!("PublicInternet address has changed, detecting public dial info");
log_net!(debug "report_public_internet_socket_address:\nsocket_address: {:#?}\nflow: {:#?}\nreporting_peer: {:#?}", socket_address, flow, reporting_peer);
log_net!(debug
"public_internet_address_check_cache: {:#?}",
inner.public_internet_address_check_cache
);

inner.public_internet_address_check_cache.clear();

// Re-detect the public dialinfo
net.set_needs_public_dial_info_check(
bad_public_internet_address_detection_punishment,
);
} else {
warn!("PublicInternet address may have changed. Restarting the server may be required.");
warn!("report_public_internet_socket_address:\nsocket_address: {:#?}\nflow: {:#?}\nreporting_peer: {:#?}", socket_address, flow, reporting_peer);
warn!(
"public_internet_address_check_cache: {:#?}",
inner.public_internet_address_check_cache
);
}
}
}
}
|
@ -259,7 +259,7 @@ impl DialInfo {
|
||||
Self::WSS(di) => di.socket_address.ip_addr(),
|
||||
}
|
||||
}
|
||||
#[cfg_attr(target_arch = "wasm32", expect(dead_code))]
|
||||
#[expect(dead_code)]
|
||||
pub fn port(&self) -> u16 {
|
||||
match self {
|
||||
Self::UDP(di) => di.socket_address.port(),
|
||||
|
@ -11,12 +11,22 @@ use super::*;
|
||||
/// established connection is always from a real address to another real address.
|
||||
///
|
||||
|
||||
#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)]
|
||||
#[derive(Copy, Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)]
|
||||
pub struct Flow {
|
||||
remote: PeerAddress,
|
||||
local: Option<SocketAddress>,
|
||||
}
|
||||
|
||||
impl fmt::Display for Flow {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
if let Some(local) = &self.local {
|
||||
write!(f, "{} -> {}", local, self.remote)
|
||||
} else {
|
||||
write!(f, "{}", self.remote)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Flow {
|
||||
pub fn new(remote: PeerAddress, local: SocketAddress) -> Self {
|
||||
assert!(!remote.protocol_type().is_ordered() || !local.address().is_unspecified());
|
||||
@ -47,11 +57,6 @@ impl Flow {
|
||||
pub fn address_type(&self) -> AddressType {
|
||||
self.remote.address_type()
|
||||
}
|
||||
pub fn make_dial_info_filter(&self) -> DialInfoFilter {
|
||||
DialInfoFilter::all()
|
||||
.with_protocol_type(self.protocol_type())
|
||||
.with_address_type(self.address_type())
|
||||
}
|
||||
}
|
||||
|
||||
impl MatchesDialInfoFilter for Flow {
|
||||
@ -75,4 +80,19 @@ pub struct UniqueFlow {
|
||||
pub connection_id: Option<NetworkConnectionId>,
|
||||
}
|
||||
|
||||
impl fmt::Display for UniqueFlow {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
write!(
|
||||
f,
|
||||
"{} ({})",
|
||||
self.flow,
|
||||
if let Some(connection_id) = &self.connection_id {
|
||||
format!("id={}", connection_id)
|
||||
} else {
|
||||
"---".to_string()
|
||||
}
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
pub type NetworkConnectionId = AlignedU64;
|
||||
|
@ -404,15 +404,12 @@ impl Network {
|
||||
.edit_public_internet_routing_domain();
|
||||
|
||||
// set up the routing table's network config
|
||||
// if we have static public dialinfo, upgrade our network class
|
||||
|
||||
editor_public_internet.setup_network(
|
||||
protocol_config.outbound,
|
||||
protocol_config.inbound,
|
||||
protocol_config.family_global,
|
||||
protocol_config.public_internet_capabilities.clone(),
|
||||
);
|
||||
editor_public_internet.set_network_class(Some(NetworkClass::WebApp));
|
||||
|
||||
// commit routing domain edits
|
||||
if editor_public_internet.commit(true).await {
|
||||
|
@ -27,7 +27,7 @@ const NEVER_SEEN_PING_COUNT: u32 = 3;
|
||||
|
||||
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
|
||||
pub(crate) enum BucketEntryDeadReason {
|
||||
FailedToSend,
|
||||
CanNotSend,
|
||||
TooManyLostAnswers,
|
||||
NoPingResponse,
|
||||
}
|
||||
@ -87,8 +87,11 @@ impl From<BucketEntryStateReason> for BucketEntryState {
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Eq, PartialEq, PartialOrd, Ord, Hash)]
|
||||
pub(crate) struct LastFlowKey(ProtocolType, AddressType);
|
||||
#[derive(Debug, Clone, Eq, PartialEq, PartialOrd, Ord, Hash, Serialize, Deserialize)]
|
||||
pub(crate) struct LastFlowKey(pub ProtocolType, pub AddressType);
|
||||
|
||||
#[derive(Debug, Clone, Eq, PartialEq, PartialOrd, Ord, Hash, Serialize, Deserialize)]
|
||||
pub(crate) struct LastSenderInfoKey(pub RoutingDomain, pub ProtocolType, pub AddressType);
|
||||
|
||||
/// Bucket entry information specific to the LocalNetwork RoutingDomain
|
||||
#[derive(Debug, Serialize, Deserialize)]
|
||||
@ -101,6 +104,24 @@ pub(crate) struct BucketEntryPublicInternet {
|
||||
node_status: Option<NodeStatus>,
|
||||
}
|
||||
|
||||
impl fmt::Display for BucketEntryPublicInternet {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        if let Some(sni) = &self.signed_node_info {
            writeln!(f, "signed_node_info:")?;
            write!(f, " {}", indent_string(sni))?;
        } else {
            writeln!(f, "signed_node_info: None")?;
        }
        writeln!(
            f,
            "last_seen_our_node_info_ts: {}",
            self.last_seen_our_node_info_ts
        )?;
        writeln!(f, "node_status: {:?}", self.node_status)?;
        Ok(())
    }
}

/// Bucket entry information specific to the LocalNetwork RoutingDomain
#[derive(Debug, Serialize, Deserialize)]
pub(crate) struct BucketEntryLocalNetwork {
@ -112,6 +133,24 @@ pub(crate) struct BucketEntryLocalNetwork {
    node_status: Option<NodeStatus>,
}

impl fmt::Display for BucketEntryLocalNetwork {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        if let Some(sni) = &self.signed_node_info {
            writeln!(f, "signed_node_info:")?;
            write!(f, " {}", indent_string(sni))?;
        } else {
            writeln!(f, "signed_node_info: None")?;
        }
        writeln!(
            f,
            "last_seen_our_node_info_ts: {}",
            self.last_seen_our_node_info_ts
        )?;
        writeln!(f, "node_status: {:?}", self.node_status)?;
        Ok(())
    }
}

/// The data associated with each bucket entry
#[derive(Debug, Serialize, Deserialize)]
pub(crate) struct BucketEntryInner {
@ -130,6 +169,9 @@ pub(crate) struct BucketEntryInner {
    /// The last flows used to contact this node, per protocol type
    #[serde(skip)]
    last_flows: BTreeMap<LastFlowKey, (Flow, Timestamp)>,
    /// Last seen senderinfo per protocol/address type
    #[serde(skip)]
    last_sender_info: HashMap<LastSenderInfoKey, SenderInfo>,
    /// The node info for this entry on the publicinternet routing domain
    public_internet: BucketEntryPublicInternet,
    /// The node info for this entry on the localnetwork routing domain
@ -142,6 +184,12 @@ pub(crate) struct BucketEntryInner {
    /// The accounting for the transfer statistics
    #[serde(skip)]
    transfer_stats_accounting: TransferStatsAccounting,
    /// The accounting for the state and reason statistics
    #[serde(skip)]
    state_stats_accounting: Mutex<StateStatsAccounting>,
    /// RPC answer stats accounting
    #[serde(skip)]
    answer_stats_accounting: AnswerStatsAccounting,
    /// If the entry is being punished and should be considered dead
    #[serde(skip)]
    punishment: Option<PunishmentReason>,
@ -155,6 +203,52 @@ pub(crate) struct BucketEntryInner {
    node_ref_tracks: HashMap<usize, backtrace::Backtrace>,
}

impl fmt::Display for BucketEntryInner {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        writeln!(f, "validated_node_ids: {}", self.validated_node_ids)?;
        writeln!(f, "unsupported_node_ids: {}", self.unsupported_node_ids)?;
        writeln!(f, "envelope_support: {:?}", self.envelope_support)?;
        writeln!(
            f,
            "updated_since_last_network_change: {:?}",
            self.updated_since_last_network_change
        )?;
        writeln!(f, "last_flows:")?;
        for lf in &self.last_flows {
            writeln!(
                f,
                " {:?}/{:?}: {} @ {}",
                lf.0 .0, lf.0 .1, lf.1 .0, lf.1 .1
            )?;
        }
        writeln!(f, "last_sender_info:")?;
        for lsi in &self.last_sender_info {
            writeln!(
                f,
                " {:?}/{:?}/{:?}: {}",
                lsi.0 .0, lsi.0 .1, lsi.0 .2, lsi.1.socket_address
            )?;
        }
        writeln!(f, "public_internet:")?;
        write!(f, "{}", indent_all_string(&self.public_internet))?;
        writeln!(f, "local_network:")?;
        write!(f, "{}", indent_all_string(&self.local_network))?;
        writeln!(f, "peer_stats:")?;
        write!(f, "{}", indent_all_string(&self.peer_stats))?;
        writeln!(
            f,
            "punishment: {}",
            if let Some(punishment) = self.punishment {
                format!("{:?}", punishment)
            } else {
                "None".to_owned()
            }
        )?;

        Ok(())
    }
}

impl BucketEntryInner {
    #[cfg(feature = "tracking")]
    pub fn track(&mut self) -> usize {
@ -593,7 +687,7 @@ impl BucketEntryInner {
    }

    pub fn state_reason(&self, cur_ts: Timestamp) -> BucketEntryStateReason {
        if let Some(punished_reason) = self.punishment {
        let reason = if let Some(punished_reason) = self.punishment {
            BucketEntryStateReason::Punished(punished_reason)
        } else if let Some(dead_reason) = self.check_dead(cur_ts) {
            BucketEntryStateReason::Dead(dead_reason)
@ -601,7 +695,14 @@ impl BucketEntryInner {
            BucketEntryStateReason::Unreliable(unreliable_reason)
        } else {
            BucketEntryStateReason::Reliable
        }
        };

        // record this reason
        self.state_stats_accounting
            .lock()
            .record_state_reason(cur_ts, reason);

        reason
    }

    pub fn state(&self, cur_ts: Timestamp) -> BucketEntryState {
@ -681,6 +782,18 @@ impl BucketEntryInner {
        self.peer_stats.latency = Some(self.latency_stats_accounting.record_latency(latency));
    }

    // Called every UPDATE_STATE_STATS_SECS seconds
    pub(super) fn update_state_stats(&mut self) {
        if let Some(state_stats) = self.state_stats_accounting.lock().take_stats() {
            self.peer_stats.state = state_stats;
        }
    }

    // called every ROLLING_ANSWERS_INTERVAL_SECS seconds
    pub(super) fn roll_answer_stats(&mut self, cur_ts: Timestamp) {
        self.peer_stats.rpc_stats.answer = self.answer_stats_accounting.roll_answers(cur_ts);
    }

    ///// state machine handling
    pub(super) fn check_unreliable(
        &self,
@ -714,7 +827,7 @@ impl BucketEntryInner {
    pub(super) fn check_dead(&self, cur_ts: Timestamp) -> Option<BucketEntryDeadReason> {
        // If we have failed to send NEVER_SEEN_PING_COUNT times in a row, the node is dead
        if self.peer_stats.rpc_stats.failed_to_send >= NEVER_SEEN_PING_COUNT {
            return Some(BucketEntryDeadReason::FailedToSend);
            return Some(BucketEntryDeadReason::CanNotSend);
        }

        match self.peer_stats.rpc_stats.last_seen_ts {
@ -744,17 +857,28 @@ impl BucketEntryInner {
        None
    }

    /// Return the last time we either saw a node, or asked it a question
    fn latest_contact_time(&self) -> Option<Timestamp> {
        self.peer_stats
            .rpc_stats
            .last_seen_ts
            .max(self.peer_stats.rpc_stats.last_question_ts)
    /// Return the last time we asked a node a question
    fn last_outbound_contact_time(&self) -> Option<Timestamp> {
        // Using both outbound and inbound contact time may be a reasonable optimization for nodes that have
        // a very low rate of 'lost answers', but for now we are reverting this to ensure outbound connectivity before
        // we claim a node is reliable
        //
        // self.peer_stats
        //     .rpc_stats
        //     .last_seen_ts
        //     .max(self.peer_stats.rpc_stats.last_question_ts)

        self.peer_stats.rpc_stats.last_question_ts
    }
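A minimal sketch of the behavioral difference introduced by this revert; the trimmed-down struct and plain u64 timestamps are illustrative assumptions, not the crate's actual types:

// Hypothetical model of the contact-time revert above.
struct RpcStatsModel {
    last_seen_ts: Option<u64>,     // any evidence the node is alive
    last_question_ts: Option<u64>, // last time *we* asked it a question
}

impl RpcStatsModel {
    // Old behavior: inbound traffic alone could defer our pings.
    fn latest_contact_time(&self) -> Option<u64> {
        self.last_seen_ts.max(self.last_question_ts)
    }
    // New behavior: only our own outbound questions count, so a node must
    // keep answering us before the ping scheduler leaves it alone.
    fn last_outbound_contact_time(&self) -> Option<u64> {
        self.last_question_ts
    }
}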

    /// Return the last time we asked a node a question
    // fn last_question_time(&self) -> Option<Timestamp> {
    //     self.peer_stats.rpc_stats.last_question_ts
    // }

    fn needs_constant_ping(&self, cur_ts: Timestamp, interval_us: TimestampDuration) -> bool {
        // If we have not contacted the node within the last 'interval', we should ping it
        let latest_contact_time = self.latest_contact_time();
        let latest_contact_time = self.last_outbound_contact_time();

        match latest_contact_time {
            None => true,
@ -773,7 +897,7 @@ impl BucketEntryInner {
        match state {
            BucketEntryState::Reliable => {
                // If we are in a reliable state, we need a ping on an exponential scale
                let latest_contact_time = self.latest_contact_time();
                let latest_contact_time = self.last_outbound_contact_time();

                match latest_contact_time {
                    None => {
@ -873,6 +997,7 @@ impl BucketEntryInner {

    pub(super) fn question_sent(&mut self, ts: Timestamp, bytes: ByteCount, expects_answer: bool) {
        self.transfer_stats_accounting.add_up(bytes);
        self.answer_stats_accounting.record_question(ts);
        self.peer_stats.rpc_stats.messages_sent += 1;
        self.peer_stats.rpc_stats.failed_to_send = 0;
        if expects_answer {
@ -892,13 +1017,16 @@ impl BucketEntryInner {
    }
    pub(super) fn answer_rcvd(&mut self, send_ts: Timestamp, recv_ts: Timestamp, bytes: ByteCount) {
        self.transfer_stats_accounting.add_down(bytes);
        self.answer_stats_accounting.record_answer(recv_ts);
        self.peer_stats.rpc_stats.messages_rcvd += 1;
        self.peer_stats.rpc_stats.questions_in_flight -= 1;
        self.record_latency(recv_ts.saturating_sub(send_ts));
        self.touch_last_seen(recv_ts);
        self.peer_stats.rpc_stats.recent_lost_answers = 0;
    }
    pub(super) fn question_lost(&mut self) {
    pub(super) fn lost_answer(&mut self) {
        let cur_ts = Timestamp::now();
        self.answer_stats_accounting.record_lost_answer(cur_ts);
        self.peer_stats.rpc_stats.first_consecutive_seen_ts = None;
        self.peer_stats.rpc_stats.questions_in_flight -= 1;
        self.peer_stats.rpc_stats.recent_lost_answers += 1;
@ -910,6 +1038,19 @@ impl BucketEntryInner {
        self.peer_stats.rpc_stats.failed_to_send += 1;
        self.peer_stats.rpc_stats.first_consecutive_seen_ts = None;
    }
    pub(super) fn report_sender_info(
        &mut self,
        key: LastSenderInfoKey,
        sender_info: SenderInfo,
    ) -> Option<SenderInfo> {
        let last_sender_info = self.last_sender_info.insert(key, sender_info);
        if last_sender_info != Some(sender_info) {
            // Return last senderinfo if this new one is different
            last_sender_info
        } else {
            None
        }
    }
}

#[derive(Debug)]
@ -930,6 +1071,7 @@ impl BucketEntry {
            envelope_support: Vec::new(),
            updated_since_last_network_change: false,
            last_flows: BTreeMap::new(),
            last_sender_info: HashMap::new(),
            local_network: BucketEntryLocalNetwork {
                last_seen_our_node_info_ts: Timestamp::new(0u64),
                signed_node_info: None,
@ -945,9 +1087,12 @@ impl BucketEntry {
                rpc_stats: RPCStats::default(),
                latency: None,
                transfer: TransferStatsDownUp::default(),
                state: StateStats::default(),
            },
            latency_stats_accounting: LatencyStatsAccounting::new(),
            transfer_stats_accounting: TransferStatsAccounting::new(),
            state_stats_accounting: Mutex::new(StateStatsAccounting::new()),
            answer_stats_accounting: AnswerStatsAccounting::new(),
            punishment: None,
            #[cfg(feature = "tracking")]
            next_track_id: 0,

@ -66,22 +66,12 @@ impl RoutingTable {
    pub(crate) fn debug_info_nodeinfo(&self) -> String {
        let mut out = String::new();
        let inner = self.inner.read();
        out += "Routing Table Info:\n";

        out += &format!(" Node Ids: {}\n", self.unlocked_inner.node_ids());
        out += &format!("Node Ids: {}\n", self.unlocked_inner.node_ids());
        out += &format!(
            " Self Latency Stats Accounting: {:#?}\n\n",
            inner.self_latency_stats_accounting
            "Self Transfer Stats:\n{}",
            indent_all_string(&inner.self_transfer_stats)
        );
        out += &format!(
            " Self Transfer Stats Accounting: {:#?}\n\n",
            inner.self_transfer_stats_accounting
        );
        out += &format!(
            " Self Transfer Stats: {:#?}\n\n",
            inner.self_transfer_stats
        );
        out += &format!(" Version: {}\n\n", veilid_version_string());
        out += &format!("Version: {}", veilid_version_string());

        out
    }
@ -93,11 +83,11 @@ impl RoutingTable {

        out += "Local Network Dial Info Details:\n";
        for (n, ldi) in ldis.iter().enumerate() {
            out += &format!(" {:>2}: {:?}\n", n, ldi);
            out += &indent_all_string(&format!("{:>2}: {}\n", n, ldi));
        }
        out += "Public Internet Dial Info Details:\n";
        for (n, gdi) in gdis.iter().enumerate() {
            out += &format!(" {:>2}: {:?}\n", n, gdi);
            out += &indent_all_string(&format!("{:>2}: {}\n", n, gdi));
        }
        out
    }
@ -109,17 +99,16 @@ impl RoutingTable {
    ) -> String {
        let mut out = String::new();
        if published {
            out += &format!(
                "{:?} Published PeerInfo:\n {:#?}\n",
                routing_domain,
                self.get_published_peer_info(routing_domain)
            );
            let pistr = if let Some(pi) = self.get_published_peer_info(routing_domain) {
                format!("\n{}\n", indent_all_string(&pi))
            } else {
            out += &format!(
                "{:?} Current PeerInfo:\n {:#?}\n",
                routing_domain,
                self.get_current_peer_info(routing_domain)
            );
                " None".to_owned()
            };
            out += &format!("{:?} Published PeerInfo:{}", routing_domain, pistr);
        } else {
            let pi = self.get_current_peer_info(routing_domain);
            let pistr = format!("\n{}\n", indent_all_string(&pi));
            out += &format!("{:?} Current PeerInfo:{}", routing_domain, pistr);
        }
        out
    }
@ -138,7 +127,7 @@ impl RoutingTable {
            //
            },
            BucketEntryStateReason::Dead(d) => match d {
                BucketEntryDeadReason::FailedToSend => "DFSEND",
                BucketEntryDeadReason::CanNotSend => "DFSEND",
                BucketEntryDeadReason::TooManyLostAnswers => "DALOST",
                BucketEntryDeadReason::NoPingResponse => "DNOPNG",
            },
@ -153,11 +142,60 @@ impl RoutingTable {
        }
    }

    fn format_entry(
        cur_ts: Timestamp,
        node: TypedKey,
        e: &BucketEntryInner,
        relay_tag: &str,
    ) -> String {
        format!(
            " {} [{}][{}] {} [{}] lastq@{} seen@{}",
            // node id
            node,
            // state reason
            Self::format_state_reason(e.state_reason(cur_ts)),
            // Relay tag
            relay_tag,
            // average latency
            e.peer_stats()
                .latency
                .as_ref()
                .map(|l| l.to_string())
                .unwrap_or_else(|| "???".to_string()),
            // capabilities
            if let Some(ni) = e.node_info(RoutingDomain::PublicInternet) {
                ni.capabilities()
                    .iter()
                    .map(|x| x.to_string())
                    .collect::<Vec<String>>()
                    .join(",")
            } else {
                "???".to_owned()
            },
            // duration since last question
            e.peer_stats()
                .rpc_stats
                .last_question_ts
                .as_ref()
                .map(|l| cur_ts.saturating_sub(*l).to_string())
                .unwrap_or_else(|| "???".to_string()),
            // duration since last seen
            e.peer_stats()
                .rpc_stats
                .last_seen_ts
                .as_ref()
                .map(|l| cur_ts.saturating_sub(*l).to_string())
                .unwrap_or_else(|| "???".to_string()),
        )
    }

    pub(crate) fn debug_info_entries(
        &self,
        min_state: BucketEntryState,
        capabilities: Vec<FourCC>,
    ) -> String {
        let relay_node_filter = self.make_public_internet_relay_node_filter();

        let inner = self.inner.read();
        let inner = &*inner;
        let cur_ts = Timestamp::now();
@ -191,35 +229,26 @@ impl RoutingTable {
            if !filtered_entries.is_empty() {
                out += &format!("{} Bucket #{}:\n", ck, b);
                for e in filtered_entries {
                    let state_reason = e.1.with(inner, |_rti, e| e.state_reason(cur_ts));
                    out += &format!(
                        " {} [{}] {} [{}]\n",
                        e.0.encode(),
                        Self::format_state_reason(state_reason),
                        e.1.with(inner, |_rti, e| {
                            e.peer_stats()
                                .latency
                                .as_ref()
                                .map(|l| {
                                    format!(
                                        "{:.2}ms",
                                        timestamp_to_secs(l.average.as_u64()) * 1000.0
                                    )
                                })
                                .unwrap_or_else(|| "???.??ms".to_string())
                        }),
                        e.1.with(inner, |_rti, e| {
                            if let Some(ni) = e.node_info(RoutingDomain::PublicInternet) {
                                ni.capabilities()
                                    .iter()
                                    .map(|x| x.to_string())
                                    .collect::<Vec<String>>()
                                    .join(",")
                    let node = *e.0;

                    let can_be_relay = e.1.with(inner, |_rti, e| relay_node_filter(e));
                    let is_relay = self
                        .relay_node(RoutingDomain::PublicInternet)
                        .map(|r| r.same_bucket_entry(e.1))
                        .unwrap_or(false);
                    let relay_tag = if is_relay {
                        "R"
                    } else if can_be_relay {
                        "r"
                    } else {
                                "???".to_owned()
                            }
                        })
                    );
                        "-"
                    };

                    out += " ";
                    out += &e.1.with(inner, |_rti, e| {
                        Self::format_entry(cur_ts, TypedKey::new(*ck, node), e, relay_tag)
                    });
                    out += "\n";
                }
            }
            b += 1;
@ -230,6 +259,71 @@ impl RoutingTable {
        out
    }

    pub(crate) fn debug_info_entries_fastest(
        &self,
        min_state: BucketEntryState,
        capabilities: Vec<FourCC>,
        node_count: usize,
    ) -> String {
        let cur_ts = Timestamp::now();
        let relay_node_filter = self.make_public_internet_relay_node_filter();
        let mut relay_count = 0usize;

        let mut filters = VecDeque::new();
        filters.push_front(
            Box::new(|rti: &RoutingTableInner, e: Option<Arc<BucketEntry>>| {
                let Some(e) = e else {
                    return false;
                };
                let cap_match = e.with(rti, |_rti, e| {
                    e.has_all_capabilities(RoutingDomain::PublicInternet, &capabilities)
                });
                let state = e.with(rti, |_rti, e| e.state(cur_ts));
                state >= min_state && cap_match
            }) as RoutingTableEntryFilter,
        );
        let nodes = self.find_preferred_fastest_nodes(
            node_count,
            filters,
            |_rti, entry: Option<Arc<BucketEntry>>| {
                NodeRef::new(self.clone(), entry.unwrap().clone())
            },
        );
        let mut out = String::new();
        let entry_count = nodes.len();
        for node in nodes {
            let can_be_relay = node.operate(|_rti, e| relay_node_filter(e));
            let is_relay = self
                .relay_node(RoutingDomain::PublicInternet)
                .map(|r| r.same_entry(&node))
                .unwrap_or(false);
            let relay_tag = if is_relay {
                "R"
            } else if can_be_relay {
                "r"
            } else {
                "-"
            };
            if can_be_relay {
                relay_count += 1;
            }

            out += " ";
            out += &node
                .operate(|_rti, e| Self::format_entry(cur_ts, node.best_node_id(), e, relay_tag));
            out += "\n";
        }

        out += &format!(
            "Entries: {} Relays: {} Relay %: {:.2}\n",
            entry_count,
            relay_count,
            (relay_count as f64) * 100.0 / (entry_count as f64)
        );

        out
    }

    pub(crate) fn debug_info_entry(&self, node_ref: NodeRef) -> String {
        let cur_ts = Timestamp::now();

@ -237,9 +331,9 @@ impl RoutingTable {
        out += &node_ref.operate(|_rti, e| {
            let state_reason = e.state_reason(cur_ts);
            format!(
                "state: {}\n{:#?}\n",
                "{}\nstate: {}\n",
                e,
                Self::format_state_reason(state_reason),
                e
            )
        });
        out

@ -42,8 +42,11 @@ pub const RELAY_MANAGEMENT_INTERVAL_SECS: u32 = 1;
pub const PRIVATE_ROUTE_MANAGEMENT_INTERVAL_SECS: u32 = 1;

// Connectionless protocols like UDP are dependent on a NAT translation timeout
// We should ping them with some frequency and 30 seconds is typical timeout
pub const CONNECTIONLESS_TIMEOUT_SECS: u32 = 29;
// We ping relays to maintain our UDP NAT state with a RELAY_KEEPALIVE_PING_INTERVAL_SECS=10 frequency
// since 30 seconds is a typical UDP NAT state timeout.
// Non-relay flows are assumed to be alive for half the typical timeout and we regenerate the hole punch
// if the flow hasn't had any activity in this amount of time.
pub const CONNECTIONLESS_TIMEOUT_SECS: u32 = 15;
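The arithmetic behind the new constant, as a standalone sketch. Only the three timing relationships come from the comment above; the helper function and plain u64 seconds are illustrative assumptions:

// 30s typical NAT mapping lifetime; relays get keepalives every 10s (a third
// of the lifetime), while non-relay UDP flows are trusted for only half of it.
const UDP_NAT_TIMEOUT_SECS: u64 = 30;
const RELAY_KEEPALIVE_PING_INTERVAL_SECS: u64 = 10;
const CONNECTIONLESS_TIMEOUT_SECS: u64 = UDP_NAT_TIMEOUT_SECS / 2; // = 15

/// Hypothetical check: has this UDP flow been idle long enough that the
/// hole punch should be regenerated before sending again?
fn flow_needs_hole_punch(now_secs: u64, last_activity_secs: u64) -> bool {
    now_secs.saturating_sub(last_activity_secs) > CONNECTIONLESS_TIMEOUT_SECS
}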

// Table store keys
const ALL_ENTRY_BYTES: &[u8] = b"all_entry_bytes";
@ -100,6 +103,10 @@ pub(crate) struct RoutingTableUnlockedInner {
    kick_queue: Mutex<BTreeSet<BucketIndex>>,
    /// Background process for computing statistics
    rolling_transfers_task: TickTask<EyreReport>,
    /// Background process for computing statistics
    update_state_stats_task: TickTask<EyreReport>,
    /// Background process for computing statistics
    rolling_answers_task: TickTask<EyreReport>,
    /// Background process to purge dead routing table entries when necessary
    kick_buckets_task: TickTask<EyreReport>,
    /// Background process to get our initial routing table
@ -108,8 +115,14 @@ pub(crate) struct RoutingTableUnlockedInner {
    peer_minimum_refresh_task: TickTask<EyreReport>,
    /// Background process to ensure we have enough nodes close to our own in our routing table
    closest_peers_refresh_task: TickTask<EyreReport>,
    /// Background process to check nodes to see if they are still alive and for reliability
    ping_validator_task: TickTask<EyreReport>,
    /// Background process to check PublicInternet nodes to see if they are still alive and for reliability
    ping_validator_public_internet_task: TickTask<EyreReport>,
    /// Background process to check LocalNetwork nodes to see if they are still alive and for reliability
    ping_validator_local_network_task: TickTask<EyreReport>,
    /// Background process to check PublicInternet relay nodes to see if they are still alive and for reliability
    ping_validator_public_internet_relay_task: TickTask<EyreReport>,
    /// Background process to check Active Watch nodes to see if they are still alive and for reliability
    ping_validator_active_watch_task: TickTask<EyreReport>,
    /// Background process to keep relays up
    relay_management_task: TickTask<EyreReport>,
    /// Background process to keep private routes up
@ -216,6 +229,14 @@ impl RoutingTable {
                "rolling_transfers_task",
                ROLLING_TRANSFERS_INTERVAL_SECS,
            ),
            update_state_stats_task: TickTask::new(
                "update_state_stats_task",
                UPDATE_STATE_STATS_INTERVAL_SECS,
            ),
            rolling_answers_task: TickTask::new(
                "rolling_answers_task",
                ROLLING_ANSWER_INTERVAL_SECS,
            ),
            kick_buckets_task: TickTask::new("kick_buckets_task", 1),
            bootstrap_task: TickTask::new("bootstrap_task", 1),
            peer_minimum_refresh_task: TickTask::new("peer_minimum_refresh_task", 1),
@ -223,7 +244,19 @@ impl RoutingTable {
                "closest_peers_refresh_task",
                c.network.dht.min_peer_refresh_time_ms,
            ),
            ping_validator_task: TickTask::new("ping_validator_task", 1),
            ping_validator_public_internet_task: TickTask::new(
                "ping_validator_public_internet_task",
                1,
            ),
            ping_validator_local_network_task: TickTask::new(
                "ping_validator_local_network_task",
                1,
            ),
            ping_validator_public_internet_relay_task: TickTask::new(
                "ping_validator_public_internet_relay_task",
                1,
            ),
            ping_validator_active_watch_task: TickTask::new("ping_validator_active_watch_task", 1),
            relay_management_task: TickTask::new(
                "relay_management_task",
                RELAY_MANAGEMENT_INTERVAL_SECS,
@ -573,12 +606,6 @@ impl RoutingTable {
        self.inner.read().get_current_peer_info(routing_domain)
    }

    /// If we have a valid network class in this routing domain, then our 'NodeInfo' is valid
    /// If this is true, we can get our final peer info, otherwise we only have a 'best effort' peer info
    pub fn has_valid_network_class(&self, routing_domain: RoutingDomain) -> bool {
        self.inner.read().has_valid_network_class(routing_domain)
    }

    /// Return the domain's currently registered network class
    #[cfg_attr(target_arch = "wasm32", expect(dead_code))]
    pub fn get_network_class(&self, routing_domain: RoutingDomain) -> Option<NetworkClass> {

@ -91,12 +91,16 @@ pub trait NodeRefCommonTrait: NodeRefAccessorsTrait + NodeRefOperateTrait {
            .unwrap_or(0u64.into())
        })
    }
    fn has_seen_our_node_info_ts(
        &self,
        routing_domain: RoutingDomain,
        our_node_info_ts: Timestamp,
    ) -> bool {
        self.operate(|_rti, e| e.has_seen_our_node_info_ts(routing_domain, our_node_info_ts))
    fn has_seen_our_node_info_ts(&self, routing_domain: RoutingDomain) -> bool {
        self.operate(|rti, e| {
            let Some(our_node_info_ts) = rti
                .get_published_peer_info(routing_domain)
                .map(|pi| pi.signed_node_info().timestamp())
            else {
                return false;
            };
            e.has_seen_our_node_info_ts(routing_domain, our_node_info_ts)
        })
    }
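A condensed model of the new lookup: the timestamp argument is gone, and the check resolves our own published peer info internally, failing closed when nothing is published yet. The free function and u64 timestamps are hypothetical simplifications, as is the newer-or-equal comparison:

// Hypothetical reduction of has_seen_our_node_info_ts() above, assuming a
// newer-or-equal acknowledgement counts as "seen".
fn peer_has_seen_current_node_info(
    published_ts: Option<u64>,         // our published signed node info timestamp
    peer_acknowledged_ts: Option<u64>, // timestamp the peer last saw from us
) -> bool {
    match published_ts {
        None => false, // nothing published yet: nobody can have seen it
        Some(ours) => peer_acknowledged_ts.map_or(false, |seen| seen >= ours),
    }
}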
    fn set_seen_our_node_info_ts(&self, routing_domain: RoutingDomain, seen_ts: Timestamp) {
        self.operate_mut(|_rti, e| e.set_seen_our_node_info_ts(routing_domain, seen_ts));
@ -183,7 +187,7 @@ pub trait NodeRefCommonTrait: NodeRefAccessorsTrait + NodeRefOperateTrait {
        out
    }

    /// Get the most recent 'last connection' to this node
    /// Get the most recent 'last connection' to this node matching the node ref filter
    /// Filtered first and then sorted by ordering preference and then by most recent
    fn last_flow(&self) -> Option<Flow> {
        self.operate(|rti, e| {
@ -203,7 +207,8 @@ pub trait NodeRefCommonTrait: NodeRefAccessorsTrait + NodeRefOperateTrait {
        })
    }

    /// Get all the 'last connection' flows for this node
    /// Get all the 'last connection' flows for this node matching the node ref filter
    /// Filtered first and then sorted by ordering preference and then by most recent
    #[expect(dead_code)]
    fn last_flows(&self) -> Vec<Flow> {
        self.operate(|rti, e| {
@ -287,9 +292,9 @@ pub trait NodeRefCommonTrait: NodeRefAccessorsTrait + NodeRefOperateTrait {
            e.answer_rcvd(send_ts, recv_ts, bytes);
        })
    }
    fn stats_question_lost(&self) {
    fn stats_lost_answer(&self) {
        self.operate_mut(|_rti, e| {
            e.question_lost();
            e.lost_answer();
        })
    }
    fn stats_failed_to_send(&self, ts: Timestamp, expects_answer: bool) {
@ -297,4 +302,18 @@ pub trait NodeRefCommonTrait: NodeRefAccessorsTrait + NodeRefOperateTrait {
            e.failed_to_send(ts, expects_answer);
        })
    }
    fn report_sender_info(
        &self,
        routing_domain: RoutingDomain,
        protocol_type: ProtocolType,
        address_type: AddressType,
        sender_info: SenderInfo,
    ) -> Option<SenderInfo> {
        self.operate_mut(|_rti, e| {
            e.report_sender_info(
                LastSenderInfoKey(routing_domain, protocol_type, address_type),
                sender_info,
            )
        })
    }
}

@ -1122,8 +1122,7 @@ impl RouteSpecStore {
        // We can optimize the peer info in this safety route if it has been successfully
        // communicated over, either via an outbound test or by being used as a private route inbound,
        // and we are replying over the same route as our safety route outbound
        let optimize = safety_rssd.get_stats().last_tested_ts.is_some()
            || safety_rssd.get_stats().last_received_ts.is_some();
        let optimize = safety_rssd.get_stats().last_known_valid_ts.is_some();
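The same condition, reduced to a sketch: two independent "this route was recently proven to work" timestamps collapse into one. The struct name is an abridged stand-in for the fields in this hunk:

// Hypothetical condensed form of the optimize check above. The new
// last_known_valid_ts is set both on successful tests and on inbound
// receipt, so a single is_some() now covers both of the old cases.
struct RouteStatsModel {
    last_known_valid_ts: Option<u64>,
}
impl RouteStatsModel {
    fn can_optimize_peer_info(&self) -> bool {
        self.last_known_valid_ts.is_some()
    }
}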

        // Get the first hop noderef of the safety route
        let first_hop = safety_rssd.hop_node_ref(0).unwrap();
@ -1492,10 +1491,7 @@ impl RouteSpecStore {

        // See if we can optimize this compilation yet
        // We don't want to include full nodeinfo if we don't have to
        let optimized = optimized.unwrap_or(
            rssd.get_stats().last_tested_ts.is_some()
                || rssd.get_stats().last_received_ts.is_some(),
        );
        let optimized = optimized.unwrap_or(rssd.get_stats().last_known_valid_ts.is_some());

        let rsd = rssd
            .get_route_by_key(key)
@ -1519,10 +1515,7 @@ impl RouteSpecStore {

        // See if we can optimize this compilation yet
        // We don't want to include full nodeinfo if we don't have to
        let optimized = optimized.unwrap_or(
            rssd.get_stats().last_tested_ts.is_some()
                || rssd.get_stats().last_received_ts.is_some(),
        );
        let optimized = optimized.unwrap_or(rssd.get_stats().last_known_valid_ts.is_some());

        let mut out = Vec::new();
        for (key, rsd) in rssd.iter_route_set() {
@ -1726,15 +1719,15 @@ impl RouteSpecStore {

    /// Clear caches when our local node info changes
    #[instrument(level = "trace", target = "route", skip(self))]
    pub fn reset(&self) {
        log_rtab!(debug "flushing route spec store");
    pub fn reset_cache(&self) {
        log_rtab!(debug "resetting route cache");

        let inner = &mut *self.inner.lock();

        // Clean up local allocated routes
        // Clean up local allocated routes (does not delete allocated routes, set republication flag)
        inner.content.reset_details();

        // Reset private route cache
        // Reset private route cache (does not delete imported routes)
        inner.cache.reset_remote_private_routes();
    }

@ -1761,6 +1754,17 @@ impl RouteSpecStore {
        inner.cache.roll_transfers(last_ts, cur_ts);
    }

    /// Process answer statistics
    pub fn roll_answers(&self, cur_ts: Timestamp) {
        let inner = &mut *self.inner.lock();

        // Roll answers for locally allocated routes
        inner.content.roll_answers(cur_ts);

        // Roll answers for remote private routes
        inner.cache.roll_answers(cur_ts);
    }

    /// Convert private route list to binary blob
    pub fn private_routes_to_blob(private_routes: &[PrivateRoute]) -> VeilidAPIResult<Vec<u8>> {
        let mut buffer = vec![];

@ -365,6 +365,12 @@ impl RouteSpecStoreCache {
            v.get_stats_mut().roll_transfers(last_ts, cur_ts);
        }
    }
    /// Roll answer statistics
    pub fn roll_answers(&mut self, cur_ts: Timestamp) {
        for (_k, v) in self.remote_private_route_set_cache.iter_mut() {
            v.get_stats_mut().roll_answers(cur_ts);
        }
    }
}

impl Default for RouteSpecStoreCache {

@ -122,4 +122,10 @@ impl RouteSpecStoreContent {
            rssd.get_stats_mut().roll_transfers(last_ts, cur_ts);
        }
    }
    /// Roll answer statistics
    pub fn roll_answers(&mut self, cur_ts: Timestamp) {
        for rssd in self.details.values_mut() {
            rssd.get_stats_mut().roll_answers(cur_ts);
        }
    }
}

@ -5,30 +5,81 @@ pub(crate) struct RouteStats {
    /// Consecutive failed to send count
    #[serde(skip)]
    pub failed_to_send: u32,
    /// Questions lost
    /// Consecutive questions that didn't get an answer
    #[serde(skip)]
    pub questions_lost: u32,
    pub recent_lost_answers: u32,
    /// Timestamp of when the route was created
    pub created_ts: Timestamp,
    /// Timestamp of when the route was last checked for validity
    /// Timestamp of when the route was last checked for validity or received traffic
    #[serde(skip)]
    pub last_tested_ts: Option<Timestamp>,
    pub last_known_valid_ts: Option<Timestamp>,
    /// Timestamp of when the route was last sent to
    #[serde(skip)]
    pub last_sent_ts: Option<Timestamp>,
    /// Timestamp of when the route was last received over
    /// Timestamp of when the route last received a question or statement
    #[serde(skip)]
    pub last_received_ts: Option<Timestamp>,
    pub last_rcvd_question_ts: Option<Timestamp>,
    /// Timestamp of when the route last received an answer
    #[serde(skip)]
    pub last_rcvd_answer_ts: Option<Timestamp>,
    /// Transfers up and down
    pub transfer_stats_down_up: TransferStatsDownUp,
    pub transfer: TransferStatsDownUp,
    /// Latency stats
    pub latency_stats: LatencyStats,
    pub latency: LatencyStats,
    /// Answer stats
    pub answer: AnswerStats,
    /// Accounting mechanism for this route's RPC latency
    #[serde(skip)]
    latency_stats_accounting: LatencyStatsAccounting,
    /// Accounting mechanism for the bandwidth across this route
    #[serde(skip)]
    transfer_stats_accounting: TransferStatsAccounting,
    /// Accounting mechanism for this route's RPC answers
    #[serde(skip)]
    answer_stats_accounting: AnswerStatsAccounting,
}
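For readers tracking these renames through the Display impl and the methods in the hunks below, the mapping in this change is:

// RouteStats field renames (old -> new), taken from the hunk above:
//   questions_lost         -> recent_lost_answers
//   last_tested_ts         -> last_known_valid_ts   (now also set on receipt)
//   last_received_ts       -> last_rcvd_question_ts (answers get their own
//                             new last_rcvd_answer_ts field)
//   transfer_stats_down_up -> transfer
//   latency_stats          -> latency
// New: the answer field (AnswerStats) and answer_stats_accounting.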

impl fmt::Display for RouteStats {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        writeln!(f, "created: {}", self.created_ts)?;
        writeln!(
            f,
            "# recently-lost/failed-to-send: {} / {}",
            self.recent_lost_answers, self.failed_to_send
        )?;
        writeln!(
            f,
            "last_known_valid: {}",
            if let Some(ts) = &self.last_known_valid_ts {
                ts.to_string()
            } else {
                "None".to_owned()
            }
        )?;
        writeln!(
            f,
            "last_sent: {}",
            if let Some(ts) = &self.last_sent_ts {
                ts.to_string()
            } else {
                "None".to_owned()
            }
        )?;
        writeln!(
            f,
            "last_rcvd_question: {}",
            if let Some(ts) = &self.last_rcvd_question_ts {
                ts.to_string()
            } else {
                "None".to_owned()
            }
        )?;
        write!(f, "transfer:\n{}", indent_all_string(&self.transfer))?;
        write!(f, "latency: {}", self.latency)?;
        write!(f, "answer:\n{}", indent_all_string(&self.answer))?;

        Ok(())
    }
}

impl RouteStats {
@ -44,16 +95,28 @@ impl RouteStats {
        self.failed_to_send += 1;
    }

    /// Mark a route as having lost a question
    pub fn record_question_lost(&mut self) {
        self.questions_lost += 1;
    /// Mark a route as having lost an answer
    pub fn record_lost_answer(&mut self) {
        let cur_ts = Timestamp::now();
        self.recent_lost_answers += 1;
        self.answer_stats_accounting.record_lost_answer(cur_ts);
    }

    /// Mark a route as having received something
    pub fn record_received(&mut self, cur_ts: Timestamp, bytes: ByteCount) {
        self.last_received_ts = Some(cur_ts);
        self.last_tested_ts = Some(cur_ts);
    /// Mark a route as having received a question or statement
    pub fn record_question_received(&mut self, cur_ts: Timestamp, bytes: ByteCount) {
        self.last_rcvd_question_ts = Some(cur_ts);
        self.last_known_valid_ts = Some(cur_ts);
        self.transfer_stats_accounting.add_down(bytes);
        self.answer_stats_accounting.record_question(cur_ts);
    }

    /// Mark a route as having received an answer
    pub fn record_answer_received(&mut self, cur_ts: Timestamp, bytes: ByteCount) {
        self.last_rcvd_answer_ts = Some(cur_ts);
        self.last_known_valid_ts = Some(cur_ts);
        self.recent_lost_answers = 0;
        self.transfer_stats_accounting.add_down(bytes);
        self.answer_stats_accounting.record_answer(cur_ts);
    }
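A hypothetical call site, to show how the split receive-recording API above would be driven by an RPC layer; the dispatch function itself is not actual crate code:

    /// Sketch: route inbound traffic to the right recorder. Questions and
    /// statements prove the route works; answers additionally clear the
    /// consecutive-loss counter.
    fn sketch_on_route_receive(
        stats: &mut RouteStats,
        cur_ts: Timestamp,
        bytes: ByteCount,
        is_answer: bool,
    ) {
        if is_answer {
            stats.record_answer_received(cur_ts, bytes);
        } else {
            stats.record_question_received(cur_ts, bytes);
        }
    }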

    /// Mark a route as having been sent to
@ -67,58 +130,50 @@ impl RouteStats {

    /// Mark a route as having been sent to
    pub fn record_latency(&mut self, latency: TimestampDuration) {
        self.latency_stats = self.latency_stats_accounting.record_latency(latency);
    }

    /// Mark a route as having been tested
    pub fn record_tested(&mut self, cur_ts: Timestamp) {
        self.last_tested_ts = Some(cur_ts);

        // Reset question_lost and failed_to_send if we test clean
        self.failed_to_send = 0;
        self.questions_lost = 0;
        self.latency = self.latency_stats_accounting.record_latency(latency);
    }

    /// Roll transfers for these route stats
    pub fn roll_transfers(&mut self, last_ts: Timestamp, cur_ts: Timestamp) {
        self.transfer_stats_accounting.roll_transfers(
            last_ts,
            cur_ts,
            &mut self.transfer_stats_down_up,
        )
        self.transfer_stats_accounting
            .roll_transfers(last_ts, cur_ts, &mut self.transfer);
    }
    pub fn roll_answers(&mut self, cur_ts: Timestamp) {
        self.answer = self.answer_stats_accounting.roll_answers(cur_ts);
    }

    /// Get the latency stats
    pub fn latency_stats(&self) -> &LatencyStats {
        &self.latency_stats
        &self.latency
    }

    /// Get the transfer stats
    #[expect(dead_code)]
    pub fn transfer_stats(&self) -> &TransferStatsDownUp {
        &self.transfer_stats_down_up
        &self.transfer
    }

    /// Reset stats when network restarts
    pub fn reset(&mut self) {
        self.last_tested_ts = None;
        self.last_known_valid_ts = None;
        self.last_sent_ts = None;
        self.last_received_ts = None;
        self.last_rcvd_question_ts = None;
        self.last_rcvd_answer_ts = None;
        self.failed_to_send = 0;
        self.questions_lost = 0;
        self.recent_lost_answers = 0;
    }

    /// Check if a route needs testing
    pub fn needs_testing(&self, cur_ts: Timestamp) -> bool {
        // Has the route had any failures lately?
        if self.questions_lost > 0 || self.failed_to_send > 0 {
        if self.recent_lost_answers > 0 || self.failed_to_send > 0 {
            // If so, always test
            return true;
        }

        // Has the route been tested within the idle time we'd want to check things?
        // (also if we've received successfully over the route, this will get set)
        if let Some(last_tested_ts) = self.last_tested_ts {
        if let Some(last_tested_ts) = self.last_known_valid_ts {
            if cur_ts.saturating_sub(last_tested_ts)
                > TimestampDuration::new(ROUTE_MIN_IDLE_TIME_MS as u64 * 1000u64)
            {
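The testing policy above in one self-contained sketch. The constant value, the plain u64 math, and the behavior for a never-validated route (the hunk is truncated before that branch) are assumptions; the two-rule structure comes from the code:

// Hypothetical distillation of needs_testing() above.
const ROUTE_MIN_IDLE_TIME_MS: u64 = 30_000; // assumed value for the sketch

fn route_needs_testing(
    recent_lost_answers: u32,
    failed_to_send: u32,
    last_known_valid_ms: Option<u64>,
    now_ms: u64,
) -> bool {
    // Rule 1: any recent failure always forces a test.
    if recent_lost_answers > 0 || failed_to_send > 0 {
        return true;
    }
    // Rule 2: otherwise test only once the route has idled past the threshold.
    // (Treating a never-validated route as needing a test is an assumption.)
    match last_known_valid_ms {
        Some(ts) => now_ms.saturating_sub(ts) > ROUTE_MIN_IDLE_TIME_MS,
        None => true,
    }
}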

@ -94,19 +94,6 @@ impl RoutingTableInner {
        }
    }

    fn with_public_internet_routing_domain_mut<F, R>(&mut self, f: F) -> R
    where
        F: FnOnce(&mut PublicInternetRoutingDomainDetail) -> R,
    {
        f(&mut self.public_internet_routing_domain)
    }
    fn with_local_network_routing_domain_mut<F, R>(&mut self, f: F) -> R
    where
        F: FnOnce(&mut LocalNetworkRoutingDomainDetail) -> R,
    {
        f(&mut self.local_network_routing_domain)
    }

    pub fn relay_node(&self, domain: RoutingDomain) -> Option<FilteredNodeRef> {
        self.with_routing_domain(domain, |rdd| rdd.relay_node())
    }
@ -115,6 +102,17 @@ impl RoutingTableInner {
        self.with_routing_domain(domain, |rdd| rdd.relay_node_last_keepalive())
    }

    pub fn set_relay_node_last_keepalive(&mut self, domain: RoutingDomain, ts: Timestamp) {
        match domain {
            RoutingDomain::PublicInternet => self
                .public_internet_routing_domain
                .set_relay_node_last_keepalive(Some(ts)),
            RoutingDomain::LocalNetwork => self
                .local_network_routing_domain
                .set_relay_node_last_keepalive(Some(ts)),
        };
    }

    #[expect(dead_code)]
    pub fn has_dial_info(&self, domain: RoutingDomain) -> bool {
        self.with_routing_domain(domain, |rdd| !rdd.dial_info_details().is_empty())
@ -250,11 +248,6 @@ impl RoutingTableInner {
        self.with_routing_domain(routing_domain, |rdd| rdd.get_published_peer_info())
    }

    /// Return if this routing domain has a valid network class
    pub fn has_valid_network_class(&self, routing_domain: RoutingDomain) -> bool {
        self.with_routing_domain(routing_domain, |rdd| rdd.has_valid_network_class())
    }

    /// Return a copy of our node's current peerinfo (may not yet be published)
    pub fn get_current_peer_info(&self, routing_domain: RoutingDomain) -> Arc<PeerInfo> {
        self.with_routing_domain(routing_domain, |rdd| rdd.get_peer_info(self))
@ -477,6 +470,7 @@ impl RoutingTableInner {
        None
    }

    // Collect all entries that are 'needs_ping' and have some node info making them reachable somehow
    pub(super) fn get_nodes_needing_ping(
        &self,
        outer_self: RoutingTable,
@ -487,24 +481,36 @@ impl RoutingTableInner {
            .get_published_peer_info(routing_domain)
            .map(|pi| pi.signed_node_info().timestamp());

        // Collect all entries that are 'needs_ping' and have some node info making them reachable somehow
        let mut node_refs = Vec::<FilteredNodeRef>::with_capacity(self.bucket_entry_count());
        self.with_entries(cur_ts, BucketEntryState::Unreliable, |rti, entry| {
            let entry_needs_ping = |e: &BucketEntryInner| {
        let mut filters = VecDeque::new();

        // Remove our own node from the results
        let filter_self =
            Box::new(move |_rti: &RoutingTableInner, v: Option<Arc<BucketEntry>>| v.is_some())
                as RoutingTableEntryFilter;
        filters.push_back(filter_self);

        let filter_ping = Box::new(
            move |rti: &RoutingTableInner, v: Option<Arc<BucketEntry>>| {
                let entry = v.unwrap();
                entry.with_inner(|e| {
                    // If this entry isn't in the routing domain we are checking, don't include it
                    if !e.exists_in_routing_domain(rti, routing_domain) {
                        return false;
                    }

                    // If we don't have node status for this node, then we should ping it to get some node status
                    if e.has_node_info(routing_domain.into()) && e.node_status(routing_domain).is_none()
                    if e.has_node_info(routing_domain.into())
                        && e.node_status(routing_domain).is_none()
                    {
                        return true;
                    }

                    // If this entry needs a ping because this node hasn't seen our latest node info, then do it
                    if opt_own_node_info_ts.is_some()
                        && !e.has_seen_our_node_info_ts(routing_domain, opt_own_node_info_ts.unwrap())
                        && !e.has_seen_our_node_info_ts(
                            routing_domain,
                            opt_own_node_info_ts.unwrap(),
                        )
                    {
                        return true;
                    }
@ -515,19 +521,67 @@ impl RoutingTableInner {
                    }

                    false
                })
            },
        ) as RoutingTableEntryFilter;
        filters.push_back(filter_ping);

        // Sort by least recently contacted
        let compare = |_rti: &RoutingTableInner,
                       a_entry: &Option<Arc<BucketEntry>>,
                       b_entry: &Option<Arc<BucketEntry>>| {
            // same nodes are always the same
            if let Some(a_entry) = a_entry {
                if let Some(b_entry) = b_entry {
                    if Arc::ptr_eq(a_entry, b_entry) {
                        return core::cmp::Ordering::Equal;
                    }
                }
            } else if b_entry.is_none() {
                return core::cmp::Ordering::Equal;
            }

            // our own node always comes last (should not happen, here for completeness)
            if a_entry.is_none() {
                return core::cmp::Ordering::Greater;
            }
            if b_entry.is_none() {
                return core::cmp::Ordering::Less;
            }
            // Sort by least recently contacted regardless of reliability
            // If something needs a ping it should get it in the order of need
            let ae = a_entry.as_ref().unwrap();
            let be = b_entry.as_ref().unwrap();
            ae.with_inner(|ae| {
                be.with_inner(|be| {
                    let ca = ae
                        .peer_stats()
                        .rpc_stats
                        .last_question_ts
                        .unwrap_or(Timestamp::new(0))
                        .as_u64();
                    let cb = be
                        .peer_stats()
                        .rpc_stats
                        .last_question_ts
                        .unwrap_or(Timestamp::new(0))
                        .as_u64();

                    ca.cmp(&cb)
                })
            })
        };

            if entry.with_inner(entry_needs_ping) {
                node_refs.push(FilteredNodeRef::new(
        let transform = |_rti: &RoutingTableInner, v: Option<Arc<BucketEntry>>| {
            FilteredNodeRef::new(
                outer_self.clone(),
                    entry,
                v.unwrap().clone(),
                NodeRefFilter::new().with_routing_domain(routing_domain),
                Sequencing::default(),
                ));
            }
            Option::<()>::None
        });
        node_refs
            )
        };

        self.find_peers_with_sort_and_filter(usize::MAX, cur_ts, filters, compare, transform)
    }
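The old imperative scan above is replaced by the table's filter/sort/transform pipeline. A minimal standalone sketch of that pattern, with plain Vec and closures standing in for the crate's RoutingTableEntryFilter machinery:

// Hypothetical reduction of the filter -> compare -> transform pipeline.
fn find_with_sort_and_filter<T, U>(
    items: Vec<T>,
    mut filters: Vec<Box<dyn FnMut(&T) -> bool>>,
    mut compare: impl FnMut(&T, &T) -> std::cmp::Ordering,
    transform: impl FnMut(T) -> U,
) -> Vec<U> {
    // Keep only items that pass every filter in order.
    let mut kept: Vec<T> = items
        .into_iter()
        .filter(|it| filters.iter_mut().all(|f| f(it)))
        .collect();
    // Sort survivors, e.g. least recently contacted first.
    kept.sort_by(|a, b| compare(a, b));
    // Map each survivor to the caller's output type, e.g. a node ref.
    kept.into_iter().map(transform).collect()
}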

    #[expect(dead_code)]
@ -891,20 +945,16 @@ impl RoutingTableInner {
    }

        // Public internet routing domain is ready for app use,
        // when we have proper dialinfo/networkclass
        let public_internet_ready = !matches!(
            self.get_network_class(RoutingDomain::PublicInternet)
                .unwrap_or_default(),
            NetworkClass::Invalid
        );
        // when we have proper dialinfo/networkclass and it is published
        let public_internet_ready = self
            .get_published_peer_info(RoutingDomain::PublicInternet)
            .is_some();

        // Local internet routing domain is ready for app use
        // when we have proper dialinfo/networkclass
        let local_network_ready = !matches!(
            self.get_network_class(RoutingDomain::LocalNetwork)
                .unwrap_or_default(),
            NetworkClass::Invalid
        );
        // when we have proper dialinfo/networkclass and it is published
        let local_network_ready = self
            .get_published_peer_info(RoutingDomain::LocalNetwork)
            .is_some();

        let live_entry_counts = self.cached_entry_counts();

@ -1017,7 +1067,7 @@ impl RoutingTableInner {
            &'b Option<Arc<BucketEntry>>,
            &'b Option<Arc<BucketEntry>>,
        ) -> core::cmp::Ordering,
        T: for<'r, 't> FnMut(&'r RoutingTableInner, Option<Arc<BucketEntry>>) -> O,
        T: for<'r> FnMut(&'r RoutingTableInner, Option<Arc<BucketEntry>>) -> O,
    {
        // collect all the nodes for sorting
        let mut nodes =

@ -6,9 +6,7 @@ pub trait RoutingDomainEditorCommonTrait {
        address_type: Option<AddressType>,
        protocol_type: Option<ProtocolType>,
    ) -> &mut Self;
    fn clear_relay_node(&mut self) -> &mut Self;
    fn set_relay_node(&mut self, relay_node: NodeRef) -> &mut Self;
    fn set_relay_node_keepalive(&mut self, ts: Option<Timestamp>) -> &mut Self;
    fn set_relay_node(&mut self, relay_node: Option<NodeRef>) -> &mut Self;
    #[cfg_attr(target_arch = "wasm32", expect(dead_code))]
    fn add_dial_info(&mut self, dial_info: DialInfo, class: DialInfoClass) -> &mut Self;
    fn setup_network(
@ -18,7 +16,6 @@ pub trait RoutingDomainEditorCommonTrait {
        address_types: AddressTypeSet,
        capabilities: Vec<Capability>,
    ) -> &mut Self;
    fn set_network_class(&mut self, network_class: Option<NetworkClass>) -> &mut Self;
    fn commit(&mut self, pause_tasks: bool) -> SendPinBoxFutureLifetime<'_, bool>;
    fn shutdown(&mut self) -> SendPinBoxFutureLifetime<'_, ()>;
    fn publish(&mut self);
@ -41,17 +38,10 @@ impl<T: RoutingDomainDetailCommonAccessors> RoutingDomainDetailApplyCommonChange
                .clear_dial_info_details(address_type, protocol_type);
            }

            RoutingDomainChangeCommon::ClearRelayNode => {
                self.common_mut().set_relay_node(None);
            }

            RoutingDomainChangeCommon::SetRelayNode { relay_node } => {
                self.common_mut().set_relay_node(Some(relay_node.clone()))
                self.common_mut().set_relay_node(relay_node)
            }

            RoutingDomainChangeCommon::SetRelayNodeKeepalive { ts } => {
                self.common_mut().set_relay_node_last_keepalive(ts);
            }
            RoutingDomainChangeCommon::AddDialInfo { dial_info_detail } => {
                if !self.ensure_dial_info_is_valid(&dial_info_detail.dial_info) {
                    return;
@ -77,9 +67,6 @@ impl<T: RoutingDomainDetailCommonAccessors> RoutingDomainDetailApplyCommonChange
                    capabilities.clone(),
                );
            }
            RoutingDomainChangeCommon::SetNetworkClass { network_class } => {
                self.common_mut().set_network_class(network_class);
            }
        }
    }
}
@ -90,12 +77,8 @@ pub(super) enum RoutingDomainChangeCommon {
        address_type: Option<AddressType>,
        protocol_type: Option<ProtocolType>,
    },
    ClearRelayNode,
    SetRelayNode {
        relay_node: NodeRef,
    },
    SetRelayNodeKeepalive {
        ts: Option<Timestamp>,
        relay_node: Option<NodeRef>,
    },
    AddDialInfo {
        dial_info_detail: DialInfoDetail,
@ -110,7 +93,4 @@ pub(super) enum RoutingDomainChangeCommon {
        address_types: AddressTypeSet,
        capabilities: Vec<Capability>,
    },
    SetNetworkClass {
        network_class: Option<NetworkClass>,
    },
}

@ -47,27 +47,13 @@ impl RoutingDomainEditorCommonTrait for RoutingDomainEditorLocalNetwork {
        self
    }
    #[instrument(level = "debug", skip(self))]
    fn clear_relay_node(&mut self) -> &mut Self {
        self.changes.push(RoutingDomainChangeLocalNetwork::Common(
            RoutingDomainChangeCommon::ClearRelayNode,
        ));
        self
    }
    #[instrument(level = "debug", skip(self))]
    fn set_relay_node(&mut self, relay_node: NodeRef) -> &mut Self {
    fn set_relay_node(&mut self, relay_node: Option<NodeRef>) -> &mut Self {
        self.changes.push(RoutingDomainChangeLocalNetwork::Common(
            RoutingDomainChangeCommon::SetRelayNode { relay_node },
        ));
        self
    }
    #[instrument(level = "debug", skip(self))]
    fn set_relay_node_keepalive(&mut self, ts: Option<Timestamp>) -> &mut Self {
        self.changes.push(RoutingDomainChangeLocalNetwork::Common(
            RoutingDomainChangeCommon::SetRelayNodeKeepalive { ts },
        ));
        self
    }
    #[instrument(level = "debug", skip(self))]
    fn add_dial_info(&mut self, dial_info: DialInfo, class: DialInfoClass) -> &mut Self {
        self.changes.push(RoutingDomainChangeLocalNetwork::Common(
            RoutingDomainChangeCommon::AddDialInfo {
@ -116,14 +102,6 @@ impl RoutingDomainEditorCommonTrait for RoutingDomainEditorLocalNetwork {
        self
    }

    #[instrument(level = "debug", skip(self))]
    fn set_network_class(&mut self, network_class: Option<NetworkClass>) -> &mut Self {
        self.changes.push(RoutingDomainChangeLocalNetwork::Common(
            RoutingDomainChangeCommon::SetNetworkClass { network_class },
        ));
        self
    }

    #[instrument(level = "debug", skip(self))]
    fn commit(&mut self, pause_tasks: bool) -> SendPinBoxFutureLifetime<'_, bool> {
        Box::pin(async move {
@ -140,10 +118,11 @@ impl RoutingDomainEditorCommonTrait for RoutingDomainEditorLocalNetwork {

            // Apply changes
            let mut peer_info_changed = false;

            {
                let mut rti_lock = self.routing_table.inner.write();
                let rti = &mut rti_lock;
                rti.with_local_network_routing_domain_mut(|detail| {
                let detail = &mut rti.local_network_routing_domain;
                {
                    let old_dial_info_details = detail.dial_info_details().clone();
                    let old_relay_node = detail.relay_node();
                    let old_outbound_protocols = detail.outbound_protocols();
@ -157,7 +136,9 @@ impl RoutingDomainEditorCommonTrait for RoutingDomainEditorLocalNetwork {
                        RoutingDomainChangeLocalNetwork::Common(common_change) => {
                            detail.apply_common_change(common_change);
                        }
                        RoutingDomainChangeLocalNetwork::SetLocalNetworks { local_networks } => {
                        RoutingDomainChangeLocalNetwork::SetLocalNetworks {
                            local_networks,
                        } => {
                            detail.set_local_networks(local_networks);
                        }
                    }
@ -177,7 +158,10 @@ impl RoutingDomainEditorCommonTrait for RoutingDomainEditorLocalNetwork {
                        .filter(|di| !new_dial_info_details.contains(di))
                        .collect::<Vec<_>>();
                    if !removed_dial_info.is_empty() {
                        info!("[LocalNetwork] removed dial info: {:#?}", removed_dial_info);
                        info!(
                            "[LocalNetwork] removed dial info:\n{}",
                            indent_all_string(&removed_dial_info.to_multiline_string())
                        );
                        peer_info_changed = true;
                    }
                    let added_dial_info = new_dial_info_details
@ -185,7 +169,10 @@ impl RoutingDomainEditorCommonTrait for RoutingDomainEditorLocalNetwork {
                        .filter(|di| !old_dial_info_details.contains(di))
                        .collect::<Vec<_>>();
                    if !added_dial_info.is_empty() {
                        info!("[LocalNetwork] added dial info: {:#?}", added_dial_info);
                        info!(
                            "[LocalNetwork] added dial info:\n{}",
                            indent_all_string(&added_dial_info.to_multiline_string())
                        );
                        peer_info_changed = true;
                    }
                    if let Some(nrn) = new_relay_node {
@ -234,13 +221,22 @@ impl RoutingDomainEditorCommonTrait for RoutingDomainEditorLocalNetwork {
                        );
                        peer_info_changed = true;
                    }
                });
                }

                if peer_info_changed {
                    // Allow signed node info updates at same timestamp for otherwise dead nodes if our network has changed
                    rti.reset_all_updated_since_last_network_change();
                }
            }

            // Operations that require an unlocked routing table go here
            if peer_info_changed {
                // Update protections
                self.routing_table
                    .network_manager()
                    .connection_manager()
                    .update_protections();
            }
            peer_info_changed
        })
    }
@ -257,8 +253,7 @@ impl RoutingDomainEditorCommonTrait for RoutingDomainEditorLocalNetwork {
    fn shutdown(&mut self) -> SendPinBoxFutureLifetime<'_, ()> {
        Box::pin(async move {
            self.clear_dial_info_details(None, None)
                .set_network_class(None)
                .clear_relay_node()
                .set_relay_node(None)
                .commit(true)
                .await;
            self.routing_table

@ -70,6 +70,9 @@ impl RoutingDomainDetail for LocalNetworkRoutingDomainDetail {
    fn capabilities(&self) -> Vec<Capability> {
        self.common.capabilities()
    }
    fn requires_relay(&self) -> Option<RelayKind> {
        self.common.requires_relay()
    }
    fn relay_node(&self) -> Option<FilteredNodeRef> {
        self.common.relay_node()
    }
@ -79,10 +82,6 @@ impl RoutingDomainDetail for LocalNetworkRoutingDomainDetail {
    fn dial_info_details(&self) -> &Vec<DialInfoDetail> {
        self.common.dial_info_details()
    }
    fn has_valid_network_class(&self) -> bool {
        self.common.has_valid_network_class()
    }

    fn inbound_dial_info_filter(&self) -> DialInfoFilter {
        self.common.inbound_dial_info_filter()
    }
@ -113,6 +112,7 @@ impl RoutingDomainDetail for LocalNetworkRoutingDomainDetail {
    }

    fn publish_peer_info(&self, rti: &RoutingTableInner) -> bool {
        let peer_info = {
        let pi = self.get_peer_info(rti);

        // If the network class is not yet determined, don't publish
@ -122,7 +122,7 @@ impl RoutingDomainDetail for LocalNetworkRoutingDomainDetail {
        }

        // If we need a relay and we don't have one, don't publish yet
        if let Some(_relay_kind) = pi.signed_node_info().node_info().requires_relay() {
        if let Some(_relay_kind) = self.requires_relay() {
            if pi.signed_node_info().relay_ids().is_empty() {
                log_rtab!(debug "[LocalNetwork] Not publishing peer info that wants relay until we have a relay");
                return false;
@ -138,8 +138,15 @@ impl RoutingDomainDetail for LocalNetworkRoutingDomainDetail {
            }
        }

        log_rtab!(debug "[LocalNetwork] Published new peer info: {:#?}", pi);
        *ppi_lock = Some(pi);
        log_rtab!(debug "[LocalNetwork] Published new peer info: {}", pi);
        *ppi_lock = Some(pi.clone());

        pi
        };

        rti.unlocked_inner
            .network_manager()
            .report_peer_info_change(peer_info);

        true
    }
@ -200,4 +207,8 @@ impl RoutingDomainDetail for LocalNetworkRoutingDomainDetail {

        ContactMethod::Unreachable
    }

    fn set_relay_node_last_keepalive(&mut self, ts: Option<Timestamp>) {
        self.common.set_relay_node_last_keepalive(ts);
    }
}

@ -18,10 +18,10 @@ pub(crate) trait RoutingDomainDetail {
    fn inbound_protocols(&self) -> ProtocolTypeSet;
    fn address_types(&self) -> AddressTypeSet;
    fn capabilities(&self) -> Vec<Capability>;
    fn requires_relay(&self) -> Option<RelayKind>;
    fn relay_node(&self) -> Option<FilteredNodeRef>;
    fn relay_node_last_keepalive(&self) -> Option<Timestamp>;
    fn dial_info_details(&self) -> &Vec<DialInfoDetail>;
    fn has_valid_network_class(&self) -> bool;
    fn get_published_peer_info(&self) -> Option<Arc<PeerInfo>>;
    fn inbound_dial_info_filter(&self) -> DialInfoFilter;
    fn outbound_dial_info_filter(&self) -> DialInfoFilter;
@ -49,6 +49,9 @@ pub(crate) trait RoutingDomainDetail {
        sequencing: Sequencing,
        dif_sort: Option<Arc<DialInfoDetailSort>>,
    ) -> ContactMethod;

    // Set last relay keepalive time
    fn set_relay_node_last_keepalive(&mut self, ts: Option<Timestamp>);
}

trait RoutingDomainDetailCommonAccessors: RoutingDomainDetail {
@ -109,31 +112,29 @@ fn first_filtered_dial_info_detail_between_nodes(
#[derive(Debug)]
struct RoutingDomainDetailCommon {
    routing_domain: RoutingDomain,
    network_class: Option<NetworkClass>,
    outbound_protocols: ProtocolTypeSet,
    inbound_protocols: ProtocolTypeSet,
    address_types: AddressTypeSet,
    relay_node: Option<NodeRef>,
    relay_node_last_keepalive: Option<Timestamp>,
    capabilities: Vec<Capability>,
    dial_info_details: Vec<DialInfoDetail>,
    // caches
    cached_peer_info: Mutex<Option<Arc<PeerInfo>>>,
    relay_node_last_keepalive: Option<Timestamp>,
}

impl RoutingDomainDetailCommon {
    pub fn new(routing_domain: RoutingDomain) -> Self {
        Self {
            routing_domain,
            network_class: Default::default(),
            outbound_protocols: Default::default(),
            inbound_protocols: Default::default(),
            address_types: Default::default(),
            relay_node: Default::default(),
            relay_node_last_keepalive: Default::default(),
            capabilities: Default::default(),
            dial_info_details: Default::default(),
            cached_peer_info: Mutex::new(Default::default()),
            relay_node_last_keepalive: Default::default(),
        }
    }

@ -141,7 +142,24 @@ impl RoutingDomainDetailCommon {
    // Accessors

    pub fn network_class(&self) -> Option<NetworkClass> {
        self.network_class
        cfg_if! {
            if #[cfg(target_arch = "wasm32")] {
                Some(NetworkClass::WebApp)
            } else {
                if self.address_types.is_empty() {
                    None
                }
                else if self.dial_info_details.is_empty() {
                    if self.relay_node.is_none() {
                        None
                    } else {
                        Some(NetworkClass::OutboundOnly)
                    }
                } else {
                    Some(NetworkClass::InboundCapable)
                }
            }
        }
    }
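The derivation above replaces the stored network_class field with a pure function of current state. As a worked summary of the branches (non-wasm32 path):

// network_class() truth table, read directly from the code above:
//
//   address_types | dial_info_details | relay_node | result
//   --------------+-------------------+------------+-------------------------
//   empty         | (any)             | (any)      | None (not yet configured)
//   non-empty     | empty             | None       | None
//   non-empty     | empty             | Some       | Some(OutboundOnly)
//   non-empty     | non-empty         | (any)      | Some(InboundCapable)
//
// On wasm32 the answer is always Some(NetworkClass::WebApp).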

    pub fn outbound_protocols(&self) -> ProtocolTypeSet {
@ -160,6 +178,37 @@ impl RoutingDomainDetailCommon {
        self.capabilities.clone()
    }

    pub fn requires_relay(&self) -> Option<RelayKind> {
        match self.network_class()? {
            NetworkClass::InboundCapable => {
                let mut all_inbound_set: HashSet<(ProtocolType, AddressType)> = HashSet::new();
                for p in self.inbound_protocols {
                    for a in self.address_types {
                        all_inbound_set.insert((p, a));
                    }
                }
                for did in &self.dial_info_details {
                    if did.class.requires_relay() {
                        return Some(RelayKind::Inbound);
                    }
                    let ib = (did.dial_info.protocol_type(), did.dial_info.address_type());
                    all_inbound_set.remove(&ib);
                }
                if !all_inbound_set.is_empty() {
                    return Some(RelayKind::Inbound);
                }
            }
            NetworkClass::OutboundOnly => {
                return Some(RelayKind::Inbound);
            }
            NetworkClass::WebApp => {
                return Some(RelayKind::Outbound);
            }
            NetworkClass::Invalid => {}
        }
        None
    }
|
||||
|
||||
pub fn relay_node(&self) -> Option<FilteredNodeRef> {
|
||||
self.relay_node.as_ref().map(|nr| {
|
||||
nr.custom_filtered(NodeRefFilter::new().with_routing_domain(self.routing_domain))
|
@ -174,10 +223,6 @@ impl RoutingDomainDetailCommon {
        &self.dial_info_details
    }

    pub fn has_valid_network_class(&self) -> bool {
        self.network_class.unwrap_or(NetworkClass::Invalid) != NetworkClass::Invalid
    }

    pub fn inbound_dial_info_filter(&self) -> DialInfoFilter {
        DialInfoFilter::all()
            .with_protocol_type_set(self.inbound_protocols)
@ -219,19 +264,11 @@ impl RoutingDomainDetailCommon {
        self.clear_cache();
    }

    fn set_network_class(&mut self, network_class: Option<NetworkClass>) {
        self.network_class = network_class;
        self.clear_cache();
    }

    fn set_relay_node(&mut self, opt_relay_node: Option<NodeRef>) {
        self.relay_node = opt_relay_node;
        self.relay_node_last_keepalive = None;
        self.clear_cache();
    }
    fn set_relay_node_last_keepalive(&mut self, ts: Option<Timestamp>) {
        self.relay_node_last_keepalive = ts;
    }

    fn clear_dial_info_details(
        &mut self,
@ -267,12 +304,16 @@ impl RoutingDomainDetailCommon {
    //     self.clear_cache();
    // }

    fn set_relay_node_last_keepalive(&mut self, ts: Option<Timestamp>) {
        self.relay_node_last_keepalive = ts;
    }

    //////////////////////////////////////////////////////////////////////////////
    // Internal functions

    fn make_peer_info(&self, rti: &RoutingTableInner) -> PeerInfo {
        let node_info = NodeInfo::new(
            self.network_class.unwrap_or(NetworkClass::Invalid),
            self.network_class().unwrap_or(NetworkClass::Invalid),
            self.outbound_protocols,
            self.address_types,
            VALID_ENVELOPE_VERSIONS.to_vec(),

@ -17,6 +17,28 @@ impl RoutingDomainEditorPublicInternet {
            changes: Vec::new(),
        }
    }

    fn sanitize(&self, detail: &mut PublicInternetRoutingDomainDetail) {
        // Get the best dial info for each protocol type and address
        let mut best_dids: HashMap<(ProtocolType, Address), DialInfoDetail> = HashMap::new();
        for did in detail.common.dial_info_details() {
            let didkey = (did.dial_info.protocol_type(), did.dial_info.address());
            best_dids
                .entry(didkey)
                .and_modify(|e| {
                    if did.class < e.class {
                        *e = did.clone();
                    }
                })
                .or_insert(did.clone());
        }

        // Remove all but the best dial info for each protocol type, address type, and address
        detail.common.clear_dial_info_details(None, None);
        for did in best_dids.into_values() {
            detail.common.add_dial_info_detail(did);
        }
    }
}

impl RoutingDomainEditorCommonTrait for RoutingDomainEditorPublicInternet {
@ -36,27 +58,13 @@ impl RoutingDomainEditorCommonTrait for RoutingDomainEditorPublicInternet {
        self
    }
    #[instrument(level = "debug", skip(self))]
    fn clear_relay_node(&mut self) -> &mut Self {
        self.changes.push(RoutingDomainChangePublicInternet::Common(
            RoutingDomainChangeCommon::ClearRelayNode,
        ));
        self
    }
    #[instrument(level = "debug", skip(self))]
    fn set_relay_node(&mut self, relay_node: NodeRef) -> &mut Self {
    fn set_relay_node(&mut self, relay_node: Option<NodeRef>) -> &mut Self {
        self.changes.push(RoutingDomainChangePublicInternet::Common(
            RoutingDomainChangeCommon::SetRelayNode { relay_node },
        ));
        self
    }
    #[instrument(level = "debug", skip(self))]
    fn set_relay_node_keepalive(&mut self, ts: Option<Timestamp>) -> &mut Self {
        self.changes.push(RoutingDomainChangePublicInternet::Common(
            RoutingDomainChangeCommon::SetRelayNodeKeepalive { ts },
        ));
        self
    }
    #[instrument(level = "debug", skip(self))]
    fn add_dial_info(&mut self, dial_info: DialInfo, class: DialInfoClass) -> &mut Self {
        self.changes.push(RoutingDomainChangePublicInternet::Common(
            RoutingDomainChangeCommon::AddDialInfo {
@ -105,14 +113,6 @@ impl RoutingDomainEditorCommonTrait for RoutingDomainEditorPublicInternet {
        self
    }

    #[instrument(level = "debug", skip(self))]
    fn set_network_class(&mut self, network_class: Option<NetworkClass>) -> &mut Self {
        self.changes.push(RoutingDomainChangePublicInternet::Common(
            RoutingDomainChangeCommon::SetNetworkClass { network_class },
        ));
        self
    }

    #[instrument(level = "debug", skip(self))]
    fn commit(&mut self, pause_tasks: bool) -> SendPinBoxFutureLifetime<'_, bool> {
        Box::pin(async move {
@ -129,10 +129,11 @@ impl RoutingDomainEditorCommonTrait for RoutingDomainEditorPublicInternet {

            // Apply changes
            let mut peer_info_changed = false;

            {
                let mut rti_lock = self.routing_table.inner.write();
                let rti = &mut rti_lock;
                rti.with_public_internet_routing_domain_mut(|detail| {
                let detail = &mut rti.public_internet_routing_domain;
                {
                    let old_dial_info_details = detail.dial_info_details().clone();
                    let old_relay_node = detail.relay_node();
                    let old_outbound_protocols = detail.outbound_protocols();
@ -149,6 +150,9 @@ impl RoutingDomainEditorCommonTrait for RoutingDomainEditorPublicInternet {
                        }
                    }

                    // Sanitize peer info
                    self.sanitize(detail);

                    let new_dial_info_details = detail.dial_info_details().clone();
                    let new_relay_node = detail.relay_node();
                    let new_outbound_protocols = detail.outbound_protocols();
@ -164,8 +168,8 @@ impl RoutingDomainEditorCommonTrait for RoutingDomainEditorPublicInternet {
                        .collect::<Vec<_>>();
                    if !removed_dial_info.is_empty() {
                        info!(
                            "[PublicInternet] removed dial info: {:#?}",
                            removed_dial_info
                            "[PublicInternet] removed dial info:\n{}",
                            indent_all_string(&removed_dial_info.to_multiline_string())
                        );
                        peer_info_changed = true;
                    }
@ -174,7 +178,10 @@ impl RoutingDomainEditorCommonTrait for RoutingDomainEditorPublicInternet {
                        .filter(|di| !old_dial_info_details.contains(di))
                        .collect::<Vec<_>>();
                    if !added_dial_info.is_empty() {
                        info!("[PublicInternet] added dial info: {:#?}", added_dial_info);
                        info!(
                            "[PublicInternet] added dial info:\n{}",
                            indent_all_string(&added_dial_info.to_multiline_string())
                        );
                        peer_info_changed = true;
                    }
                    if let Some(nrn) = new_relay_node {
@ -223,12 +230,22 @@ impl RoutingDomainEditorCommonTrait for RoutingDomainEditorPublicInternet {
                        );
                        peer_info_changed = true;
                    }
                });
                }

                if peer_info_changed {
                    // Allow signed node info updates at same timestamp for otherwise dead nodes if our network has changed
                    rti.reset_all_updated_since_last_network_change();
                }
            }

            // Operations that require an unlocked routing table go here
            if peer_info_changed {
                // Update protections
                self.routing_table
                    .network_manager()
                    .connection_manager()
                    .update_protections();
            }

            peer_info_changed
        })
@ -242,10 +259,10 @@ impl RoutingDomainEditorCommonTrait for RoutingDomainEditorPublicInternet {
            .write()
            .publish_peer_info(RoutingDomain::PublicInternet);

        // Clear the routespecstore cache if our PublicInternet dial info has changed
        if changed {
            // Clear the routespecstore cache if our PublicInternet dial info has changed
            let rss = self.routing_table.route_spec_store();
            rss.reset();
            rss.reset_cache();
        }
    }

@ -253,8 +270,7 @@ impl RoutingDomainEditorCommonTrait for RoutingDomainEditorPublicInternet {
    fn shutdown(&mut self) -> SendPinBoxFutureLifetime<'_, ()> {
        Box::pin(async move {
            self.clear_dial_info_details(None, None)
                .set_network_class(None)
                .clear_relay_node()
                .set_relay_node(None)
                .commit(true)
                .await;
            self.routing_table

@ -51,6 +51,9 @@ impl RoutingDomainDetail for PublicInternetRoutingDomainDetail {
    fn capabilities(&self) -> Vec<Capability> {
        self.common.capabilities()
    }
    fn requires_relay(&self) -> Option<RelayKind> {
        self.common.requires_relay()
    }
    fn relay_node(&self) -> Option<FilteredNodeRef> {
        self.common.relay_node()
    }
@ -60,9 +63,6 @@ impl RoutingDomainDetail for PublicInternetRoutingDomainDetail {
    fn dial_info_details(&self) -> &Vec<DialInfoDetail> {
        self.common.dial_info_details()
    }
    fn has_valid_network_class(&self) -> bool {
        self.common.has_valid_network_class()
    }

    fn inbound_dial_info_filter(&self) -> DialInfoFilter {
        self.common.inbound_dial_info_filter()
@ -90,6 +90,7 @@ impl RoutingDomainDetail for PublicInternetRoutingDomainDetail {
    }

    fn publish_peer_info(&self, rti: &RoutingTableInner) -> bool {
        let peer_info = {
            let pi = self.get_peer_info(rti);

            // If the network class is not yet determined, don't publish
@ -99,7 +100,7 @@ impl RoutingDomainDetail for PublicInternetRoutingDomainDetail {
            }

            // If we need a relay and we don't have one, don't publish yet
            if let Some(_relay_kind) = pi.signed_node_info().node_info().requires_relay() {
            if let Some(_relay_kind) = self.requires_relay() {
                if pi.signed_node_info().relay_ids().is_empty() {
                    log_rtab!(debug "[PublicInternet] Not publishing peer info that wants relay until we have a relay");
                    return false;
@ -115,8 +116,15 @@ impl RoutingDomainDetail for PublicInternetRoutingDomainDetail {
            }
        }

        log_rtab!(debug "[PublicInternet] Published new peer info: {:#?}", pi);
        *ppi_lock = Some(pi);
            log_rtab!(debug "[PublicInternet] Published new peer info: {}", pi);
            *ppi_lock = Some(pi.clone());

            pi
        };

        rti.unlocked_inner
            .network_manager()
            .report_peer_info_change(peer_info);

        true
    }
@ -366,4 +374,8 @@ impl RoutingDomainDetail for PublicInternetRoutingDomainDetail {

        ContactMethod::Unreachable
    }

    fn set_relay_node_last_keepalive(&mut self, ts: Option<Timestamp>) {
        self.common.set_relay_node_last_keepalive(ts);
    }
}

@ -1,5 +1,4 @@
use crate::*;
use alloc::collections::VecDeque;
use super::*;

// Latency entry is per round-trip packet (ping or data)
// - Size is number of entries
@ -11,6 +10,17 @@ const ROLLING_LATENCIES_SIZE: usize = 10;
const ROLLING_TRANSFERS_SIZE: usize = 10;
pub const ROLLING_TRANSFERS_INTERVAL_SECS: u32 = 1;

// State entry is per state reason change
// - Size is number of entries
const ROLLING_STATE_REASON_SPAN_SIZE: usize = 32;
pub const UPDATE_STATE_STATS_INTERVAL_SECS: u32 = 1;

// Answer entries are in counts per interval
// - Size is number of entries
// - Interval is number of seconds in each entry
const ROLLING_ANSWERS_SIZE: usize = 10;
pub const ROLLING_ANSWER_INTERVAL_SECS: u32 = 60;

#[derive(Debug, Clone, Copy, PartialEq, Eq, Default)]
pub struct TransferCount {
    down: ByteCount,
@ -73,9 +83,11 @@ impl TransferStatsAccounting {
            transfer_stats.up.average += bpsu;
        }
        let len = self.rolling_transfers.len() as u64;
        if len > 0 {
            transfer_stats.down.average /= len;
            transfer_stats.up.average /= len;
        }
    }
}

#[derive(Debug, Clone, Default)]
@ -90,7 +102,7 @@ impl LatencyStatsAccounting {
    }
}

    pub fn record_latency(&mut self, latency: TimestampDuration) -> veilid_api::LatencyStats {
    pub fn record_latency(&mut self, latency: TimestampDuration) -> LatencyStats {
        while self.rolling_latencies.len() >= ROLLING_LATENCIES_SIZE {
            self.rolling_latencies.pop_front();
        }
@ -107,8 +119,274 @@ impl LatencyStatsAccounting {
            ls.average += *rl;
        }
        let len = self.rolling_latencies.len() as u64;
        if len > 0 {
            ls.average /= len;
        }

        ls
    }
}

#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct StateReasonSpan {
    state_reason: BucketEntryStateReason,
    enter_ts: Timestamp,
}

#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct StateSpan {
    state: BucketEntryState,
    enter_ts: Timestamp,
}

#[derive(Debug, Clone, Default)]
pub struct StateStatsAccounting {
    rolling_state_reason_spans: VecDeque<StateReasonSpan>,
    last_stats: Option<StateStats>,
}

impl StateStatsAccounting {
    pub fn new() -> Self {
        Self {
            rolling_state_reason_spans: VecDeque::new(),
            last_stats: None,
        }
    }

    fn make_stats(&self, cur_ts: Timestamp) -> StateStats {
        let mut ss = StateStats::default();
        let srs = &mut ss.reason;

        let mut last_ts = cur_ts;
        for rss in self.rolling_state_reason_spans.iter().rev() {
            let span_dur = last_ts.saturating_sub(rss.enter_ts);

            match BucketEntryState::from(rss.state_reason) {
                BucketEntryState::Punished => ss.punished += span_dur,
                BucketEntryState::Dead => ss.dead += span_dur,
                BucketEntryState::Unreliable => ss.unreliable += span_dur,
                BucketEntryState::Reliable => ss.reliable += span_dur,
            }
            match rss.state_reason {
                BucketEntryStateReason::Punished(_) => {
                    // Ignore punished nodes for now
                }
                BucketEntryStateReason::Dead(bucket_entry_dead_reason) => {
                    match bucket_entry_dead_reason {
                        BucketEntryDeadReason::CanNotSend => srs.can_not_send += span_dur,
                        BucketEntryDeadReason::TooManyLostAnswers => {
                            srs.too_many_lost_answers += span_dur
                        }
                        BucketEntryDeadReason::NoPingResponse => srs.no_ping_response += span_dur,
                    }
                }
                BucketEntryStateReason::Unreliable(bucket_entry_unreliable_reason) => {
                    match bucket_entry_unreliable_reason {
                        BucketEntryUnreliableReason::FailedToSend => srs.failed_to_send += span_dur,
                        BucketEntryUnreliableReason::LostAnswers => srs.lost_answers += span_dur,
                        BucketEntryUnreliableReason::NotSeenConsecutively => {
                            srs.not_seen_consecutively += span_dur
                        }
                        BucketEntryUnreliableReason::InUnreliablePingSpan => {
                            srs.in_unreliable_ping_span += span_dur
                        }
                    }
                }
                BucketEntryStateReason::Reliable => {
                    // Reliable nodes don't have a reason other than lack of unreliability
                }
            }

            last_ts = rss.enter_ts;
        }
        ss.span = cur_ts.saturating_sub(last_ts);
        ss
    }

    pub fn take_stats(&mut self) -> Option<StateStats> {
        self.last_stats.take()
    }

    pub fn record_state_reason(&mut self, cur_ts: Timestamp, state_reason: BucketEntryStateReason) {
        let new_span = if let Some(cur_span) = self.rolling_state_reason_spans.back() {
            if state_reason != cur_span.state_reason {
                while self.rolling_state_reason_spans.len() >= ROLLING_STATE_REASON_SPAN_SIZE {
                    self.rolling_state_reason_spans.pop_front();
                }
                true
            } else {
                false
            }
        } else {
            true
        };
        if new_span {
            self.last_stats = Some(self.make_stats(cur_ts));
            self.rolling_state_reason_spans.push_back(StateReasonSpan {
                state_reason,
                enter_ts: cur_ts,
            });
        }
    }
}
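
make_stats() walks the recorded spans newest-to-oldest and charges each state the wall-clock time it was held, with the open span charged up to cur_ts. The core of that walk reduced to integers (a sketch, not the real StateStats shape):

    use std::collections::HashMap;

    // spans: (state id, timestamp the node entered that state), oldest first.
    // Returns total time spent per state up to `now`.
    fn time_per_state(spans: &[(u8, u64)], now: u64) -> HashMap<u8, u64> {
        let mut acc: HashMap<u8, u64> = HashMap::new();
        let mut last_ts = now;
        for (state, enter_ts) in spans.iter().rev() {
            // the newest span runs until `now`, each older one until its successor began
            *acc.entry(*state).or_insert(0) += last_ts.saturating_sub(*enter_ts);
            last_ts = *enter_ts;
        }
        acc
    }

    fn main() {
        // Entered state 1 at t=0, state 2 at t=10; at t=25 that is 10 and 15.
        let spans = [(1u8, 0u64), (2, 10)];
        let t = time_per_state(&spans, 25);
        assert_eq!(t[&1], 10);
        assert_eq!(t[&2], 15);
    }
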
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct AnswerSpan {
    enter_ts: Timestamp,
    questions: u32,
    answers: u32,
    lost_answers: u32,
    current_consecutive_answers: u32,
    current_consecutive_lost_answers: u32,
    consecutive_answers_maximum: u32,
    consecutive_answers_total: u32,
    consecutive_answers_count: u32,
    consecutive_answers_minimum: u32,
    consecutive_lost_answers_maximum: u32,
    consecutive_lost_answers_total: u32,
    consecutive_lost_answers_count: u32,
    consecutive_lost_answers_minimum: u32,
}

impl AnswerSpan {
    pub fn new(cur_ts: Timestamp) -> Self {
        AnswerSpan {
            enter_ts: cur_ts,
            questions: 0,
            answers: 0,
            lost_answers: 0,
            current_consecutive_answers: 0,
            current_consecutive_lost_answers: 0,
            consecutive_answers_maximum: 0,
            consecutive_answers_total: 0,
            consecutive_answers_count: 0,
            consecutive_answers_minimum: 0,
            consecutive_lost_answers_maximum: 0,
            consecutive_lost_answers_total: 0,
            consecutive_lost_answers_count: 0,
            consecutive_lost_answers_minimum: 0,
        }
    }
}

#[derive(Debug, Clone, Default)]
pub struct AnswerStatsAccounting {
    rolling_answer_spans: VecDeque<AnswerSpan>,
}

impl AnswerStatsAccounting {
    pub fn new() -> Self {
        Self {
            rolling_answer_spans: VecDeque::new(),
        }
    }

    fn current_span(&mut self, cur_ts: Timestamp) -> &mut AnswerSpan {
        if self.rolling_answer_spans.is_empty() {
            self.rolling_answer_spans.push_back(AnswerSpan::new(cur_ts));
        }
        self.rolling_answer_spans.front_mut().unwrap()
    }

    fn make_stats(&self, cur_ts: Timestamp) -> AnswerStats {
        let mut questions = 0u32;
        let mut answers = 0u32;
        let mut lost_answers = 0u32;
        let mut consecutive_answers_maximum = 0u32;
        let mut consecutive_answers_average = 0u32;
        let mut consecutive_answers_minimum = u32::MAX;
        let mut consecutive_lost_answers_maximum = 0u32;
        let mut consecutive_lost_answers_average = 0u32;
        let mut consecutive_lost_answers_minimum = u32::MAX;

        let mut last_ts = cur_ts;
        for ras in self.rolling_answer_spans.iter().rev() {
            questions += ras.questions;
            answers += ras.answers;
            lost_answers += ras.lost_answers;

            consecutive_answers_maximum.max_assign(ras.consecutive_answers_maximum);
            consecutive_answers_minimum.min_assign(ras.consecutive_answers_minimum);
            consecutive_answers_average += if ras.consecutive_answers_total > 0 {
                ras.consecutive_answers_count / ras.consecutive_answers_total
            } else {
                0
            };

            consecutive_lost_answers_maximum.max_assign(ras.consecutive_lost_answers_maximum);
            consecutive_lost_answers_minimum.min_assign(ras.consecutive_lost_answers_minimum);
            consecutive_lost_answers_average += if ras.consecutive_lost_answers_total > 0 {
                ras.consecutive_lost_answers_count / ras.consecutive_lost_answers_total
            } else {
                0
            };

            last_ts = ras.enter_ts;
        }

        let len = self.rolling_answer_spans.len() as u32;
        if len > 0 {
            consecutive_answers_average /= len;
            consecutive_lost_answers_average /= len;
        }

        let span = cur_ts.saturating_sub(last_ts);

        AnswerStats {
            span,
            questions,
            answers,
            lost_answers,
            consecutive_answers_maximum,
            consecutive_answers_average,
            consecutive_answers_minimum,
            consecutive_lost_answers_maximum,
            consecutive_lost_answers_average,
            consecutive_lost_answers_minimum,
        }
    }

    pub fn roll_answers(&mut self, cur_ts: Timestamp) -> AnswerStats {
        let stats = self.make_stats(cur_ts);

        while self.rolling_answer_spans.len() >= ROLLING_ANSWERS_SIZE {
            self.rolling_answer_spans.pop_front();
        }
        self.rolling_answer_spans.push_back(AnswerSpan::new(cur_ts));

        stats
    }

    pub fn record_question(&mut self, cur_ts: Timestamp) {
        let cas = self.current_span(cur_ts);
        cas.questions += 1;
    }
    pub fn record_answer(&mut self, cur_ts: Timestamp) {
        let cas = self.current_span(cur_ts);
        cas.answers += 1;
        if cas.current_consecutive_lost_answers > 0 {
            cas.consecutive_lost_answers_maximum
                .max_assign(cas.current_consecutive_lost_answers);
            cas.consecutive_lost_answers_minimum
                .min_assign(cas.current_consecutive_lost_answers);
            cas.consecutive_lost_answers_total += cas.current_consecutive_lost_answers;
            cas.consecutive_lost_answers_count += 1;
            cas.current_consecutive_lost_answers = 0;
        }
        cas.current_consecutive_answers = 1;
    }
    pub fn record_lost_answer(&mut self, cur_ts: Timestamp) {
        let cas = self.current_span(cur_ts);
        cas.lost_answers += 1;
        if cas.current_consecutive_answers > 0 {
            cas.consecutive_answers_maximum
                .max_assign(cas.current_consecutive_answers);
            cas.consecutive_answers_minimum
                .min_assign(cas.current_consecutive_answers);
            cas.consecutive_answers_total += cas.current_consecutive_answers;
            cas.consecutive_answers_count += 1;
            cas.current_consecutive_answers = 0;
        }
        cas.current_consecutive_lost_answers = 1;
    }
}
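
record_answer() and record_lost_answer() above close out the opposite run before extending their own, folding the finished run into the span's aggregates. A minimal tracker showing just that flush-on-transition rule (the real code also keeps minima, totals, and counts per span):

    // Sketch of the consecutive-run bookkeeping, one aggregate per direction.
    #[derive(Default)]
    struct ConsecutiveRuns {
        current_answers: u32,
        current_lost: u32,
        answers_run_max: u32,
        lost_run_max: u32,
    }

    impl ConsecutiveRuns {
        fn record_answer(&mut self) {
            if self.current_lost > 0 {
                // a run of losses ends when an answer arrives; fold it in
                self.lost_run_max = self.lost_run_max.max(self.current_lost);
                self.current_lost = 0;
            }
            self.current_answers += 1;
        }

        fn record_lost_answer(&mut self) {
            if self.current_answers > 0 {
                self.answers_run_max = self.answers_run_max.max(self.current_answers);
                self.current_answers = 0;
            }
            self.current_lost += 1;
        }
    }

    fn main() {
        let mut runs = ConsecutiveRuns::default();
        for _ in 0..3 {
            runs.record_lost_answer();
        }
        runs.record_answer(); // closes a loss run of length 3
        assert_eq!(runs.lost_run_max, 3);
    }
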

@ -404,7 +404,17 @@ impl RoutingTable {
            peer_map.into_values().collect()
        } else {
            // If not direct, resolve bootstrap servers and recurse their TXT entries
            let bsrecs = self.resolve_bootstrap(bootstrap).await?;
            let bsrecs = match self
                .resolve_bootstrap(bootstrap)
                .timeout_at(stop_token.clone())
                .await
            {
                Ok(v) => v?,
                Err(_) => {
                    // Stop requested
                    return Ok(());
                }
            };
            let peers: Vec<Arc<PeerInfo>> = bsrecs
                .into_iter()
                .map(|bsrec| {
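
The bootstrap change races resolve_bootstrap() against the task's StopToken, so shutdown no longer waits on slow DNS. A freestanding sketch of the same pattern with the stop-token crate as this diff uses it; slow_lookup is a placeholder, and async-std is assumed only to drive the example:

    use std::time::Duration;
    use stop_token::future::FutureExt as _;
    use stop_token::{StopSource, StopToken};

    // Placeholder standing in for a long-running call like resolve_bootstrap().
    async fn slow_lookup() -> Result<Vec<String>, String> {
        async_std::task::sleep(Duration::from_secs(60)).await;
        Ok(vec!["bootstrap.example.net".to_string()])
    }

    async fn task_body(stop_token: StopToken) -> Result<(), String> {
        let _records = match slow_lookup().timeout_at(stop_token.clone()).await {
            Ok(v) => v?,             // lookup finished first; propagate its result
            Err(_) => return Ok(()), // stop requested; abandon the lookup cleanly
        };
        Ok(())
    }

    fn main() {
        let source = StopSource::new();
        let token = source.token();
        async_std::task::block_on(async move {
            // Dropping the source (e.g. from a shutdown path) cancels task_body.
            drop(source);
            task_body(token).await.unwrap();
        });
    }
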

@ -5,7 +5,7 @@ pub mod peer_minimum_refresh;
pub mod ping_validator;
pub mod private_route_management;
pub mod relay_management;
pub mod rolling_transfers;
pub mod update_statistics;

use super::*;

@ -25,6 +25,34 @@ impl RoutingTable {
        });
    }

    // Set update state stats tick task
    {
        let this = self.clone();
        self.unlocked_inner
            .update_state_stats_task
            .set_routine(move |s, l, t| {
                Box::pin(this.clone().update_state_stats_task_routine(
                    s,
                    Timestamp::new(l),
                    Timestamp::new(t),
                ))
            });
    }

    // Set rolling answers tick task
    {
        let this = self.clone();
        self.unlocked_inner
            .rolling_answers_task
            .set_routine(move |s, l, t| {
                Box::pin(this.clone().rolling_answers_task_routine(
                    s,
                    Timestamp::new(l),
                    Timestamp::new(t),
                ))
            });
    }

    // Set kick buckets tick task
    {
        let this = self.clone();
@ -67,13 +95,58 @@ impl RoutingTable {
        });
    }

    // Set ping validator tick task
    // Set ping validator PublicInternet tick task
    {
        let this = self.clone();
        self.unlocked_inner
            .ping_validator_task
            .ping_validator_public_internet_task
            .set_routine(move |s, l, t| {
                Box::pin(this.clone().ping_validator_task_routine(
                Box::pin(this.clone().ping_validator_public_internet_task_routine(
                    s,
                    Timestamp::new(l),
                    Timestamp::new(t),
                ))
            });
    }

    // Set ping validator LocalNetwork tick task
    {
        let this = self.clone();
        self.unlocked_inner
            .ping_validator_local_network_task
            .set_routine(move |s, l, t| {
                Box::pin(this.clone().ping_validator_local_network_task_routine(
                    s,
                    Timestamp::new(l),
                    Timestamp::new(t),
                ))
            });
    }

    // Set ping validator PublicInternet Relay tick task
    {
        let this = self.clone();
        self.unlocked_inner
            .ping_validator_public_internet_relay_task
            .set_routine(move |s, l, t| {
                Box::pin(
                    this.clone()
                        .ping_validator_public_internet_relay_task_routine(
                            s,
                            Timestamp::new(l),
                            Timestamp::new(t),
                        ),
                )
            });
    }

    // Set ping validator Active Watch tick task
    {
        let this = self.clone();
        self.unlocked_inner
            .ping_validator_active_watch_task
            .set_routine(move |s, l, t| {
                Box::pin(this.clone().ping_validator_active_watch_task_routine(
                    s,
                    Timestamp::new(l),
                    Timestamp::new(t),
@ -126,6 +199,12 @@ impl RoutingTable {
        // Do rolling transfers every ROLLING_TRANSFERS_INTERVAL_SECS secs
        self.unlocked_inner.rolling_transfers_task.tick().await?;

        // Do state stats update every UPDATE_STATE_STATS_INTERVAL_SECS secs
        self.unlocked_inner.update_state_stats_task.tick().await?;

        // Do rolling answers every ROLLING_ANSWER_INTERVAL_SECS secs
        self.unlocked_inner.rolling_answers_task.tick().await?;

        // Kick buckets task
        let kick_bucket_queue_count = self.unlocked_inner.kick_queue.lock().len();
        if kick_bucket_queue_count > 0 {
@ -165,22 +244,29 @@ impl RoutingTable {
        }

        // Ping validate some nodes to groom the table
        self.unlocked_inner.ping_validator_task.tick().await?;
        self.unlocked_inner
            .ping_validator_public_internet_task
            .tick()
            .await?;
        self.unlocked_inner
            .ping_validator_local_network_task
            .tick()
            .await?;
        self.unlocked_inner
            .ping_validator_public_internet_relay_task
            .tick()
            .await?;
        self.unlocked_inner
            .ping_validator_active_watch_task
            .tick()
            .await?;

        // Run the relay management task
        self.unlocked_inner.relay_management_task.tick().await?;

        // Only perform these operations if we already have a valid network class
        // and if we didn't need to bootstrap or perform a peer minimum refresh as these operations
        // require having a suitably full routing table and guaranteed ability to contact other nodes
        if !needs_bootstrap
            && !needs_peer_minimum_refresh
            && self.has_valid_network_class(RoutingDomain::PublicInternet)
        {
        // Get more nodes if we need to
        if !needs_bootstrap && !needs_peer_minimum_refresh {
            // Run closest peers refresh task
            // this will also inform other close nodes of -our- existence so we would
            // much rather perform this action -after- we have a valid network class
            // so our PeerInfo is valid when informing the other nodes of our existence.
            self.unlocked_inner
                .closest_peers_refresh_task
                .tick()
@ -212,6 +298,14 @@ impl RoutingTable {
        if let Err(e) = self.unlocked_inner.rolling_transfers_task.stop().await {
            error!("rolling_transfers_task not stopped: {}", e);
        }
        log_rtab!(debug "stopping update state stats task");
        if let Err(e) = self.unlocked_inner.update_state_stats_task.stop().await {
            error!("update_state_stats_task not stopped: {}", e);
        }
        log_rtab!(debug "stopping rolling answers task");
        if let Err(e) = self.unlocked_inner.rolling_answers_task.stop().await {
            error!("rolling_answers_task not stopped: {}", e);
        }
        log_rtab!(debug "stopping kick buckets task");
        if let Err(e) = self.unlocked_inner.kick_buckets_task.stop().await {
            error!("kick_buckets_task not stopped: {}", e);
@ -224,10 +318,44 @@ impl RoutingTable {
        if let Err(e) = self.unlocked_inner.peer_minimum_refresh_task.stop().await {
            error!("peer_minimum_refresh_task not stopped: {}", e);
        }
        log_rtab!(debug "stopping ping_validator task");
        if let Err(e) = self.unlocked_inner.ping_validator_task.stop().await {
            error!("ping_validator_task not stopped: {}", e);

        log_rtab!(debug "stopping ping_validator tasks");
        if let Err(e) = self
            .unlocked_inner
            .ping_validator_public_internet_task
            .stop()
            .await
        {
            error!("ping_validator_public_internet_task not stopped: {}", e);
        }
        if let Err(e) = self
            .unlocked_inner
            .ping_validator_local_network_task
            .stop()
            .await
        {
            error!("ping_validator_local_network_task not stopped: {}", e);
        }
        if let Err(e) = self
            .unlocked_inner
            .ping_validator_public_internet_relay_task
            .stop()
            .await
        {
            error!(
                "ping_validator_public_internet_relay_task not stopped: {}",
                e
            );
        }
        if let Err(e) = self
            .unlocked_inner
            .ping_validator_active_watch_task
            .stop()
            .await
        {
            error!("ping_validator_active_watch_task not stopped: {}", e);
        }

        log_rtab!(debug "stopping relay management task");
        if let Err(e) = self.unlocked_inner.relay_management_task.stop().await {
            warn!("relay_management_task not stopped: {}", e);

@ -7,17 +7,92 @@ const RELAY_KEEPALIVE_PING_INTERVAL_SECS: u32 = 10;
/// Keepalive pings are done for active watch nodes to make sure they are still there
const ACTIVE_WATCH_KEEPALIVE_PING_INTERVAL_SECS: u32 = 10;

/// Ping queue processing depth
const MAX_PARALLEL_PINGS: usize = 16;
/// Ping queue processing depth per validator
const MAX_PARALLEL_PINGS: usize = 8;

use futures_util::stream::{FuturesUnordered, StreamExt};
use futures_util::FutureExt;
use stop_token::future::FutureExt as StopFutureExt;

type PingValidatorFuture =
    SendPinBoxFuture<Result<NetworkResult<Answer<Option<SenderInfo>>>, RPCError>>;
type PingValidatorFuture = SendPinBoxFuture<Result<(), RPCError>>;

impl RoutingTable {
    // Task routine for PublicInternet status pings
    #[instrument(level = "trace", skip(self), err)]
    pub(crate) async fn ping_validator_public_internet_task_routine(
        self,
        stop_token: StopToken,
        _last_ts: Timestamp,
        cur_ts: Timestamp,
    ) -> EyreResult<()> {
        let mut future_queue: VecDeque<PingValidatorFuture> = VecDeque::new();

        self.ping_validator_public_internet(cur_ts, &mut future_queue)
            .await?;

        self.process_ping_validation_queue("PublicInternet", stop_token, cur_ts, future_queue)
            .await;

        Ok(())
    }

    // Task routine for LocalNetwork status pings
    #[instrument(level = "trace", skip(self), err)]
    pub(crate) async fn ping_validator_local_network_task_routine(
        self,
        stop_token: StopToken,
        _last_ts: Timestamp,
        cur_ts: Timestamp,
    ) -> EyreResult<()> {
        let mut future_queue: VecDeque<PingValidatorFuture> = VecDeque::new();

        self.ping_validator_local_network(cur_ts, &mut future_queue)
            .await?;

        self.process_ping_validation_queue("LocalNetwork", stop_token, cur_ts, future_queue)
            .await;

        Ok(())
    }

    // Task routine for PublicInternet relay keepalive pings
    #[instrument(level = "trace", skip(self), err)]
    pub(crate) async fn ping_validator_public_internet_relay_task_routine(
        self,
        stop_token: StopToken,
        _last_ts: Timestamp,
        cur_ts: Timestamp,
    ) -> EyreResult<()> {
        let mut future_queue: VecDeque<PingValidatorFuture> = VecDeque::new();

        self.relay_keepalive_public_internet(cur_ts, &mut future_queue)
            .await?;

        self.process_ping_validation_queue("RelayKeepalive", stop_token, cur_ts, future_queue)
            .await;

        Ok(())
    }

    // Task routine for active watch keepalive pings
    #[instrument(level = "trace", skip(self), err)]
    pub(crate) async fn ping_validator_active_watch_task_routine(
        self,
        stop_token: StopToken,
        _last_ts: Timestamp,
        cur_ts: Timestamp,
    ) -> EyreResult<()> {
        let mut future_queue: VecDeque<PingValidatorFuture> = VecDeque::new();

        self.active_watches_keepalive_public_internet(cur_ts, &mut future_queue)
            .await?;

        self.process_ping_validation_queue("WatchKeepalive", stop_token, cur_ts, future_queue)
            .await;

        Ok(())
    }

    ////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////

    // Ping the relay to keep it alive, over every protocol it is relaying for us
    #[instrument(level = "trace", skip(self, futurequeue), err)]
    async fn relay_keepalive_public_internet(
@ -49,10 +124,9 @@ impl RoutingTable {
            return Ok(());
        }
        // Say we're doing this keepalive now
        self.edit_public_internet_routing_domain()
            .set_relay_node_keepalive(Some(cur_ts))
            .commit(false)
            .await;
        self.inner
            .write()
            .set_relay_node_last_keepalive(RoutingDomain::PublicInternet, cur_ts);

        // We need to keep-alive at one connection per ordering for relays
        // but also one per NAT mapping that we need to keep open for our inbound dial info
@ -107,13 +181,13 @@ impl RoutingTable {

        for relay_nr_filtered in relay_noderefs {
            let rpc = rpc.clone();

            log_rtab!("--> Keepalive ping to {:?}", relay_nr_filtered);

            futurequeue.push_back(
                async move {
                    rpc.rpc_call_status(Destination::direct(relay_nr_filtered))
                        .await
                    log_rtab!("--> PublicInternet Relay ping to {:?}", relay_nr_filtered);
                    let _ = rpc
                        .rpc_call_status(Destination::direct(relay_nr_filtered))
                        .await?;
                    Ok(())
                }
                .boxed(),
            );
@ -151,17 +225,15 @@ impl RoutingTable {

        // Get all the active watches from the storage manager
        let storage_manager = self.unlocked_inner.network_manager.storage_manager();
        let watch_node_refs = storage_manager.get_active_watch_nodes().await;
        let watch_destinations = storage_manager.get_active_watch_nodes().await;

        for watch_nr in watch_node_refs {
        for watch_destination in watch_destinations {
            let rpc = rpc.clone();

            log_rtab!("--> Watch ping to {:?}", watch_nr);

            futurequeue.push_back(
                async move {
                    rpc.rpc_call_status(Destination::direct(watch_nr.default_filtered()))
                        .await
                    log_rtab!("--> Watch Keepalive ping to {:?}", watch_destination);
                    let _ = rpc.rpc_call_status(watch_destination).await?;
                    Ok(())
                }
                .boxed(),
            );
@ -182,20 +254,19 @@ impl RoutingTable {
        // Get all nodes needing pings in the PublicInternet routing domain
        let node_refs = self.get_nodes_needing_ping(RoutingDomain::PublicInternet, cur_ts);

        // If we have a relay, let's ping for NAT keepalives
        self.relay_keepalive_public_internet(cur_ts, futurequeue)
            .await?;

        // Check active watch keepalives
        self.active_watches_keepalive_public_internet(cur_ts, futurequeue)
            .await?;

        // Just do a single ping with the best protocol for all the other nodes to check for liveness
        for nr in node_refs {
            let nr = nr.sequencing_clone(Sequencing::PreferOrdered);

            let rpc = rpc.clone();
            log_rtab!("--> Validator ping to {:?}", nr);
            futurequeue.push_back(
                async move { rpc.rpc_call_status(Destination::direct(nr)).await }.boxed(),
                async move {
                    #[cfg(feature = "verbose-tracing")]
                    log_rtab!(debug "--> PublicInternet Validator ping to {:?}", nr);
                    let _ = rpc.rpc_call_status(Destination::direct(nr)).await?;
                    Ok(())
                }
                .boxed(),
            );
        }

@ -215,76 +286,58 @@ impl RoutingTable {
        // Get all nodes needing pings in the LocalNetwork routing domain
        let node_refs = self.get_nodes_needing_ping(RoutingDomain::LocalNetwork, cur_ts);

        // For all nodes needing pings, figure out how many and over what protocols
        // Just do a single ping with the best protocol for all the other nodes to check for liveness
        for nr in node_refs {
            let nr = nr.sequencing_clone(Sequencing::PreferOrdered);

            let rpc = rpc.clone();

            // Just do a single ping with the best protocol for all the nodes
            futurequeue.push_back(
                async move { rpc.rpc_call_status(Destination::direct(nr)).await }.boxed(),
                async move {
                    #[cfg(feature = "verbose-tracing")]
                    log_rtab!(debug "--> LocalNetwork Validator ping to {:?}", nr);
                    let _ = rpc.rpc_call_status(Destination::direct(nr)).await?;
                    Ok(())
                }
                .boxed(),
            );
        }

        Ok(())
    }

    // Ping each node in the routing table if they need to be pinged
    // to determine their reliability
    #[instrument(level = "trace", skip(self), err)]
    pub(crate) async fn ping_validator_task_routine(
        self,
    // Common handler for running ping validations in a batch
    async fn process_ping_validation_queue(
        &self,
        name: &str,
        stop_token: StopToken,
        _last_ts: Timestamp,
        cur_ts: Timestamp,
    ) -> EyreResult<()> {
        let mut futurequeue: VecDeque<PingValidatorFuture> = VecDeque::new();
        future_queue: VecDeque<PingValidatorFuture>,
    ) {
        let count = future_queue.len();
        if count == 0 {
            return;
        }
        log_rtab!(debug "[{}] Ping validation queue: {} remaining", name, count);

        // PublicInternet
        self.ping_validator_public_internet(cur_ts, &mut futurequeue)
            .await?;

        // LocalNetwork
        self.ping_validator_local_network(cur_ts, &mut futurequeue)
            .await?;

        // Wait for ping futures to complete in parallel
        let mut unord = FuturesUnordered::new();

        while !unord.is_empty() || !futurequeue.is_empty() {
            log_rtab!(
                "Ping validation queue: {} remaining, {} in progress",
                futurequeue.len(),
                unord.len()
        let atomic_count = AtomicUsize::new(count);
        process_batched_future_queue(future_queue, MAX_PARALLEL_PINGS, stop_token, |res| async {
            if let Err(e) = res {
                log_rtab!(error "[{}] Error performing status ping: {}", name, e);
            }
            let remaining = atomic_count.fetch_sub(1, Ordering::AcqRel) - 1;
            if remaining > 0 {
                log_rtab!(debug "[{}] Ping validation queue: {} remaining", name, remaining);
            }
        })
        .await;
        let done_ts = Timestamp::now();
        log_rtab!(debug
            "[{}] Ping validation queue finished {} pings in {}",
            name,
            count,
            done_ts - cur_ts
        );

            // Process one unordered futures if we have some
            match unord
                .next()
                .timeout_at(stop_token.clone())
                .in_current_span()
                .await
            {
                Ok(Some(_)) => {
                    // Some ping completed
                }
                Ok(None) => {
                    // We're empty
                }
                Err(_) => {
                    // Timeout means we drop the rest because we were asked to stop
                    break;
                }
            }

            // Fill unord up to max parallelism
            while unord.len() < MAX_PARALLEL_PINGS {
                let Some(fq) = futurequeue.pop_front() else {
                    break;
                };
                unord.push(fq);
            }
        }

        Ok(())
    }
}
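
process_ping_validation_queue() hands the batch to process_batched_future_queue(), whose definition is not part of this diff. The sketch below is only a guess at the shape of such a helper, bounded parallelism plus cooperative stop; its signature and behavior are assumptions, not Veilid's actual implementation:

    use std::collections::VecDeque;
    use std::future::Future;

    use futures_util::stream::{FuturesUnordered, StreamExt};
    use stop_token::future::FutureExt as _;
    use stop_token::StopToken;

    // Hypothetical stand-in: drain a queue of futures with at most
    // `max_parallel` in flight, stopping early when the StopToken fires.
    async fn process_batched_future_queue<F, T, C, CFut>(
        mut future_queue: VecDeque<F>,
        max_parallel: usize,
        stop_token: StopToken,
        mut on_result: C,
    ) where
        F: Future<Output = T>,
        C: FnMut(T) -> CFut,
        CFut: Future<Output = ()>,
    {
        let mut in_flight = FuturesUnordered::new();
        while !in_flight.is_empty() || !future_queue.is_empty() {
            // Top up to the parallelism limit before waiting on anything.
            while in_flight.len() < max_parallel {
                let Some(f) = future_queue.pop_front() else {
                    break;
                };
                in_flight.push(f);
            }
            match in_flight.next().timeout_at(stop_token.clone()).await {
                Ok(Some(result)) => on_result(result).await, // one future finished
                Ok(None) => break, // nothing left in flight (queue is empty too)
                Err(_) => break,   // stop requested: drop the remaining futures
            }
        }
    }

With MAX_PARALLEL_PINGS now 8 per validator and four separate validator tasks, each queue drains independently this way, which is what gives relay keepalives their own parallelism lane apart from ordinary liveness pings.
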
|
@ -2,6 +2,7 @@ use super::*;
|
||||
|
||||
use futures_util::stream::{FuturesUnordered, StreamExt};
|
||||
use futures_util::FutureExt;
|
||||
use stop_token::future::FutureExt as _;
|
||||
|
||||
const BACKGROUND_SAFETY_ROUTE_COUNT: usize = 2;
|
||||
|
||||
@ -58,12 +59,12 @@ impl RoutingTable {
|
||||
}
|
||||
// If this has been published, always test if we need it
|
||||
// Also if the route has never been tested, test it at least once
|
||||
if v.is_published() || stats.last_tested_ts.is_none() {
|
||||
if v.is_published() || stats.last_known_valid_ts.is_none() {
|
||||
must_test_routes.push(*k);
|
||||
}
|
||||
// If this is a default route hop length, include it in routes to keep alive
|
||||
else if v.hop_count() == default_route_hop_count {
|
||||
unpublished_routes.push((*k, stats.latency_stats.average.as_u64()));
|
||||
unpublished_routes.push((*k, stats.latency.average.as_u64()));
|
||||
}
|
||||
// Else this is a route that hasnt been used recently enough and we can tear it down
|
||||
else {
|
||||
@ -102,10 +103,10 @@ impl RoutingTable {
|
||||
}
|
||||
|
||||
/// Test set of routes and remove the ones that don't test clean
|
||||
#[instrument(level = "trace", skip(self, _stop_token), err)]
|
||||
#[instrument(level = "trace", skip(self, stop_token), err)]
|
||||
async fn test_route_set(
|
||||
&self,
|
||||
_stop_token: StopToken,
|
||||
stop_token: StopToken,
|
||||
routes_needing_testing: Vec<RouteId>,
|
||||
) -> EyreResult<()> {
|
||||
if routes_needing_testing.is_empty() {
|
||||
@ -152,7 +153,7 @@ impl RoutingTable {
|
||||
}
|
||||
|
||||
// Wait for test_route futures to complete in parallel
|
||||
while unord.next().await.is_some() {}
|
||||
while let Ok(Some(())) = unord.next().timeout_at(stop_token.clone()).await {}
|
||||
}
|
||||
|
||||
// Process failed routes
|
||||

@ -14,7 +14,11 @@ impl RoutingTable {
    }

    // If we -need- a relay always request one
    if let Some(rk) = own_node_info.requires_relay() {
    let requires_relay = self
        .inner
        .read()
        .with_routing_domain(RoutingDomain::PublicInternet, |rdd| rdd.requires_relay());
    if let Some(rk) = requires_relay {
        return Some(rk);
    }

@ -71,7 +75,7 @@ impl RoutingTable {
                BucketEntryStateReason::Dead(_) | BucketEntryStateReason::Punished(_)
            ) {
                log_rtab!(debug "Relay node is now {:?}, dropping relay {}", state_reason, relay_node);
                editor.clear_relay_node();
                editor.set_relay_node(None);
                false
            }
            // Relay node no longer can relay
@ -80,7 +84,7 @@ impl RoutingTable {
                    "Relay node can no longer relay, dropping relay {}",
                    relay_node
                );
                editor.clear_relay_node();
                editor.set_relay_node(None);
                false
            }
            // Relay node is no longer wanted
@ -89,7 +93,7 @@ impl RoutingTable {
                    "Relay node no longer desired, dropping relay {}",
                    relay_node
                );
                editor.clear_relay_node();
                editor.set_relay_node(None);
                false
            } else {
                true
@ -114,7 +118,7 @@ impl RoutingTable {
            match self.register_node_with_peer_info(outbound_relay_peerinfo, false) {
                Ok(nr) => {
                    log_rtab!(debug "Outbound relay node selected: {}", nr);
                    editor.set_relay_node(nr.unfiltered());
                    editor.set_relay_node(Some(nr.unfiltered()));
                    got_outbound_relay = true;
                }
                Err(e) => {
@ -133,7 +137,7 @@ impl RoutingTable {
                relay_node_filter,
            ) {
                log_rtab!(debug "Inbound relay node selected: {}", nr);
                editor.set_relay_node(nr);
                editor.set_relay_node(Some(nr));
            }
        }
    }
@ -142,10 +146,6 @@ impl RoutingTable {
    if editor.commit(false).await {
        // Try to publish the peer info
        editor.publish();

        self.network_manager()
            .connection_manager()
            .update_protections();
    }

    Ok(())

@ -1,36 +0,0 @@
use super::*;

impl RoutingTable {
    // Compute transfer statistics to determine how 'fast' a node is
    #[instrument(level = "trace", skip(self), err)]
    pub(crate) async fn rolling_transfers_task_routine(
        self,
        _stop_token: StopToken,
        last_ts: Timestamp,
        cur_ts: Timestamp,
    ) -> EyreResult<()> {
        // log_rtab!("--- rolling_transfers task");
        {
            let inner = &mut *self.inner.write();

            // Roll our own node's transfers
            inner.self_transfer_stats_accounting.roll_transfers(
                last_ts,
                cur_ts,
                &mut inner.self_transfer_stats,
            );

            // Roll all bucket entry transfers
            let all_entries: Vec<Arc<BucketEntry>> = inner.all_entries.iter().collect();
            for entry in all_entries {
                entry.with_mut(inner, |_rti, e| e.roll_transfers(last_ts, cur_ts));
            }
        }

        // Roll all route transfers
        let rss = self.route_spec_store();
        rss.roll_transfers(last_ts, cur_ts);

        Ok(())
    }
}

81 veilid-core/src/routing_table/tasks/update_statistics.rs Normal file
@ -0,0 +1,81 @@
use super::*;

impl RoutingTable {
    // Compute transfer statistics to determine how 'fast' a node is
    #[instrument(level = "trace", skip(self), err)]
    pub(crate) async fn rolling_transfers_task_routine(
        self,
        _stop_token: StopToken,
        last_ts: Timestamp,
        cur_ts: Timestamp,
    ) -> EyreResult<()> {
        {
            let inner = &mut *self.inner.write();

            // Roll our own node's transfers
            inner.self_transfer_stats_accounting.roll_transfers(
                last_ts,
                cur_ts,
                &mut inner.self_transfer_stats,
            );

            // Roll all bucket entry transfers
            let all_entries: Vec<Arc<BucketEntry>> = inner.all_entries.iter().collect();
            for entry in all_entries {
                entry.with_mut(inner, |_rti, e| e.roll_transfers(last_ts, cur_ts));
            }
        }

        // Roll all route transfers
        let rss = self.route_spec_store();
        rss.roll_transfers(last_ts, cur_ts);

        Ok(())
    }

    // Update state statistics in PeerStats
    #[instrument(level = "trace", skip(self), err)]
    pub(crate) async fn update_state_stats_task_routine(
        self,
        _stop_token: StopToken,
        _last_ts: Timestamp,
        _cur_ts: Timestamp,
    ) -> EyreResult<()> {
        {
            let inner = &mut *self.inner.write();

            // Roll all bucket entry transfers
            let all_entries: Vec<Arc<BucketEntry>> = inner.all_entries.iter().collect();
            for entry in all_entries {
                entry.with_mut(inner, |_rti, e| e.update_state_stats());
            }
        }

        Ok(())
    }

    // Update rolling answers in PeerStats
    #[instrument(level = "trace", skip(self), err)]
    pub(crate) async fn rolling_answers_task_routine(
        self,
        _stop_token: StopToken,
        _last_ts: Timestamp,
        cur_ts: Timestamp,
    ) -> EyreResult<()> {
        {
            let inner = &mut *self.inner.write();

            // Roll all bucket entry answers stats
            let all_entries: Vec<Arc<BucketEntry>> = inner.all_entries.iter().collect();
            for entry in all_entries {
                entry.with_mut(inner, |_rti, e| e.roll_answer_stats(cur_ts));
            }
        }

        // Roll all route answers
        let rss = self.route_spec_store();
        rss.roll_answers(cur_ts);

        Ok(())
    }
}

@ -7,6 +7,12 @@ pub struct DialInfoDetail {
    pub dial_info: DialInfo,
}

impl fmt::Display for DialInfoDetail {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "{:?}:{}", self.class, self.dial_info)
    }
}

impl MatchesDialInfoFilter for DialInfoDetail {
    fn matches_filter(&self, filter: &DialInfoFilter) -> bool {
        self.dial_info.matches_filter(filter)

@ -15,7 +15,7 @@ pub const CAP_BLOCKSTORE: Capability = FourCC(*b"BLOC");

pub const DISTANCE_METRIC_CAPABILITIES: &[Capability] = &[CAP_DHT, CAP_DHT_WATCH];

#[derive(Clone, Default, PartialEq, Eq, Debug, Serialize, Deserialize)]
#[derive(Debug, Clone, Default, PartialEq, Eq, Serialize, Deserialize)]
pub struct NodeInfo {
    network_class: NetworkClass,
    outbound_protocols: ProtocolTypeSet,
@ -26,6 +26,22 @@ pub struct NodeInfo {
    dial_info_detail_list: Vec<DialInfoDetail>,
}

impl fmt::Display for NodeInfo {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        writeln!(f, "network_class: {:?}", self.network_class)?;
        writeln!(f, "outbound_protocols: {:?}", self.outbound_protocols)?;
        writeln!(f, "address_types: {:?}", self.address_types)?;
        writeln!(f, "envelope_support: {:?}", self.envelope_support)?;
        writeln!(f, "crypto_support: {:?}", self.crypto_support)?;
        writeln!(f, "capabilities: {:?}", self.capabilities)?;
        writeln!(f, "dial_info_detail_list:")?;
        for did in &self.dial_info_detail_list {
            writeln!(f, " {}", did)?;
        }
        Ok(())
    }
}

impl NodeInfo {
    pub fn new(
        network_class: NetworkClass,
@ -130,27 +146,6 @@ impl NodeInfo {
        !self.dial_info_detail_list.is_empty()
    }

    /// Is some relay required either for signal or inbound relay or outbound relay?
    pub fn requires_relay(&self) -> Option<RelayKind> {
        match self.network_class {
            NetworkClass::InboundCapable => {
                for did in &self.dial_info_detail_list {
                    if did.class.requires_relay() {
                        return Some(RelayKind::Inbound);
                    }
                }
            }
            NetworkClass::OutboundOnly => {
                return Some(RelayKind::Inbound);
            }
            NetworkClass::WebApp => {
                return Some(RelayKind::Outbound);
            }
            NetworkClass::Invalid => {}
        }
        None
    }

    pub fn has_capability(&self, cap: Capability) -> bool {
        self.capabilities.contains(&cap)
    }

@ -1,12 +1,34 @@
use super::*;

#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)]
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
pub struct PeerInfo {
    #[serde(
        default = "default_routing_domain",
        skip_serializing_if = "is_default_routing_domain"
    )]
    routing_domain: RoutingDomain,
    node_ids: TypedKeyGroup,
    signed_node_info: SignedNodeInfo,
}

fn default_routing_domain() -> RoutingDomain {
    RoutingDomain::PublicInternet
}

fn is_default_routing_domain(routing_domain: &RoutingDomain) -> bool {
    matches!(routing_domain, RoutingDomain::PublicInternet)
}

impl fmt::Display for PeerInfo {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        writeln!(f, "routing_domain: {:?}", self.routing_domain)?;
        writeln!(f, "node_ids: {}", self.node_ids)?;
        writeln!(f, "signed_node_info:")?;
        write!(f, "{}", indent_all_string(&self.signed_node_info))?;
        Ok(())
    }
}

impl PeerInfo {
    pub fn new(
        routing_domain: RoutingDomain,
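
The serde attributes added to PeerInfo are a wire-compatibility trick: the new routing_domain field deserializes to PublicInternet when absent and is omitted again while it holds that default, so old and new nodes can exchange the same encoding. The trick in isolation, with stand-in types and serde_json assumed only for the demo:

    use serde::{Deserialize, Serialize};

    #[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
    enum Domain {
        PublicInternet,
        LocalNetwork,
    }

    fn default_domain() -> Domain {
        Domain::PublicInternet
    }
    fn is_default(d: &Domain) -> bool {
        matches!(d, Domain::PublicInternet)
    }

    #[derive(Debug, Serialize, Deserialize)]
    struct Record {
        #[serde(default = "default_domain", skip_serializing_if = "is_default")]
        domain: Domain,
        name: String,
    }

    fn main() {
        // Old-format JSON without the field still deserializes...
        let r: Record = serde_json::from_str(r#"{"name":"peer"}"#).unwrap();
        assert_eq!(r.domain, Domain::PublicInternet);
        // ...and the default is omitted again on the wire when re-serialized.
        assert_eq!(serde_json::to_string(&r).unwrap(), r#"{"name":"peer"}"#);
    }
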

@ -1,12 +1,26 @@
use super::*;

/// Signed NodeInfo that can be passed around amongst peers and verifiable
#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
pub struct SignedDirectNodeInfo {
    node_info: NodeInfo,
    timestamp: Timestamp,
    signatures: Vec<TypedSignature>,
}

impl fmt::Display for SignedDirectNodeInfo {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        writeln!(f, "node_info:")?;
        write!(f, "{}", indent_all_string(&self.node_info))?;
        writeln!(f, "timestamp: {}", self.timestamp)?;
        writeln!(f, "signatures:")?;
        for sig in &self.signatures {
            writeln!(f, "{}", indent_all_string(sig))?;
        }
        Ok(())
    }
}

impl SignedDirectNodeInfo {
    /// Returns a new SignedDirectNodeInfo that has its signatures validated.
    /// On success, this will modify the node_ids set to only include node_ids whose signatures validate.

@ -1,11 +1,28 @@
use super::*;

#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
pub enum SignedNodeInfo {
    Direct(SignedDirectNodeInfo),
    Relayed(SignedRelayedNodeInfo),
}

impl fmt::Display for SignedNodeInfo {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            Self::Direct(arg0) => {
                writeln!(f, "direct:")?;
                write!(f, "{}", indent_all_string(arg0))?;
                Ok(())
            }
            Self::Relayed(arg0) => {
                writeln!(f, "relayed:")?;
                write!(f, "{}", indent_all_string(&arg0))?;
                Ok(())
            }
        }
    }
}

impl SignedNodeInfo {
    pub fn validate(
        &self,

@ -1,7 +1,7 @@
use super::*;

/// Signed NodeInfo with a relay that can be passed around amongst peers and verifiable
#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
pub struct SignedRelayedNodeInfo {
    node_info: NodeInfo,
    relay_ids: TypedKeyGroup,
@ -10,6 +10,22 @@ pub struct SignedRelayedNodeInfo {
    signatures: Vec<TypedSignature>,
}

impl fmt::Display for SignedRelayedNodeInfo {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        writeln!(f, "node_info:")?;
        write!(f, "{}", indent_all_string(&self.node_info))?;
        writeln!(f, "relay_ids: {}", self.relay_ids)?;
        writeln!(f, "relay_info:")?;
        write!(f, "{}", indent_all_string(&self.relay_info))?;
        writeln!(f, "timestamp: {}", self.timestamp)?;
        writeln!(f, "signatures:")?;
        for sig in &self.signatures {
            writeln!(f, "{}", indent_all_string(sig))?;
        }
        Ok(())
    }
}

impl SignedRelayedNodeInfo {
    /// Returns a new SignedRelayedNodeInfo that has its signatures validated.
    /// On success, this will modify the node_ids set to only include node_ids whose signatures validate.
|
@ -692,7 +692,7 @@ impl RPCProcessor {
        match &out {
            Err(e) => {
                log_rpc!(debug "RPC Lost (id={} {}): {}", id, debug_string, e);
                self.record_question_lost(
                self.record_lost_answer(
                    waitable_reply.send_ts,
                    waitable_reply.node_ref.clone(),
                    waitable_reply.safety_route,
@ -702,7 +702,7 @@ impl RPCProcessor {
            }
            Ok(TimeoutOr::Timeout) => {
                log_rpc!(debug "RPC Lost (id={} {}): Timeout", id, debug_string);
                self.record_question_lost(
                self.record_lost_answer(
                    waitable_reply.send_ts,
                    waitable_reply.node_ref.clone(),
                    waitable_reply.safety_route,
@ -1008,11 +1008,8 @@ impl RPCProcessor {
        let routing_table = self.routing_table();

        if let Some(published_peer_info) = routing_table.get_published_peer_info(routing_domain) {
            // Get our node info timestamp
            let our_node_info_ts = published_peer_info.signed_node_info().timestamp();

            // If the target has not yet seen our published peer info, send it along if we have it
            if !node.has_seen_our_node_info_ts(routing_domain, our_node_info_ts) {
            if !node.has_seen_our_node_info_ts(routing_domain) {
                return SenderPeerInfo::new(published_peer_info, target_node_info_ts);
            }
        }
@ -1056,7 +1053,7 @@ impl RPCProcessor {

    /// Record question lost to node or route
    #[instrument(level = "trace", target = "rpc", skip_all)]
    fn record_question_lost(
    fn record_lost_answer(
        &self,
        send_ts: Timestamp,
        node_ref: NodeRef,
@ -1066,7 +1063,7 @@ impl RPCProcessor {
    ) {
        // Record for node if this was not sent via a route
        if safety_route.is_none() && remote_private_route.is_none() {
            node_ref.stats_question_lost();
            node_ref.stats_lost_answer();

            // Also clear the last_connections for the entry so we make a new connection next time
            node_ref.clear_last_flows();
@ -1080,19 +1077,19 @@
        if let Some(sr_pubkey) = &safety_route {
            let rss = self.routing_table.route_spec_store();
            rss.with_route_stats_mut(send_ts, sr_pubkey, |s| {
                s.record_question_lost();
                s.record_lost_answer();
            });
        }
        // If remote private route was used, record question lost there
        if let Some(rpr_pubkey) = &remote_private_route {
            rss.with_route_stats_mut(send_ts, rpr_pubkey, |s| {
                s.record_question_lost();
                s.record_lost_answer();
            });
        }
        // If private route was used, record question lost there
        if let Some(pr_pubkey) = &private_route {
            rss.with_route_stats_mut(send_ts, pr_pubkey, |s| {
                s.record_question_lost();
                s.record_lost_answer();
            });
        }
    }
@ -1169,8 +1166,8 @@ impl RPCProcessor {
        // If safety route was used, record route there
        if let Some(sr_pubkey) = &safety_route {
            rss.with_route_stats_mut(send_ts, sr_pubkey, |s| {
                // If we received an answer, the safety route we sent over can be considered tested
                s.record_tested(recv_ts);
                // Record received bytes
                s.record_answer_received(recv_ts, bytes);

                // If we used a safety route to send, use our last tested latency
                total_local_latency += s.latency_stats().average
@ -1181,7 +1178,7 @@ impl RPCProcessor {
        if let Some(pr_pubkey) = &reply_private_route {
            rss.with_route_stats_mut(send_ts, pr_pubkey, |s| {
                // Record received bytes
                s.record_received(recv_ts, bytes);
                s.record_answer_received(recv_ts, bytes);

                // If we used a private route to receive, use our last tested latency
                total_local_latency += s.latency_stats().average
@ -1192,7 +1189,7 @@ impl RPCProcessor {
        if let Some(rpr_pubkey) = &remote_private_route {
            rss.with_route_stats_mut(send_ts, rpr_pubkey, |s| {
                // Record received bytes
                s.record_received(recv_ts, bytes);
                s.record_answer_received(recv_ts, bytes);

                // The remote route latency is recorded using the total latency minus the total local latency
                let remote_latency = total_latency.saturating_sub(total_local_latency);
@ -1248,7 +1245,7 @@ impl RPCProcessor {
            // This may record nothing if the remote safety route is not also
            // a remote private route that has been imported, but that's okay
            rss.with_route_stats_mut(recv_ts, &d.remote_safety_route, |s| {
                s.record_received(recv_ts, bytes);
                s.record_question_received(recv_ts, bytes);
            });
        }
        // Process messages that arrived to our private route
@ -1260,12 +1257,12 @@ impl RPCProcessor {
            // it could also be a node id if no remote safety route was used
            // in which case this also will do nothing
            rss.with_route_stats_mut(recv_ts, &d.remote_safety_route, |s| {
                s.record_received(recv_ts, bytes);
                s.record_question_received(recv_ts, bytes);
            });

            // Record for our local private route we received over
            rss.with_route_stats_mut(recv_ts, &d.private_route, |s| {
                s.record_received(recv_ts, bytes);
                s.record_question_received(recv_ts, bytes);
            });
        }
    }
@ -1748,7 +1745,7 @@ impl RPCProcessor {
                log_rpc!(debug "Could not complete rpc operation: id = {}: {}", op_id, e);
            }
            RPCError::Ignore(_) => {
                log_rpc!("Answer late: id = {}", op_id);
                log_rpc!(debug "Answer late: id = {}", op_id);
            }
        };
        // Don't throw an error here because it's okay if the original operation timed out

@ -149,7 +149,7 @@ where
        inner
            .waiting_op_table
            .remove(&op_id)
            .ok_or_else(RPCError::else_internal(format!(
            .ok_or_else(RPCError::else_ignore(format!(
                "Unmatched operation id: {}",
                op_id
            )))?

@ -62,6 +62,9 @@ impl RPCError {
    pub fn map_ignore<M: ToString, X: ToString>(message: M) -> impl FnOnce(X) -> Self {
        move |x| Self::Ignore(format!("{}: {}", message.to_string(), x.to_string()))
    }
    pub fn else_ignore<M: ToString>(message: M) -> impl FnOnce() -> Self {
        move || Self::Ignore(message.to_string())
    }
}

impl From<RPCError> for VeilidAPIError {
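Together, these two hunks change an unmatched (late) answer from an internal error into an Ignore: the waiting-op lookup now uses the new else_ignore constructor, and the caller above logs "Answer late" at debug level and drops it. A minimal sketch of the combinator pattern, using a stand-in error enum rather than the real RPCError:

```rust
// Stand-in error enum; the real RPCError in veilid-core has more variants.
#[allow(dead_code)]
#[derive(Debug)]
enum RPCError {
    Ignore(String),
    Internal(String),
}

impl RPCError {
    // Same shape as the constructor added in the hunk above: a deferred
    // error builder suitable for Option::ok_or_else.
    fn else_ignore<M: ToString>(message: M) -> impl FnOnce() -> Self {
        move || Self::Ignore(message.to_string())
    }
}

fn main() {
    let op_id = 42u64;
    // ok_or_else turns a missing waiting-op entry into an Ignore error, so a
    // late answer gets logged and dropped instead of surfacing as a failure.
    let missing: Option<()> = None;
    let err = missing.ok_or_else(RPCError::else_ignore(format!(
        "Unmatched operation id: {}",
        op_id
    )));
    assert!(matches!(err, Err(RPCError::Ignore(_))));
}
```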
@ -1,10 +1,16 @@
use super::*;

#[derive(Clone, Debug, PartialEq, PartialOrd, Ord, Eq, Hash, Default)]
#[derive(Copy, Clone, Debug, PartialEq, PartialOrd, Ord, Eq, Hash, Default)]
pub struct SenderInfo {
    pub socket_address: SocketAddress,
}

#[derive(Copy, Clone, Debug, PartialEq, PartialOrd, Ord, Eq, Hash, Default)]
pub struct StatusResult {
    pub opt_sender_info: Option<SenderInfo>,
    pub opt_previous_sender_info: Option<SenderInfo>,
}

impl RPCProcessor {
    // Send StatusQ RPC request, receive StatusA answer
    // Can be sent via relays or routes, but will have less information via routes
@ -19,7 +25,7 @@ impl RPCProcessor {
    pub async fn rpc_call_status(
        self,
        dest: Destination,
    ) -> RPCNetworkResult<Answer<Option<SenderInfo>>> {
    ) -> RPCNetworkResult<Answer<StatusResult>> {
        let _guard = self
            .unlocked_inner
            .startup_lock
@ -105,6 +111,7 @@ impl RPCProcessor {
        // Don't need to validate these addresses for the current routing domain
        // the address itself is irrelevant, and the remote node can lie anyway
        let mut opt_sender_info = None;
        let mut opt_previous_sender_info = None;
        match dest {
            Destination::Direct {
                node: target,
@ -120,24 +127,23 @@
                {
                    // Directly requested status that actually gets sent directly and not over a relay will tell us what our IP address appears as
                    // If this changes, we'd want to know about that to reset the networking stack
                    match routing_domain {
                        RoutingDomain::PublicInternet => self
                            .network_manager()
                            .report_public_internet_socket_address(
                                sender_info.socket_address,
                                send_data_method.unique_flow.flow,
                                target.unfiltered(),
                            ),
                        RoutingDomain::LocalNetwork => {
                            self.network_manager().report_local_network_socket_address(
                                sender_info.socket_address,
                                send_data_method.unique_flow.flow,
                                target.unfiltered(),
                            )
                        }
                    }
                    opt_previous_sender_info = target.report_sender_info(
                        routing_domain,
                        send_data_method.unique_flow.flow.protocol_type(),
                        send_data_method.unique_flow.flow.address_type(),
                        sender_info,
                    );
                };
                opt_sender_info = Some(sender_info.clone());
                opt_sender_info = Some(sender_info);

                // Report ping status results to network manager
                self.network_manager().report_socket_address_change(
                    routing_domain,
                    sender_info.socket_address,
                    opt_previous_sender_info.map(|s| s.socket_address),
                    send_data_method.unique_flow.flow,
                    target.unfiltered(),
                );
            }
        }
    }
@ -156,7 +162,10 @@ impl RPCProcessor {
        Ok(NetworkResult::value(Answer::new(
            latency,
            reply_private_route,
            StatusResult {
                opt_sender_info,
                opt_previous_sender_info,
            },
        )))
    }
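rpc_call_status now returns a StatusResult that pairs the sender info reported by this ping with the previously recorded sender info for the same flow, which is what lets report_socket_address_change notice an apparent external address change. A hedged consumer-side sketch (std's SocketAddr stands in for veilid's SocketAddress; address_changed is a made-up helper):

```rust
use std::net::SocketAddr;

#[derive(Clone, Copy, PartialEq)]
struct SenderInfo {
    socket_address: SocketAddr,
}

struct StatusResult {
    opt_sender_info: Option<SenderInfo>,
    opt_previous_sender_info: Option<SenderInfo>,
}

/// True when two consecutive pings over the same flow reported different
/// apparent external addresses for this node.
fn address_changed(r: &StatusResult) -> bool {
    match (r.opt_sender_info, r.opt_previous_sender_info) {
        (Some(cur), Some(prev)) => cur.socket_address != prev.socket_address,
        _ => false, // not enough information to compare
    }
}

fn main() {
    let a: SocketAddr = "1.2.3.4:5150".parse().unwrap();
    let b: SocketAddr = "5.6.7.8:5150".parse().unwrap();
    let r = StatusResult {
        opt_sender_info: Some(SenderInfo { socket_address: a }),
        opt_previous_sender_info: Some(SenderInfo { socket_address: b }),
    };
    assert!(address_changed(&r));
}
```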
@ -272,7 +272,7 @@ impl StorageManager {
                kind,
                value_nodes: ctx.value_nodes.clone(),
            };
            log_network_result!(debug "GetValue Fanout: {:?}", fanout_result);
            log_dht!(debug "GetValue Fanout: {:?}", fanout_result);

            if let Err(e) = out_tx.send(Ok(OutboundGetValueResult {
                fanout_result,

@ -300,7 +300,7 @@ impl StorageManager {
            fanout_results.push(fanout_result);
        }

        log_network_result!(debug "InspectValue Fanout ({:?}):\n{}", kind, debug_fanout_results(&fanout_results));
        log_dht!(debug "InspectValue Fanout ({:?}):\n{}", kind, debug_fanout_results(&fanout_results));

        Ok(OutboundInspectValueResult {
            fanout_results,

@ -207,12 +207,20 @@ impl StorageManager {
    }

    /// Get the set of nodes in our active watches
    pub async fn get_active_watch_nodes(&self) -> Vec<NodeRef> {
    pub async fn get_active_watch_nodes(&self) -> Vec<Destination> {
        let inner = self.inner.lock().await;
        inner
            .opened_records
            .values()
            .filter_map(|v| v.active_watch().map(|aw| aw.watch_node))
            .filter_map(|v| {
                v.active_watch().map(|aw| {
                    Destination::direct(
                        aw.watch_node
                            .routing_domain_filtered(RoutingDomain::PublicInternet),
                    )
                    .with_safety(v.safety_selection())
                })
            })
            .collect()
    }

@ -1268,7 +1268,7 @@ where
        out += &format!(
            " {} age={} len={} subkeys={}\n",
            rik.key,
            debug_duration(get_timestamp() - rec.last_touched().as_u64()),
            display_duration(get_timestamp() - rec.last_touched().as_u64()),
            rec.record_data_size(),
            rec.stored_subkeys(),
        );

@ -88,7 +88,7 @@ impl StorageManager {
            let context = context.clone();
            let descriptor = descriptor.clone();
            async move {
                let send_descriptor = true; // xxx check if next_node needs the descriptor or not
                let send_descriptor = true; // xxx check if next_node needs the descriptor or not, see issue #203

                // get most recent value to send
                let value = {
@ -274,7 +274,7 @@ impl StorageManager {
                kind,
                value_nodes: ctx.value_nodes.clone(),
            };
            log_network_result!(debug "SetValue Fanout: {:?}", fanout_result);
            log_dht!(debug "SetValue Fanout: {:?}", fanout_result);

            if let Err(e) = out_tx.send(Ok(OutboundSetValueResult {
                fanout_result,

@ -743,6 +743,7 @@ impl StorageManagerInner {
        receiver: flume::Receiver<T>,
        handler: impl FnMut(T) -> SendPinBoxFuture<bool> + Send + 'static,
    ) -> bool {
        self.deferred_result_processor.add(receiver, handler)
        self.deferred_result_processor
            .add(receiver.into_stream(), handler)
    }
}

@ -204,9 +204,7 @@ impl StorageManager {
                }
            }
            std::collections::hash_map::Entry::Vacant(_) => {
                panic!(
                    "offline write work items should always be on offline_subkey_writes entries that exist"
                )
                warn!("offline write work items should always be on offline_subkey_writes entries that exist: ignoring key {}", result.key)
            }
        }

@ -62,7 +62,7 @@ impl StorageManager {
        )?;

        if wva.answer.accepted {
            log_dht!(debug "WatchValue canceled: id={} expiration_ts={} ({})", wva.answer.watch_id, debug_ts(wva.answer.expiration_ts.as_u64()), watch_node);
            log_dht!(debug "WatchValue canceled: id={} expiration_ts={} ({})", wva.answer.watch_id, display_ts(wva.answer.expiration_ts.as_u64()), watch_node);
            Ok(Some(OutboundWatchValueResult {
                expiration_ts: wva.answer.expiration_ts,
                watch_id: wva.answer.watch_id,
@ -127,9 +127,9 @@ impl StorageManager {

        if wva.answer.accepted {
            if watch_id != wva.answer.watch_id {
                log_dht!(debug "WatchValue changed: id={}->{} expiration_ts={} ({})", watch_id, wva.answer.watch_id, debug_ts(wva.answer.expiration_ts.as_u64()), watch_node);
                log_dht!(debug "WatchValue changed: id={}->{} expiration_ts={} ({})", watch_id, wva.answer.watch_id, display_ts(wva.answer.expiration_ts.as_u64()), watch_node);
            } else {
                log_dht!(debug "WatchValue renewed: id={} expiration_ts={} ({})", watch_id, debug_ts(wva.answer.expiration_ts.as_u64()), watch_node);
                log_dht!(debug "WatchValue renewed: id={} expiration_ts={} ({})", watch_id, display_ts(wva.answer.expiration_ts.as_u64()), watch_node);
            }

            Ok(Some(OutboundWatchValueResult {
@ -280,7 +280,7 @@ impl StorageManager {
            let mut done = false;
            if wva.answer.expiration_ts.as_u64() > 0 {
                // If the expiration time is greater than zero this watch is active
                log_dht!(debug "Watch created: id={} expiration_ts={} ({})", wva.answer.watch_id, debug_ts(wva.answer.expiration_ts.as_u64()), next_node);
                log_dht!(debug "Watch created: id={} expiration_ts={} ({})", wva.answer.watch_id, display_ts(wva.answer.expiration_ts.as_u64()), next_node);
                done = true;
            } else {
                // If the returned expiration time is zero, this watch was cancelled or rejected

@ -330,6 +330,17 @@ fn get_filtered_node_ref(
    routing_table: RoutingTable,
) -> impl FnOnce(&str) -> Option<FilteredNodeRef> {
    move |text| {
        // Safety selection
        let (text, seq) = if let Some((first, second)) = text.split_once('+') {
            let seq = get_sequencing(second)?;
            (first, Some(seq))
        } else {
            (text, None)
        };
        if text.is_empty() {
            return None;
        }

        let (text, mods) = text
            .split_once('/')
            .map(|x| (x.0, Some(x.1)))
@ -342,10 +353,15 @@ fn get_filtered_node_ref(
        } else {
            return None;
        };
        if let Some(mods) = mods {
            Some(get_node_ref_modifiers(nr)(mods)?)
        let nr = if let Some(mods) = mods {
            get_node_ref_modifiers(nr)(mods)?
        } else {
            Some(nr.default_filtered())
            nr.default_filtered()
        };
        if let Some(seq) = seq {
            Some(nr.sequencing_clone(seq))
        } else {
            Some(nr)
        }
    }
}
@ -610,9 +626,12 @@ impl VeilidAPI {

        let mut min_state = BucketEntryState::Unreliable;
        let mut capabilities = vec![];
        let mut fastest = false;
        for arg in args {
            if let Some(ms) = get_bucket_entry_state(&arg) {
                min_state = ms;
            } else if arg == "fastest" {
                fastest = true;
            } else {
                for cap in arg.split(',') {
                    if let Ok(capfcc) = FourCC::from_str(cap) {
@ -626,7 +645,10 @@ impl VeilidAPI {

        // Dump routing table entries
        let routing_table = self.network_manager()?.routing_table();
        Ok(routing_table.debug_info_entries(min_state, capabilities))
        Ok(match fastest {
            true => routing_table.debug_info_entries_fastest(min_state, capabilities, 100000),
            false => routing_table.debug_info_entries(min_state, capabilities),
        })
    }

    async fn debug_entry(&self, args: String) -> VeilidAPIResult<String> {
@ -656,7 +678,8 @@ impl VeilidAPI {
            "debug_relay",
            "node_id",
            get_node_ref(routing_table),
        )?;
        )
        .ok();

        let routing_domain = get_debug_argument_at(
            &args,
@ -716,7 +739,7 @@ impl VeilidAPI {
        // Dump connection table
        let connman = connection_manager.debug_print().await;

        Ok(format!("{}\n\n{}\n\n{}\n\n", nodeinfo, peertable, connman))
        Ok(format!("{}\n{}\n{}\n", nodeinfo, peertable, connman))
    }

    async fn debug_nodeid(&self, _args: String) -> VeilidAPIResult<String> {
@ -1746,7 +1769,7 @@ impl VeilidAPI {
        if ts.as_u64() == 0 {
            return Ok("Failed to watch value".to_owned());
        }
        Ok(format!("Success: expiration={:?}", debug_ts(ts.as_u64())))
        Ok(format!("Success: expiration={:?}", display_ts(ts.as_u64())))
    }

    async fn debug_record_cancel(&self, args: Vec<String>) -> VeilidAPIResult<String> {
@ -1986,12 +2009,12 @@ Routing:
        buckets [dead|reliable] - Display the routing table bucket statistics (default is only non-dead nodes)
        entries [dead|reliable] [<capabilities>] - Display the index of nodes in the routing table
        entry <node> - Display all the details about a particular node in the routing table
        contact <node>[<modifiers>] - Explain what mechanism would be used to contact a particular node
        contact <node>[+<sequencing>][<modifiers>] - Explain what mechanism would be used to contact a particular node
        resolve <destination> - Search the network for a particular node or private route
        relay <relay> [public|local] - Change the relay in use for this node
        punish list - List all punishments this node has assigned to other nodes / networks
               clear - Clear all punishments from this node
        route allocate [ord|*ord] [rel] [<count>] [in|out] - Allocate a route
        route allocate [<sequencing>] [rel] [<count>] [in|out] - Allocate a route
              release <route> - Release a route
              publish <route> [full] - Publish a route 'blob' that can be imported on another machine
              unpublish <route> - Mark a route as 'no longer published'
@ -2042,9 +2065,12 @@ TableDB Operations:
    * direct: <node>[+<safety>][<modifiers>]
    * relay: <relay>@<target>[+<safety>][<modifiers>]
    * private: #<id>[+<safety>]
<sequencing> is:
    * prefer_ordered: ord
    * ensure_ordered: *ord
<safety> is:
    * unsafe: -[ord|*ord]
    * safe: [route][,ord|*ord][,rel][,<count>]
    * unsafe: -<sequencing>
    * safe: [route][,<sequencing>][,rel][,<count>]
<modifiers> is: [/<protocoltype>][/<addresstype>][/<routingdomain>]
<protocoltype> is: udp|tcp|ws|wss
<addresstype> is: ipv4|ipv6
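As an illustration of the revised grammar above (not output from a real session): `contact <node>+ord/udp/ipv4` would explain how a node is contacted with prefer_ordered sequencing over UDP/IPv4, and `route allocate *ord rel 2 out` would allocate an ensure_ordered, reliable, two-hop outbound route.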
@ -2243,3 +2269,29 @@ TableDB Operations:
        Ok((key, rc))
    }
}

const DEFAULT_INDENT: usize = 4;
pub fn indent_string<S: ToString>(s: &S) -> String {
    indent_by(DEFAULT_INDENT, s.to_string())
}
pub fn indent_all_string<S: ToString>(s: &S) -> String {
    indent_all_by(DEFAULT_INDENT, s.to_string())
}

pub trait ToMultilineString {
    fn to_multiline_string(&self) -> String;
}

impl<T> ToMultilineString for Vec<T>
where
    T: fmt::Display,
{
    fn to_multiline_string(&self) -> String {
        let mut out = String::new();
        for x in self {
            out += &x.to_string();
            out += "\n";
        }
        out
    }
}
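These new helpers appear to wrap indent_by/indent_all_by from the `indent` crate, where indent_by leaves the first line untouched and indent_all_by indents every line; that reading, and the local reimplementation below, are illustrative rather than the actual dependency:

```rust
// Sketch of indent_all_string's behavior using a local reimplementation;
// the real helper delegates to the `indent` crate.
fn indent_all_string(s: &impl ToString) -> String {
    let src = s.to_string();
    src.lines()
        .map(|l| format!("    {l}")) // DEFAULT_INDENT of 4 spaces on every line
        .collect::<Vec<_>>()
        .join("\n")
        + if src.ends_with('\n') { "\n" } else { "" }
}

fn main() {
    // Nested stats render one level deeper under their header, as in the
    // Display impls later in this commit.
    print!("rpc_stats:\n{}", indent_all_string(&"sent: 10\nrcvd: 8\n"));
}
```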
@ -377,7 +377,7 @@ impl RoutingContext {
        count: u32,
    ) -> VeilidAPIResult<Timestamp> {
        event!(target: "veilid_api", Level::DEBUG,
            "RoutingContext::watch_dht_values(self: {:?}, key: {:?}, subkeys: {:?}, expiration: {:?}, count: {:?})", self, key, subkeys, expiration, count);
            "RoutingContext::watch_dht_values(self: {:?}, key: {:?}, subkeys: {:?}, expiration: {}, count: {})", self, key, subkeys, expiration, count);

        Crypto::validate_crypto_kind(key.kind)?;
        let storage_manager = self.api.storage_manager()?;

@ -26,6 +26,21 @@ pub fn fix_transferstatsdownup() -> TransferStatsDownUp {
    }
}

pub fn fix_answerstats() -> AnswerStats {
    AnswerStats {
        span: TimestampDuration::new_secs(10),
        questions: 10,
        answers: 8,
        lost_answers: 0,
        consecutive_answers_maximum: 1,
        consecutive_answers_average: 2,
        consecutive_answers_minimum: 3,
        consecutive_lost_answers_maximum: 4,
        consecutive_lost_answers_average: 5,
        consecutive_lost_answers_minimum: 6,
    }
}

pub fn fix_rpcstats() -> RPCStats {
    RPCStats {
        messages_sent: 1_000_000,
@ -36,6 +51,26 @@ pub fn fix_rpcstats() -> RPCStats {
        first_consecutive_seen_ts: Some(Timestamp::from(1685569111851)),
        recent_lost_answers: 5,
        failed_to_send: 3,
        answer: fix_answerstats(),
    }
}

pub fn fix_statestats() -> StateStats {
    StateStats {
        span: TimestampDuration::new_secs(10),
        reliable: TimestampDuration::new_secs(5),
        unreliable: TimestampDuration::new_secs(5),
        dead: TimestampDuration::new_secs(0),
        punished: TimestampDuration::new_secs(0),
        reason: StateReasonStats {
            can_not_send: TimestampDuration::new_secs(1),
            too_many_lost_answers: TimestampDuration::new_secs(2),
            no_ping_response: TimestampDuration::new_secs(3),
            failed_to_send: TimestampDuration::new_secs(4),
            lost_answers: TimestampDuration::new_secs(5),
            not_seen_consecutively: TimestampDuration::new_secs(6),
            in_unreliable_ping_span: TimestampDuration::new_secs(7),
        },
    }
}

@ -45,6 +80,7 @@ pub fn fix_peerstats() -> PeerStats {
        rpc_stats: fix_rpcstats(),
        latency: Some(fix_latencystats()),
        transfer: fix_transferstatsdownup(),
        state: fix_statestats(),
    }
}

@ -1,22 +1,54 @@
use super::*;

/// Measurement of communications latency to this node over all RPC questions
#[derive(Clone, Debug, Default, PartialEq, Eq, Serialize, Deserialize, JsonSchema)]
#[cfg_attr(target_arch = "wasm32", derive(Tsify))]
pub struct LatencyStats {
    pub fastest: TimestampDuration, // fastest latency in the ROLLING_LATENCIES_SIZE last latencies
    pub average: TimestampDuration, // average latency over the ROLLING_LATENCIES_SIZE last latencies
    pub slowest: TimestampDuration, // slowest latency in the ROLLING_LATENCIES_SIZE last latencies
    /// fastest latency in the ROLLING_LATENCIES_SIZE last latencies
    pub fastest: TimestampDuration,
    /// average latency over the ROLLING_LATENCIES_SIZE last latencies
    pub average: TimestampDuration,
    /// slowest latency in the ROLLING_LATENCIES_SIZE last latencies
    pub slowest: TimestampDuration,
}

impl fmt::Display for LatencyStats {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(
            f,
            "{} slow / {} avg / {} fast",
            self.slowest, self.average, self.fastest
        )?;
        Ok(())
    }
}

/// Measurement of how much data has transferred to or from this node over a time span
#[derive(Clone, Debug, Default, PartialEq, Eq, Serialize, Deserialize, JsonSchema)]
#[cfg_attr(target_arch = "wasm32", derive(Tsify))]
pub struct TransferStats {
    pub total: ByteCount, // total amount transferred ever
    pub maximum: ByteCount, // maximum rate over the ROLLING_TRANSFERS_SIZE last amounts
    pub average: ByteCount, // average rate over the ROLLING_TRANSFERS_SIZE last amounts
    pub minimum: ByteCount, // minimum rate over the ROLLING_TRANSFERS_SIZE last amounts
    /// total amount transferred ever
    pub total: ByteCount,
    /// maximum rate over the ROLLING_TRANSFERS_SIZE last amounts
    pub maximum: ByteCount,
    /// average rate over the ROLLING_TRANSFERS_SIZE last amounts
    pub average: ByteCount,
    /// minimum rate over the ROLLING_TRANSFERS_SIZE last amounts
    pub minimum: ByteCount,
}

impl fmt::Display for TransferStats {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(
            f,
            "{} min / {} avg / {} max / {} total",
            self.minimum, self.average, self.maximum, self.total
        )?;
        Ok(())
    }
}

/// Transfer statistics from a node to our own (down) and
#[derive(Clone, Debug, Default, PartialEq, Eq, Serialize, Deserialize, JsonSchema)]
#[cfg_attr(target_arch = "wasm32", derive(Tsify))]
pub struct TransferStatsDownUp {
@ -24,24 +56,243 @@ pub struct TransferStatsDownUp {
    pub up: TransferStats,
}

impl fmt::Display for TransferStatsDownUp {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        writeln!(f, "down: {}", self.down)?;
        writeln!(f, "up: {}", self.up)?;
        Ok(())
    }
}

/// Measurement of what states the node has been in over a time span
#[derive(Clone, Debug, Default, PartialEq, Eq, Serialize, Deserialize, JsonSchema)]
#[cfg_attr(target_arch = "wasm32", derive(Tsify))]
pub struct StateStats {
    /// total amount of time measured
    pub span: TimestampDuration,
    /// amount of time spent in a reliable state
    pub reliable: TimestampDuration,
    /// amount of time spent in an unreliable state
    pub unreliable: TimestampDuration,
    /// amount of time spent in a dead state
    pub dead: TimestampDuration,
    /// amount of time spent in a punished state
    pub punished: TimestampDuration,
    /// state reason stats for this peer
    #[serde(default)]
    pub reason: StateReasonStats,
}

impl fmt::Display for StateStats {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        writeln!(f, "span: {}", self.span)?;
        writeln!(f, "reliable: {}", self.reliable)?;
        writeln!(f, "unreliable: {}", self.unreliable)?;
        writeln!(f, "dead: {}", self.dead)?;
        writeln!(f, "punished: {}", self.punished)?;
        write!(f, "reason:\n{}", indent_all_string(&self.reason))?;
        Ok(())
    }
}

/// Measurement of what state reasons the node has been in over a time span
#[derive(Clone, Debug, Default, PartialEq, Eq, Serialize, Deserialize, JsonSchema)]
#[cfg_attr(target_arch = "wasm32", derive(Tsify))]
pub struct StateReasonStats {
    /// time spent dead due to being unable to send
    pub can_not_send: TimestampDuration,
    /// time spent dead because of too many lost answers
    pub too_many_lost_answers: TimestampDuration,
    /// time spent dead because of no ping response
    pub no_ping_response: TimestampDuration,
    /// time spent unreliable because of failures to send
    pub failed_to_send: TimestampDuration,
    /// time spent unreliable because of lost answers
    pub lost_answers: TimestampDuration,
    /// time spent unreliable because of not being seen consecutively
    pub not_seen_consecutively: TimestampDuration,
    /// time spent unreliable because we are in the unreliable ping span
    pub in_unreliable_ping_span: TimestampDuration,
}

impl fmt::Display for StateReasonStats {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        writeln!(f, "(dead) can_not_send: {}", self.can_not_send)?;
        writeln!(f, "(dead) lost_answers: {}", self.too_many_lost_answers)?;
        writeln!(f, "(dead) no_ping_response: {}", self.no_ping_response)?;
        writeln!(f, "(urel) failed_to_send: {}", self.failed_to_send)?;
        writeln!(f, "(urel) lost_answers: {}", self.lost_answers)?;
        writeln!(
            f,
            "(urel) not_consecutive: {}",
            self.not_seen_consecutively
        )?;
        writeln!(
            f,
            "(urel) unreliable_ping: {}",
            self.in_unreliable_ping_span
        )?;
        writeln!(f, "(urel) can_not_send: {}", self.can_not_send)?;
        Ok(())
    }
}

/// Measurement of round-trip RPC question/answer performance
#[derive(Clone, Debug, Default, PartialEq, Eq, Serialize, Deserialize, JsonSchema)]
#[cfg_attr(target_arch = "wasm32", derive(Tsify))]
pub struct AnswerStats {
    /// total amount of time measured
    pub span: TimestampDuration,
    /// number of questions sent in this span
    pub questions: u32,
    /// number of answers received in this span
    pub answers: u32,
    /// number of lost answers in this span
    pub lost_answers: u32,
    /// maximum number of received answers before a lost answer in this span
    pub consecutive_answers_maximum: u32,
    /// average number of received answers before a lost answer in this span
    pub consecutive_answers_average: u32,
    /// minimum number of received answers before a lost answer in this span
    pub consecutive_answers_minimum: u32,
    /// maximum number of timeouts before a received answer in this span
    pub consecutive_lost_answers_maximum: u32,
    /// average number of timeouts before a received answer in this span
    pub consecutive_lost_answers_average: u32,
    /// minimum number of timeouts before a received answer in this span
    pub consecutive_lost_answers_minimum: u32,
}

impl fmt::Display for AnswerStats {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        writeln!(f, "span: {}", self.span)?;
        writeln!(
            f,
            "questions/answers/lost: {} / {} / {}",
            self.questions, self.answers, self.lost_answers
        )?;
        writeln!(
            f,
            "consecutive answers min/avg/max: {} / {} / {}",
            self.consecutive_answers_minimum,
            self.consecutive_answers_average,
            self.consecutive_answers_maximum
        )?;
        writeln!(
            f,
            "consecutive lost min/avg/max: {} / {} / {}",
            self.consecutive_lost_answers_minimum,
            self.consecutive_lost_answers_average,
            self.consecutive_lost_answers_maximum
        )?;

        Ok(())
    }
}

/// Statistics for RPC operations performed on a node
#[derive(Clone, Debug, Default, PartialEq, Eq, Serialize, Deserialize, JsonSchema)]
#[cfg_attr(target_arch = "wasm32", derive(Tsify))]
pub struct RPCStats {
    pub messages_sent: u32, // number of rpcs that have been sent in the total_time range
    pub messages_rcvd: u32, // number of rpcs that have been received in the total_time range
    pub questions_in_flight: u32, // number of questions issued that have yet to be answered
    pub last_question_ts: Option<Timestamp>, // when the peer was last questioned (either successfully or not) and we wanted an answer
    pub last_seen_ts: Option<Timestamp>, // when the peer was last seen for any reason, including when we first attempted to reach out to it
    pub first_consecutive_seen_ts: Option<Timestamp>, // the timestamp of the first consecutive proof-of-life for this node (an answer or received question)
    pub recent_lost_answers: u32, // number of answers that have been lost since we lost reliability
    pub failed_to_send: u32, // number of messages that have failed to send or connections dropped since we last successfully sent one
    /// number of rpcs that have been sent in the total entry time range
    pub messages_sent: u32,
    /// number of rpcs that have been received in the total entry time range
    pub messages_rcvd: u32,
    /// number of questions issued that have yet to be answered
    pub questions_in_flight: u32,
    /// when the peer was last questioned (either successfully or not) and we wanted an answer
    pub last_question_ts: Option<Timestamp>,
    /// when the peer was last seen for any reason, including when we first attempted to reach out to it
    pub last_seen_ts: Option<Timestamp>,
    /// the timestamp of the first consecutive proof-of-life for this node (an answer or received question)
    pub first_consecutive_seen_ts: Option<Timestamp>,
    /// number of answers that have been lost consecutively
    pub recent_lost_answers: u32,
    /// number of messages that have failed to send or connections dropped since we last successfully sent one
    pub failed_to_send: u32,
    /// rpc answer stats for this peer
    #[serde(default)]
    pub answer: AnswerStats,
}

impl fmt::Display for RPCStats {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        writeln!(
            f,
            "# sent/received/in-flight: {} / {} / {}",
            self.messages_sent, self.messages_rcvd, self.questions_in_flight
        )?;
        writeln!(
            f,
            "# recently-lost/failed-to-send: {} / {}",
            self.recent_lost_answers, self.failed_to_send
        )?;
        writeln!(
            f,
            "last_question: {}",
            if let Some(ts) = &self.last_question_ts {
                ts.to_string()
            } else {
                "None".to_owned()
            }
        )?;
        writeln!(
            f,
            "last_seen: {}",
            if let Some(ts) = &self.last_seen_ts {
                ts.to_string()
            } else {
                "None".to_owned()
            }
        )?;
        writeln!(
            f,
            "first_consecutive: {}",
            if let Some(ts) = &self.first_consecutive_seen_ts {
                ts.to_string()
            } else {
                "None".to_owned()
            }
        )?;

        write!(f, "answers:\n{}", indent_all_string(&self.answer))?;

        Ok(())
    }
}

/// Statistics for a peer in the routing table
#[derive(Clone, Debug, Default, PartialEq, Eq, Serialize, Deserialize, JsonSchema)]
#[cfg_attr(target_arch = "wasm32", derive(Tsify))]
pub struct PeerStats {
    pub time_added: Timestamp, // when the peer was added to the routing table
    pub rpc_stats: RPCStats, // information about RPCs
    pub latency: Option<LatencyStats>, // latencies for communications with the peer
    pub transfer: TransferStatsDownUp, // Stats for communications with the peer
    /// when the peer was added to the routing table
    pub time_added: Timestamp,
    #[serde(default)]
    /// information about RPCs
    pub rpc_stats: RPCStats,
    #[serde(default)]
    /// latency stats for this peer
    pub latency: Option<LatencyStats>,
    /// transfer stats for this peer
    #[serde(default)]
    pub transfer: TransferStatsDownUp,
    /// state stats for this peer
    #[serde(default)]
    pub state: StateStats,
}

impl fmt::Display for PeerStats {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        writeln!(f, "time_added: {}", self.time_added)?;
        write!(f, "rpc_stats:\n{}", indent_all_string(&self.rpc_stats))?;
        if let Some(ls) = &self.latency {
            writeln!(f, "latency: {}", ls)?;
        } else {
            writeln!(f, "latency: None")?;
        }
        write!(f, "transfer:\n{}", indent_all_string(&self.transfer))?;
        write!(f, "state:\n{}", indent_all_string(&self.state))?;

        Ok(())
    }
}

@ -2,11 +2,11 @@
use super::*;

aligned_u64_type!(Timestamp);
aligned_u64_type_default_display_impl!(Timestamp);
aligned_u64_type_default_debug_impl!(Timestamp);

impl fmt::Debug for Timestamp {
impl fmt::Display for Timestamp {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "{}", debug_ts(self.as_u64()))
        write!(f, "{}", display_ts(self.as_u64()))
    }
}

@ -2,20 +2,20 @@
use super::*;

aligned_u64_type!(TimestampDuration);
aligned_u64_type_default_display_impl!(TimestampDuration);
aligned_u64_type_default_debug_impl!(TimestampDuration);
aligned_u64_type_default_math_impl!(TimestampDuration);

impl fmt::Debug for TimestampDuration {
impl fmt::Display for TimestampDuration {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "{}", debug_duration(self.as_u64()))
        write!(f, "{}", display_duration(self.as_u64()))
    }
}

impl TimestampDuration {
    pub fn new_secs<N: num_traits::Unsigned + num_traits::ToPrimitive>(secs: N) -> Self {
        TimestampDuration::new(secs.to_u64().unwrap() * 1_000_000u64)
    pub const fn new_secs(secs: u32) -> Self {
        TimestampDuration::new(secs as u64 * 1_000_000u64)
    }
    pub fn new_ms<N: num_traits::Unsigned + num_traits::ToPrimitive>(ms: N) -> Self {
        TimestampDuration::new(ms.to_u64().unwrap() * 1_000u64)
    pub const fn new_ms(ms: u64) -> Self {
        TimestampDuration::new(ms * 1_000u64)
    }
}
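Making these constructors const (trading away the generic integer parameters) means durations can now appear in const items and other const contexts. A small sketch with a stand-in type; KEEPALIVE_SPAN is a made-up constant name:

```rust
// Stand-in type; the real TimestampDuration wraps an aligned u64 of microseconds.
#[derive(Debug, Clone, Copy, PartialEq)]
struct TimestampDuration(u64);

impl TimestampDuration {
    pub const fn new(us: u64) -> Self {
        Self(us)
    }
    pub const fn new_secs(secs: u32) -> Self {
        Self::new(secs as u64 * 1_000_000u64)
    }
}

// This would not compile with the old generic, non-const signature.
const KEEPALIVE_SPAN: TimestampDuration = TimestampDuration::new_secs(10);

fn main() {
    assert_eq!(KEEPALIVE_SPAN, TimestampDuration::new(10_000_000));
}
```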
@ -293,31 +293,27 @@ pub struct VeilidConfigTLS {

impl Default for VeilidConfigTLS {
    fn default() -> Self {
        let certificate_path = get_default_ssl_directory("certs/server.crt");
        let private_key_path = get_default_ssl_directory("keys/server.key");
        Self {
            certificate_path,
            private_key_path,
            certificate_path: "".to_string(),
            private_key_path: "".to_string(),
            connection_initial_timeout_ms: 2000,
        }
    }
}

#[cfg_attr(target_arch = "wasm32", allow(unused_variables))]
pub fn get_default_ssl_directory(sub_path: &str) -> String {
pub fn get_default_ssl_directory(
    program_name: &str,
    organization: &str,
    qualifier: &str,
    sub_path: &str,
) -> String {
    cfg_if::cfg_if! {
        if #[cfg(target_arch = "wasm32")] {
            "".to_owned()
        } else {
            use std::path::PathBuf;
            #[cfg(unix)]
            {
                let default_path = PathBuf::from("/etc/veilid-server/ssl").join(sub_path);
                if default_path.exists() {
                    return default_path.to_string_lossy().into();
                }
            }
            ProjectDirs::from("org", "Veilid", "Veilid")
            ProjectDirs::from(qualifier, organization, program_name)
                .map(|dirs| dirs.data_local_dir().join("ssl").join(sub_path))
                .unwrap_or_else(|| PathBuf::from("./ssl").join(sub_path))
                .to_string_lossy()
@ -535,37 +531,26 @@ impl Default for VeilidConfigNetwork {
    }
}

#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, JsonSchema)]
#[derive(Debug, Default, Clone, PartialEq, Eq, Serialize, Deserialize, JsonSchema)]
#[cfg_attr(target_arch = "wasm32", derive(Tsify))]
pub struct VeilidConfigTableStore {
    pub directory: String,
    pub delete: bool,
}

impl Default for VeilidConfigTableStore {
    fn default() -> Self {
        Self {
            directory: get_default_store_path("table_store"),
            delete: false,
        }
    }
}

#[cfg_attr(target_arch = "wasm32", allow(unused_variables))]
fn get_default_store_path(store_type: &str) -> String {
fn get_default_store_path(
    program_name: &str,
    organization: &str,
    qualifier: &str,
    store_type: &str,
) -> String {
    cfg_if::cfg_if! {
        if #[cfg(target_arch = "wasm32")] {
            "".to_owned()
        } else {
            use std::path::PathBuf;
            #[cfg(unix)]
            {
                let globalpath = PathBuf::from(format!("/var/db/veilid-server/{}", store_type));
                if globalpath.exists() {
                    return globalpath.to_string_lossy().into();
                }
            }
            ProjectDirs::from("org", "Veilid", "Veilid")
            ProjectDirs::from(qualifier, organization, program_name)
                .map(|dirs| dirs.data_local_dir().to_path_buf())
                .unwrap_or_else(|| PathBuf::from("./"))
                .join(store_type)
@ -585,7 +570,7 @@ pub struct VeilidConfigBlockStore {
impl Default for VeilidConfigBlockStore {
    fn default() -> Self {
        Self {
            directory: get_default_store_path("block_store"),
            directory: "".to_string(),
            delete: false,
        }
    }
@ -608,7 +593,7 @@ impl Default for VeilidConfigProtectedStore {
        Self {
            allow_insecure_fallback: false,
            always_use_insecure_storage: false,
            directory: get_default_store_path("protected_store"),
            directory: "".to_string(),
            delete: false,
            device_encryption_key_password: "".to_owned(),
            new_device_encryption_key_password: None,
@ -744,6 +729,81 @@ pub struct VeilidConfigInner {
    pub network: VeilidConfigNetwork,
}

impl VeilidConfigInner {
    /// Create a new 'VeilidConfigInner' for use with `setup_from_config`.
    /// The (qualifier, organization, program_name) triple should match the application
    /// bundle name if used elsewhere, in the format
    /// `qualifier.organization.program_name` - for example `org.veilid.veilidchat`
    ///
    /// The 'bundle name' will be used when choosing the default storage location for the
    /// application in a platform-dependent fashion, unless 'storage_directory' is
    /// specified to override this location
    ///
    /// * `program_name` - Pick a program name and do not change it from release to release,
    ///   see `VeilidConfigInner::program_name` for details.
    /// * `organization` - Similar to program_name, but for the organization publishing this app
    /// * `qualifier` - Suffix for the application bundle name
    /// * `storage_directory` - Override for the path where veilid-core stores its content
    ///   such as the table store, protected store, and block store
    /// * `config_directory` - Override for the path where veilid-core can retrieve extra configuration files
    ///   such as certificates and keys
    pub fn new(
        program_name: &str,
        organization: &str,
        qualifier: &str,
        storage_directory: Option<&str>,
        config_directory: Option<&str>,
    ) -> Self {
        let mut out = Self {
            program_name: program_name.to_owned(),
            ..Default::default()
        };

        if let Some(storage_directory) = storage_directory {
            out.protected_store.directory = (std::path::PathBuf::from(storage_directory)
                .join("protected_store"))
            .to_string_lossy()
            .to_string();
            out.table_store.directory = (std::path::PathBuf::from(storage_directory)
                .join("table_store"))
            .to_string_lossy()
            .to_string();
            out.block_store.directory = (std::path::PathBuf::from(storage_directory)
                .join("block_store"))
            .to_string_lossy()
            .to_string();
        } else {
            out.protected_store.directory =
                get_default_store_path(program_name, organization, qualifier, "protected_store");
            out.table_store.directory =
                get_default_store_path(program_name, organization, qualifier, "table_store");
            out.block_store.directory =
                get_default_store_path(program_name, organization, qualifier, "block_store");
        }

        if let Some(config_directory) = config_directory {
            out.network.tls.certificate_path = (std::path::PathBuf::from(config_directory)
                .join("ssl/certs/server.crt"))
            .to_string_lossy()
            .to_string();
            out.network.tls.private_key_path = (std::path::PathBuf::from(config_directory)
                .join("ssl/keys/server.key"))
            .to_string_lossy()
            .to_string();
        } else {
            out.network.tls.certificate_path = get_default_ssl_directory(
                program_name,
                organization,
                qualifier,
                "certs/server.crt",
            );
            out.network.tls.private_key_path =
                get_default_ssl_directory(program_name, organization, qualifier, "keys/server.key");
        }

        out
    }
}

/// The configuration built for each Veilid node during API startup
#[derive(Clone)]
pub struct VeilidConfig {
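A hedged usage sketch for the constructor above, assuming veilid-core is a dependency that re-exports VeilidConfigInner and its public fields at the crate root; the bundle identifier pieces ("org", "MyOrg", "myapp") are made up for illustration:

```rust
use veilid_core::VeilidConfigInner;

fn main() {
    // No storage_directory / config_directory overrides: store and TLS paths
    // fall back to the platform defaults derived from the bundle name.
    let config = VeilidConfigInner::new("myapp", "MyOrg", "org", None, None);
    println!("table store: {}", config.table_store.directory);
    println!("tls cert:    {}", config.network.tls.certificate_path);
}
```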
@ -283,10 +283,10 @@ packages:
    dependency: transitive
    description:
      name: path_provider_android
      sha256: "6f01f8e37ec30b07bc424b4deabac37cacb1bc7e2e515ad74486039918a37eb7"
      sha256: c464428172cb986b758c6d1724c603097febb8fb855aa265aeecc9280c294d4a
      url: "https://pub.dev"
    source: hosted
    version: "2.2.10"
    version: "2.2.12"
  path_provider_foundation:
    dependency: transitive
    description:
@ -478,10 +478,10 @@ packages:
    dependency: transitive
    description:
      name: xdg_directories
      sha256: faea9dee56b520b55a566385b84f2e8de55e7496104adada9962e0bd11bcff1d
      sha256: "7a3f37b05d989967cdddcbb571f1ea834867ae2faa29725fd085180e0883aa15"
      url: "https://pub.dev"
    source: hosted
    version: "1.0.4"
    version: "1.1.0"
  xterm:
    dependency: "direct main"
    description:
@ -499,5 +499,5 @@ packages:
    source: hosted
    version: "0.0.6"
sdks:
  dart: ">=3.4.0 <4.0.0"
  flutter: ">=3.22.0"
  dart: ">=3.5.0 <4.0.0"
  flutter: ">=3.24.0"

@ -68,8 +68,13 @@ mixin _$DHTSchema {
        required TResult orElse(),
      }) =>
      throw _privateConstructorUsedError;

  /// Serializes this DHTSchema to a JSON map.
  Map<String, dynamic> toJson() => throw _privateConstructorUsedError;
  @JsonKey(ignore: true)

  /// Create a copy of DHTSchema
  /// with the given fields replaced by the non-null parameter values.
  @JsonKey(includeFromJson: false, includeToJson: false)
  $DHTSchemaCopyWith<DHTSchema> get copyWith =>
      throw _privateConstructorUsedError;
}
@ -92,6 +97,8 @@ class _$DHTSchemaCopyWithImpl<$Res, $Val extends DHTSchema>
  // ignore: unused_field
  final $Res Function($Val) _then;

  /// Create a copy of DHTSchema
  /// with the given fields replaced by the non-null parameter values.
  @pragma('vm:prefer-inline')
  @override
  $Res call({
@ -125,6 +132,8 @@ class __$$DHTSchemaDFLTImplCopyWithImpl<$Res>
      _$DHTSchemaDFLTImpl _value, $Res Function(_$DHTSchemaDFLTImpl) _then)
      : super(_value, _then);

  /// Create a copy of DHTSchema
  /// with the given fields replaced by the non-null parameter values.
  @pragma('vm:prefer-inline')
  @override
  $Res call({
@ -167,11 +176,13 @@ class _$DHTSchemaDFLTImpl implements DHTSchemaDFLT {
            (identical(other.oCnt, oCnt) || other.oCnt == oCnt));
  }

  @JsonKey(ignore: true)
  @JsonKey(includeFromJson: false, includeToJson: false)
  @override
  int get hashCode => Object.hash(runtimeType, oCnt);

  @JsonKey(ignore: true)
  /// Create a copy of DHTSchema
  /// with the given fields replaced by the non-null parameter values.
  @JsonKey(includeFromJson: false, includeToJson: false)
  @override
  @pragma('vm:prefer-inline')
  _$$DHTSchemaDFLTImplCopyWith<_$DHTSchemaDFLTImpl> get copyWith =>
@ -255,8 +266,11 @@ abstract class DHTSchemaDFLT implements DHTSchema {

  @override
  int get oCnt;

  /// Create a copy of DHTSchema
  /// with the given fields replaced by the non-null parameter values.
  @override
  @JsonKey(ignore: true)
  @JsonKey(includeFromJson: false, includeToJson: false)
  _$$DHTSchemaDFLTImplCopyWith<_$DHTSchemaDFLTImpl> get copyWith =>
      throw _privateConstructorUsedError;
}
@ -280,6 +294,8 @@ class __$$DHTSchemaSMPLImplCopyWithImpl<$Res>
      _$DHTSchemaSMPLImpl _value, $Res Function(_$DHTSchemaSMPLImpl) _then)
      : super(_value, _then);

  /// Create a copy of DHTSchema
  /// with the given fields replaced by the non-null parameter values.
  @pragma('vm:prefer-inline')
  @override
  $Res call({
@ -339,12 +355,14 @@ class _$DHTSchemaSMPLImpl implements DHTSchemaSMPL {
            const DeepCollectionEquality().equals(other._members, _members));
  }

  @JsonKey(ignore: true)
  @JsonKey(includeFromJson: false, includeToJson: false)
  @override
  int get hashCode => Object.hash(
      runtimeType, oCnt, const DeepCollectionEquality().hash(_members));

  @JsonKey(ignore: true)
  /// Create a copy of DHTSchema
  /// with the given fields replaced by the non-null parameter values.
  @JsonKey(includeFromJson: false, includeToJson: false)
  @override
  @pragma('vm:prefer-inline')
  _$$DHTSchemaSMPLImplCopyWith<_$DHTSchemaSMPLImpl> get copyWith =>
@ -431,8 +449,11 @@ abstract class DHTSchemaSMPL implements DHTSchema {
  @override
  int get oCnt;
  List<DHTSchemaMember> get members;

  /// Create a copy of DHTSchema
  /// with the given fields replaced by the non-null parameter values.
  @override
  @JsonKey(ignore: true)
  @JsonKey(includeFromJson: false, includeToJson: false)
  _$$DHTSchemaSMPLImplCopyWith<_$DHTSchemaSMPLImpl> get copyWith =>
      throw _privateConstructorUsedError;
}
@ -446,8 +467,12 @@ mixin _$DHTSchemaMember {
  FixedEncodedString43 get mKey => throw _privateConstructorUsedError;
  int get mCnt => throw _privateConstructorUsedError;

  /// Serializes this DHTSchemaMember to a JSON map.
  Map<String, dynamic> toJson() => throw _privateConstructorUsedError;
  @JsonKey(ignore: true)

  /// Create a copy of DHTSchemaMember
  /// with the given fields replaced by the non-null parameter values.
  @JsonKey(includeFromJson: false, includeToJson: false)
  $DHTSchemaMemberCopyWith<DHTSchemaMember> get copyWith =>
      throw _privateConstructorUsedError;
}
@ -471,6 +496,8 @@ class _$DHTSchemaMemberCopyWithImpl<$Res, $Val extends DHTSchemaMember>
  // ignore: unused_field
  final $Res Function($Val) _then;

  /// Create a copy of DHTSchemaMember
  /// with the given fields replaced by the non-null parameter values.
  @pragma('vm:prefer-inline')
  @override
  $Res call({
@ -509,6 +536,8 @@ class __$$DHTSchemaMemberImplCopyWithImpl<$Res>
      _$DHTSchemaMemberImpl _value, $Res Function(_$DHTSchemaMemberImpl) _then)
      : super(_value, _then);

  /// Create a copy of DHTSchemaMember
  /// with the given fields replaced by the non-null parameter values.
  @pragma('vm:prefer-inline')
  @override
  $Res call({
@ -556,11 +585,13 @@ class _$DHTSchemaMemberImpl implements _DHTSchemaMember {
            (identical(other.mCnt, mCnt) || other.mCnt == mCnt));
  }

  @JsonKey(ignore: true)
  @JsonKey(includeFromJson: false, includeToJson: false)
  @override
  int get hashCode => Object.hash(runtimeType, mKey, mCnt);

  @JsonKey(ignore: true)
  /// Create a copy of DHTSchemaMember
  /// with the given fields replaced by the non-null parameter values.
  @JsonKey(includeFromJson: false, includeToJson: false)
  @override
  @pragma('vm:prefer-inline')
  _$$DHTSchemaMemberImplCopyWith<_$DHTSchemaMemberImpl> get copyWith =>
@ -587,8 +618,11 @@ abstract class _DHTSchemaMember implements DHTSchemaMember {
  FixedEncodedString43 get mKey;
  @override
  int get mCnt;

  /// Create a copy of DHTSchemaMember
  /// with the given fields replaced by the non-null parameter values.
  @override
  @JsonKey(ignore: true)
  @JsonKey(includeFromJson: false, includeToJson: false)
  _$$DHTSchemaMemberImplCopyWith<_$DHTSchemaMemberImpl> get copyWith =>
      throw _privateConstructorUsedError;
}
@ -604,8 +638,12 @@ mixin _$DHTRecordDescriptor {
  DHTSchema get schema => throw _privateConstructorUsedError;
  FixedEncodedString43? get ownerSecret => throw _privateConstructorUsedError;

  /// Serializes this DHTRecordDescriptor to a JSON map.
  Map<String, dynamic> toJson() => throw _privateConstructorUsedError;
  @JsonKey(ignore: true)

  /// Create a copy of DHTRecordDescriptor
  /// with the given fields replaced by the non-null parameter values.
  @JsonKey(includeFromJson: false, includeToJson: false)
  $DHTRecordDescriptorCopyWith<DHTRecordDescriptor> get copyWith =>
      throw _privateConstructorUsedError;
}
@ -635,6 +673,8 @@ class _$DHTRecordDescriptorCopyWithImpl<$Res, $Val extends DHTRecordDescriptor>
  // ignore: unused_field
  final $Res Function($Val) _then;

  /// Create a copy of DHTRecordDescriptor
  /// with the given fields replaced by the non-null parameter values.
  @pragma('vm:prefer-inline')
  @override
  $Res call({
@ -663,6 +703,8 @@ class _$DHTRecordDescriptorCopyWithImpl<$Res, $Val extends DHTRecordDescriptor>
    ) as $Val);
  }

  /// Create a copy of DHTRecordDescriptor
  /// with the given fields replaced by the non-null parameter values.
  @override
  @pragma('vm:prefer-inline')
  $DHTSchemaCopyWith<$Res> get schema {
@ -698,6 +740,8 @@ class __$$DHTRecordDescriptorImplCopyWithImpl<$Res>
      $Res Function(_$DHTRecordDescriptorImpl) _then)
      : super(_value, _then);

  /// Create a copy of DHTRecordDescriptor
  /// with the given fields replaced by the non-null parameter values.
  @pragma('vm:prefer-inline')
  @override
  $Res call({
@ -765,11 +809,13 @@ class _$DHTRecordDescriptorImpl implements _DHTRecordDescriptor {
            other.ownerSecret == ownerSecret));
  }

  @JsonKey(ignore: true)
  @JsonKey(includeFromJson: false, includeToJson: false)
  @override
  int get hashCode => Object.hash(runtimeType, key, owner, schema, ownerSecret);

  @JsonKey(ignore: true)
  /// Create a copy of DHTRecordDescriptor
  /// with the given fields replaced by the non-null parameter values.
  @JsonKey(includeFromJson: false, includeToJson: false)
  @override
  @pragma('vm:prefer-inline')
  _$$DHTRecordDescriptorImplCopyWith<_$DHTRecordDescriptorImpl> get copyWith =>
@ -802,8 +848,11 @@ abstract class _DHTRecordDescriptor implements DHTRecordDescriptor {
  DHTSchema get schema;
  @override
  FixedEncodedString43? get ownerSecret;

  /// Create a copy of DHTRecordDescriptor
  /// with the given fields replaced by the non-null parameter values.
  @override
  @JsonKey(ignore: true)
  @JsonKey(includeFromJson: false, includeToJson: false)
  _$$DHTRecordDescriptorImplCopyWith<_$DHTRecordDescriptorImpl> get copyWith =>
      throw _privateConstructorUsedError;
}
@ -819,8 +868,12 @@ mixin _$ValueData {
  Uint8List get data => throw _privateConstructorUsedError;
  FixedEncodedString43 get writer => throw _privateConstructorUsedError;

  /// Serializes this ValueData to a JSON map.
  Map<String, dynamic> toJson() => throw _privateConstructorUsedError;
  @JsonKey(ignore: true)

  /// Create a copy of ValueData
  /// with the given fields replaced by the non-null parameter values.
  @JsonKey(includeFromJson: false, includeToJson: false)
  $ValueDataCopyWith<ValueData> get copyWith =>
      throw _privateConstructorUsedError;
}
@ -846,6 +899,8 @@ class _$ValueDataCopyWithImpl<$Res, $Val extends ValueData>
  // ignore: unused_field
  final $Res Function($Val) _then;

  /// Create a copy of ValueData
  /// with the given fields replaced by the non-null parameter values.
  @pragma('vm:prefer-inline')
  @override
  $Res call({
@ -892,6 +947,8 @@ class __$$ValueDataImplCopyWithImpl<$Res>
      _$ValueDataImpl _value, $Res Function(_$ValueDataImpl) _then)
      : super(_value, _then);

  /// Create a copy of ValueData
  /// with the given fields replaced by the non-null parameter values.
  @pragma('vm:prefer-inline')
  @override
  $Res call({
@ -951,12 +1008,14 @@ class _$ValueDataImpl implements _ValueData {
            (identical(other.writer, writer) || other.writer == writer));
  }

  @JsonKey(ignore: true)
  @JsonKey(includeFromJson: false, includeToJson: false)
  @override
  int get hashCode => Object.hash(
      runtimeType, seq, const DeepCollectionEquality().hash(data), writer);

  @JsonKey(ignore: true)
  /// Create a copy of ValueData
  /// with the given fields replaced by the non-null parameter values.
  @JsonKey(includeFromJson: false, includeToJson: false)
  @override
  @pragma('vm:prefer-inline')
  _$$ValueDataImplCopyWith<_$ValueDataImpl> get copyWith =>
@ -986,8 +1045,11 @@ abstract class _ValueData implements ValueData {
  Uint8List get data;
  @override
  FixedEncodedString43 get writer;

  /// Create a copy of ValueData
  /// with the given fields replaced by the non-null parameter values.
  @override
  @JsonKey(ignore: true)
  @JsonKey(includeFromJson: false, includeToJson: false)
  _$$ValueDataImplCopyWith<_$ValueDataImpl> get copyWith =>
      throw _privateConstructorUsedError;
}
@ -1003,8 +1065,12 @@ mixin _$SafetySpec {
  Sequencing get sequencing => throw _privateConstructorUsedError;
  String? get preferredRoute => throw _privateConstructorUsedError;

  /// Serializes this SafetySpec to a JSON map.
  Map<String, dynamic> toJson() => throw _privateConstructorUsedError;
  @JsonKey(ignore: true)

  /// Create a copy of SafetySpec
  /// with the given fields replaced by the non-null parameter values.
  @JsonKey(includeFromJson: false, includeToJson: false)
  $SafetySpecCopyWith<SafetySpec> get copyWith =>
      throw _privateConstructorUsedError;
}
@ -1032,6 +1098,8 @@ class _$SafetySpecCopyWithImpl<$Res, $Val extends SafetySpec>
  // ignore: unused_field
  final $Res Function($Val) _then;

  /// Create a copy of SafetySpec
  /// with the given fields replaced by the non-null parameter values.
  @pragma('vm:prefer-inline')
  @override
  $Res call({
@ -1084,6 +1152,8 @@ class __$$SafetySpecImplCopyWithImpl<$Res>
      _$SafetySpecImpl _value, $Res Function(_$SafetySpecImpl) _then)
      : super(_value, _then);

  /// Create a copy of SafetySpec
  /// with the given fields replaced by the non-null parameter values.
  @pragma('vm:prefer-inline')
  @override
  $Res call({
@ -1154,12 +1224,14 @@ class _$SafetySpecImpl implements _SafetySpec {
            other.preferredRoute == preferredRoute));
  }

  @JsonKey(ignore: true)
  @JsonKey(includeFromJson: false, includeToJson: false)
  @override
  int get hashCode =>
      Object.hash(runtimeType, hopCount, stability, sequencing, preferredRoute);

  @JsonKey(ignore: true)
  /// Create a copy of SafetySpec
  /// with the given fields replaced by the non-null parameter values.
  @JsonKey(includeFromJson: false, includeToJson: false)
  @override
  @pragma('vm:prefer-inline')
  _$$SafetySpecImplCopyWith<_$SafetySpecImpl> get copyWith =>
@ -1191,8 +1263,11 @@ abstract class _SafetySpec implements SafetySpec {
  Sequencing get sequencing;
  @override
  String? get preferredRoute;

  /// Create a copy of SafetySpec
  /// with the given fields replaced by the non-null parameter values.
  @override
  @JsonKey(ignore: true)
  @JsonKey(includeFromJson: false, includeToJson: false)
  _$$SafetySpecImplCopyWith<_$SafetySpecImpl> get copyWith =>
      throw _privateConstructorUsedError;
}
@ -1207,8 +1282,12 @@ mixin _$RouteBlob {
  @Uint8ListJsonConverter()
  Uint8List get blob => throw _privateConstructorUsedError;

  /// Serializes this RouteBlob to a JSON map.
  Map<String, dynamic> toJson() => throw _privateConstructorUsedError;
  @JsonKey(ignore: true)

  /// Create a copy of RouteBlob
  /// with the given fields replaced by the non-null parameter values.
  @JsonKey(includeFromJson: false, includeToJson: false)
  $RouteBlobCopyWith<RouteBlob> get copyWith =>
      throw _privateConstructorUsedError;
|
||||
}
|
||||
@ -1231,6 +1310,8 @@ class _$RouteBlobCopyWithImpl<$Res, $Val extends RouteBlob>
|
||||
// ignore: unused_field
|
||||
final $Res Function($Val) _then;
|
||||
|
||||
/// Create a copy of RouteBlob
|
||||
/// with the given fields replaced by the non-null parameter values.
|
||||
@pragma('vm:prefer-inline')
|
||||
@override
|
||||
$Res call({
|
||||
@ -1269,6 +1350,8 @@ class __$$RouteBlobImplCopyWithImpl<$Res>
|
||||
_$RouteBlobImpl _value, $Res Function(_$RouteBlobImpl) _then)
|
||||
: super(_value, _then);
|
||||
|
||||
/// Create a copy of RouteBlob
|
||||
/// with the given fields replaced by the non-null parameter values.
|
||||
@pragma('vm:prefer-inline')
|
||||
@override
|
||||
$Res call({
|
||||
@ -1317,12 +1400,14 @@ class _$RouteBlobImpl implements _RouteBlob {
|
||||
const DeepCollectionEquality().equals(other.blob, blob));
|
||||
}
|
||||
|
||||
@JsonKey(ignore: true)
|
||||
@JsonKey(includeFromJson: false, includeToJson: false)
|
||||
@override
|
||||
int get hashCode => Object.hash(
|
||||
runtimeType, routeId, const DeepCollectionEquality().hash(blob));
|
||||
|
||||
@JsonKey(ignore: true)
|
||||
/// Create a copy of RouteBlob
|
||||
/// with the given fields replaced by the non-null parameter values.
|
||||
@JsonKey(includeFromJson: false, includeToJson: false)
|
||||
@override
|
||||
@pragma('vm:prefer-inline')
|
||||
_$$RouteBlobImplCopyWith<_$RouteBlobImpl> get copyWith =>
|
||||
@ -1350,8 +1435,11 @@ abstract class _RouteBlob implements RouteBlob {
|
||||
@override
|
||||
@Uint8ListJsonConverter()
|
||||
Uint8List get blob;
|
||||
|
||||
/// Create a copy of RouteBlob
|
||||
/// with the given fields replaced by the non-null parameter values.
|
||||
@override
|
||||
@JsonKey(ignore: true)
|
||||
@JsonKey(includeFromJson: false, includeToJson: false)
|
||||
_$$RouteBlobImplCopyWith<_$RouteBlobImpl> get copyWith =>
|
||||
throw _privateConstructorUsedError;
|
||||
}
|
||||
@ -1368,8 +1456,12 @@ mixin _$DHTRecordReport {
|
||||
List<int> get localSeqs => throw _privateConstructorUsedError;
|
||||
List<int> get networkSeqs => throw _privateConstructorUsedError;
|
||||
|
||||
/// Serializes this DHTRecordReport to a JSON map.
|
||||
Map<String, dynamic> toJson() => throw _privateConstructorUsedError;
|
||||
@JsonKey(ignore: true)
|
||||
|
||||
/// Create a copy of DHTRecordReport
|
||||
/// with the given fields replaced by the non-null parameter values.
|
||||
@JsonKey(includeFromJson: false, includeToJson: false)
|
||||
$DHTRecordReportCopyWith<DHTRecordReport> get copyWith =>
|
||||
throw _privateConstructorUsedError;
|
||||
}
|
||||
@ -1397,6 +1489,8 @@ class _$DHTRecordReportCopyWithImpl<$Res, $Val extends DHTRecordReport>
|
||||
// ignore: unused_field
|
||||
final $Res Function($Val) _then;
|
||||
|
||||
/// Create a copy of DHTRecordReport
|
||||
/// with the given fields replaced by the non-null parameter values.
|
||||
@pragma('vm:prefer-inline')
|
||||
@override
|
||||
$Res call({
|
||||
@ -1449,6 +1543,8 @@ class __$$DHTRecordReportImplCopyWithImpl<$Res>
|
||||
_$DHTRecordReportImpl _value, $Res Function(_$DHTRecordReportImpl) _then)
|
||||
: super(_value, _then);
|
||||
|
||||
/// Create a copy of DHTRecordReport
|
||||
/// with the given fields replaced by the non-null parameter values.
|
||||
@pragma('vm:prefer-inline')
|
||||
@override
|
||||
$Res call({
|
||||
@ -1545,7 +1641,7 @@ class _$DHTRecordReportImpl implements _DHTRecordReport {
|
||||
.equals(other._networkSeqs, _networkSeqs));
|
||||
}
|
||||
|
||||
@JsonKey(ignore: true)
|
||||
@JsonKey(includeFromJson: false, includeToJson: false)
|
||||
@override
|
||||
int get hashCode => Object.hash(
|
||||
runtimeType,
|
||||
@ -1554,7 +1650,9 @@ class _$DHTRecordReportImpl implements _DHTRecordReport {
|
||||
const DeepCollectionEquality().hash(_localSeqs),
|
||||
const DeepCollectionEquality().hash(_networkSeqs));
|
||||
|
||||
@JsonKey(ignore: true)
|
||||
/// Create a copy of DHTRecordReport
|
||||
/// with the given fields replaced by the non-null parameter values.
|
||||
@JsonKey(includeFromJson: false, includeToJson: false)
|
||||
@override
|
||||
@pragma('vm:prefer-inline')
|
||||
_$$DHTRecordReportImplCopyWith<_$DHTRecordReportImpl> get copyWith =>
|
||||
@ -1587,8 +1685,11 @@ abstract class _DHTRecordReport implements DHTRecordReport {
|
||||
List<int> get localSeqs;
|
||||
@override
|
||||
List<int> get networkSeqs;
|
||||
|
||||
/// Create a copy of DHTRecordReport
|
||||
/// with the given fields replaced by the non-null parameter values.
|
||||
@override
|
||||
@JsonKey(ignore: true)
|
||||
@JsonKey(includeFromJson: false, includeToJson: false)
|
||||
_$$DHTRecordReportImplCopyWith<_$DHTRecordReportImpl> get copyWith =>
|
||||
throw _privateConstructorUsedError;
|
||||
}
|
||||
|
File diff suppressed because it is too large
@ -87,17 +87,74 @@ class TransferStatsDownUp with _$TransferStatsDownUp {

////////////

@freezed
class StateStats with _$StateStats {
const factory StateStats({
required TimestampDuration span,
required TimestampDuration reliable,
required TimestampDuration unreliable,
required TimestampDuration dead,
required TimestampDuration punished,
required StateReasonStats reason,
}) = _StateStats;

factory StateStats.fromJson(dynamic json) =>
_$StateStatsFromJson(json as Map<String, dynamic>);
}

////////////

@freezed
class StateReasonStats with _$StateReasonStats {
const factory StateReasonStats({
required TimestampDuration canNotSend,
required TimestampDuration tooManyLostAnswers,
required TimestampDuration noPingResponse,
required TimestampDuration failedToSend,
required TimestampDuration lostAnswers,
required TimestampDuration notSeenConsecutively,
required TimestampDuration inUnreliablePingSpan,
}) = _StateReasonStats;

factory StateReasonStats.fromJson(dynamic json) =>
_$StateReasonStatsFromJson(json as Map<String, dynamic>);
}

////////////

@freezed
class AnswerStats with _$AnswerStats {
const factory AnswerStats({
required TimestampDuration span,
required int questions,
required int answers,
required int lostAnswers,
required int consecutiveAnswersMaximum,
required int consecutiveAnswersAverage,
required int consecutiveAnswersMinimum,
required int consecutiveLostAnswersMaximum,
required int consecutiveLostAnswersAverage,
required int consecutiveLostAnswersMinimum,
}) = _AnswerStats;

factory AnswerStats.fromJson(dynamic json) =>
_$AnswerStatsFromJson(json as Map<String, dynamic>);
}

////////////

@freezed
class RPCStats with _$RPCStats {
const factory RPCStats({
required int messagesSent,
required int messagesRcvd,
required int questionsInFlight,
required Timestamp? lastQuestion,
required Timestamp? lastQuestionTs,
required Timestamp? lastSeenTs,
required Timestamp? firstConsecutiveSeenTs,
required int recentLostAnswers,
required int failedToSend,
required AnswerStats answer,
}) = _RPCStats;

factory RPCStats.fromJson(dynamic json) =>
@ -112,6 +169,7 @@ class PeerStats with _$PeerStats {
required Timestamp timeAdded,
required RPCStats rpcStats,
required TransferStatsDownUp transfer,
required StateStats state,
LatencyStats? latency,
}) = _PeerStats;
File diff suppressed because it is too large
@ -50,14 +50,98 @@ Map<String, dynamic> _$$TransferStatsDownUpImplToJson(
'up': instance.up.toJson(),
};

_$StateStatsImpl _$$StateStatsImplFromJson(Map<String, dynamic> json) =>
_$StateStatsImpl(
span: TimestampDuration.fromJson(json['span']),
reliable: TimestampDuration.fromJson(json['reliable']),
unreliable: TimestampDuration.fromJson(json['unreliable']),
dead: TimestampDuration.fromJson(json['dead']),
punished: TimestampDuration.fromJson(json['punished']),
reason: StateReasonStats.fromJson(json['reason']),
);

Map<String, dynamic> _$$StateStatsImplToJson(_$StateStatsImpl instance) =>
<String, dynamic>{
'span': instance.span.toJson(),
'reliable': instance.reliable.toJson(),
'unreliable': instance.unreliable.toJson(),
'dead': instance.dead.toJson(),
'punished': instance.punished.toJson(),
'reason': instance.reason.toJson(),
};

_$StateReasonStatsImpl _$$StateReasonStatsImplFromJson(
Map<String, dynamic> json) =>
_$StateReasonStatsImpl(
canNotSend: TimestampDuration.fromJson(json['can_not_send']),
tooManyLostAnswers:
TimestampDuration.fromJson(json['too_many_lost_answers']),
noPingResponse: TimestampDuration.fromJson(json['no_ping_response']),
failedToSend: TimestampDuration.fromJson(json['failed_to_send']),
lostAnswers: TimestampDuration.fromJson(json['lost_answers']),
notSeenConsecutively:
TimestampDuration.fromJson(json['not_seen_consecutively']),
inUnreliablePingSpan:
TimestampDuration.fromJson(json['in_unreliable_ping_span']),
);

Map<String, dynamic> _$$StateReasonStatsImplToJson(
_$StateReasonStatsImpl instance) =>
<String, dynamic>{
'can_not_send': instance.canNotSend.toJson(),
'too_many_lost_answers': instance.tooManyLostAnswers.toJson(),
'no_ping_response': instance.noPingResponse.toJson(),
'failed_to_send': instance.failedToSend.toJson(),
'lost_answers': instance.lostAnswers.toJson(),
'not_seen_consecutively': instance.notSeenConsecutively.toJson(),
'in_unreliable_ping_span': instance.inUnreliablePingSpan.toJson(),
};

_$AnswerStatsImpl _$$AnswerStatsImplFromJson(Map<String, dynamic> json) =>
_$AnswerStatsImpl(
span: TimestampDuration.fromJson(json['span']),
questions: (json['questions'] as num).toInt(),
answers: (json['answers'] as num).toInt(),
lostAnswers: (json['lost_answers'] as num).toInt(),
consecutiveAnswersMaximum:
(json['consecutive_answers_maximum'] as num).toInt(),
consecutiveAnswersAverage:
(json['consecutive_answers_average'] as num).toInt(),
consecutiveAnswersMinimum:
(json['consecutive_answers_minimum'] as num).toInt(),
consecutiveLostAnswersMaximum:
(json['consecutive_lost_answers_maximum'] as num).toInt(),
consecutiveLostAnswersAverage:
(json['consecutive_lost_answers_average'] as num).toInt(),
consecutiveLostAnswersMinimum:
(json['consecutive_lost_answers_minimum'] as num).toInt(),
);

Map<String, dynamic> _$$AnswerStatsImplToJson(_$AnswerStatsImpl instance) =>
<String, dynamic>{
'span': instance.span.toJson(),
'questions': instance.questions,
'answers': instance.answers,
'lost_answers': instance.lostAnswers,
'consecutive_answers_maximum': instance.consecutiveAnswersMaximum,
'consecutive_answers_average': instance.consecutiveAnswersAverage,
'consecutive_answers_minimum': instance.consecutiveAnswersMinimum,
'consecutive_lost_answers_maximum':
instance.consecutiveLostAnswersMaximum,
'consecutive_lost_answers_average':
instance.consecutiveLostAnswersAverage,
'consecutive_lost_answers_minimum':
instance.consecutiveLostAnswersMinimum,
};

_$RPCStatsImpl _$$RPCStatsImplFromJson(Map<String, dynamic> json) =>
_$RPCStatsImpl(
messagesSent: (json['messages_sent'] as num).toInt(),
messagesRcvd: (json['messages_rcvd'] as num).toInt(),
questionsInFlight: (json['questions_in_flight'] as num).toInt(),
lastQuestion: json['last_question'] == null
lastQuestionTs: json['last_question_ts'] == null
? null
: Timestamp.fromJson(json['last_question']),
: Timestamp.fromJson(json['last_question_ts']),
lastSeenTs: json['last_seen_ts'] == null
? null
: Timestamp.fromJson(json['last_seen_ts']),
@ -66,6 +150,7 @@ _$RPCStatsImpl _$$RPCStatsImplFromJson(Map<String, dynamic> json) =>
: Timestamp.fromJson(json['first_consecutive_seen_ts']),
recentLostAnswers: (json['recent_lost_answers'] as num).toInt(),
failedToSend: (json['failed_to_send'] as num).toInt(),
answer: AnswerStats.fromJson(json['answer']),
);

Map<String, dynamic> _$$RPCStatsImplToJson(_$RPCStatsImpl instance) =>
@ -73,11 +158,12 @@ Map<String, dynamic> _$$RPCStatsImplToJson(_$RPCStatsImpl instance) =>
'messages_sent': instance.messagesSent,
'messages_rcvd': instance.messagesRcvd,
'questions_in_flight': instance.questionsInFlight,
'last_question': instance.lastQuestion?.toJson(),
'last_question_ts': instance.lastQuestionTs?.toJson(),
'last_seen_ts': instance.lastSeenTs?.toJson(),
'first_consecutive_seen_ts': instance.firstConsecutiveSeenTs?.toJson(),
'recent_lost_answers': instance.recentLostAnswers,
'failed_to_send': instance.failedToSend,
'answer': instance.answer.toJson(),
};

_$PeerStatsImpl _$$PeerStatsImplFromJson(Map<String, dynamic> json) =>
@ -85,6 +171,7 @@ _$PeerStatsImpl _$$PeerStatsImplFromJson(Map<String, dynamic> json) =>
timeAdded: Timestamp.fromJson(json['time_added']),
rpcStats: RPCStats.fromJson(json['rpc_stats']),
transfer: TransferStatsDownUp.fromJson(json['transfer']),
state: StateStats.fromJson(json['state']),
latency: json['latency'] == null
? null
: LatencyStats.fromJson(json['latency']),
@ -95,6 +182,7 @@ Map<String, dynamic> _$$PeerStatsImplToJson(_$PeerStatsImpl instance) =>
'time_added': instance.timeAdded.toJson(),
'rpc_stats': instance.rpcStats.toJson(),
'transfer': instance.transfer.toJson(),
'state': instance.state.toJson(),
'latency': instance.latency?.toJson(),
};
@ -9,7 +9,7 @@ readme = "README.md"
packages = [{ include = "veilid" }]

[tool.poetry.dependencies]
python = "^3.11"
python = "^3.12.5"
jsonschema = "^4.17.3"

[tool.poetry.group.dev.dependencies]
@ -395,13 +395,17 @@ async def test_dht_integration_writer_reader():
print(f'  {n}')

print('syncing records to the network')
recleft = len(records)
for desc0 in records:
while True:
rr = await rc0.inspect_dht_record(desc0.key, [])
if len(rr.offline_subkeys) == 0:
left = 0; [left := left + (x[1]-x[0]+1) for x in rr.offline_subkeys]
if left == 0:
await rc0.close_dht_record(desc0.key)
break
time.sleep(1)
print(f'  {recleft} records {left} subkeys left')
time.sleep(0.1)
recleft-=1

# read dht records on server 1
print(f'reading {COUNT} records')
@ -455,19 +459,31 @@ async def test_dht_write_read_local():

print(f'  {n}')

print(f'syncing records to the network')
print('syncing records to the network')

syncrecords = records.copy()
while len(syncrecords) > 0:
donerecords = set()
subkeysleft = 0
for desc0 in records:
while True:
rr = await rc0.inspect_dht_record(desc0.key, [])
if len(rr.offline_subkeys) == 0:
await rc0.close_dht_record(desc0.key)
break
time.sleep(0.1)
left = 0; [left := left + (x[1]-x[0]+1) for x in rr.offline_subkeys]
if left == 0:
donerecords.add(desc0)
else:
subkeysleft += left
syncrecords = [x for x in syncrecords if x not in donerecords]
print(f'  {len(syncrecords)} records {subkeysleft} subkeys left')
time.sleep(1)

await api0.debug("record purge local")
await api0.debug("record purge remote")

# read dht records on server 0
print(f'reading {COUNT} records')
n = 0
for desc0 in records:
await rc0.close_dht_record(desc0.key)
desc1 = await rc0.open_dht_record(desc0.key)

vd0 = await rc0.get_dht_value(desc1.key, ValueSubkey(0), force_refresh=True)
@ -2,6 +2,8 @@ import asyncio
import importlib.resources as importlib_resources
import json
import os
import traceback

from typing import Awaitable, Callable, Optional, Self

from jsonschema import exceptions, validators
@ -49,6 +51,8 @@ from .types import (
urlsafe_b64decode_no_pad,
)

_STREAM_LIMIT = (65536 * 4)

##############################################################


@ -139,17 +143,15 @@ class _JsonVeilidAPI(VeilidAPI):
self.lock.release()
# Cancel it
handle_recv_messages_task.cancel()
try:
await handle_recv_messages_task
except asyncio.CancelledError:
pass

self.done = True

@classmethod
async def connect(
cls, host: str, port: int, update_callback: Callable[[VeilidUpdate], Awaitable]
) -> Self:
reader, writer = await asyncio.open_connection(host, port)
reader, writer = await asyncio.open_connection(host, port, limit=_STREAM_LIMIT)
veilid_api = cls(reader, writer, update_callback)
veilid_api.handle_recv_messages_task = asyncio.create_task(
veilid_api.handle_recv_messages(), name="JsonVeilidAPI.handle_recv_messages"
@ -173,9 +175,9 @@ class _JsonVeilidAPI(VeilidAPI):
lambda: protocol, path, **kwds)
writer = asyncio.StreamWriter(transport, protocol, reader, loop)
return reader, writer
reader, writer = await open_windows_pipe(ipc_path)
reader, writer = await open_windows_pipe(ipc_path, limit=_STREAM_LIMIT)
else:
reader, writer = await asyncio.open_unix_connection(ipc_path)
reader, writer = await asyncio.open_unix_connection(ipc_path, limit=_STREAM_LIMIT)

veilid_api = cls(reader, writer, update_callback)
veilid_api.handle_recv_messages_task = asyncio.create_task(
@ -211,12 +213,15 @@ class _JsonVeilidAPI(VeilidAPI):

if self.validate_schema:
_schema_validate(_VALIDATOR_RECV_MESSAGE, j)

# Process the message
if j["type"] == "Response":
await self.handle_recv_message_response(j)
elif j["type"] == "Update":
await self.update_callback(VeilidUpdate.from_json(j))
except ValueError:
pass
except asyncio.CancelledError:
pass
finally:
await self._cleanup_close()

@ -236,6 +241,7 @@ class _JsonVeilidAPI(VeilidAPI):
try:
reqfuture = self.in_flight_requests.pop(id, None)
if reqfuture is not None:
print("ass")
reqfuture.cancel()
finally:
self.lock.release()
@ -267,6 +273,9 @@ class _JsonVeilidAPI(VeilidAPI):
id = self.next_id
self.next_id += 1
writer = self.writer

if self.writer is None:
return
finally:
self.lock.release()
@ -2797,6 +2797,82 @@
}
],
"definitions": {
"AnswerStats": {
"description": "Measurement of round-trip RPC question/answer performance",
"type": "object",
"required": [
"answers",
"consecutive_answers_average",
"consecutive_answers_maximum",
"consecutive_answers_minimum",
"consecutive_lost_answers_average",
"consecutive_lost_answers_maximum",
"consecutive_lost_answers_minimum",
"lost_answers",
"questions",
"span"
],
"properties": {
"answers": {
"description": "number of answers received in this span",
"type": "integer",
"format": "uint32",
"minimum": 0.0
},
"consecutive_answers_average": {
"description": "average number of received answers before a lost answer in this span",
"type": "integer",
"format": "uint32",
"minimum": 0.0
},
"consecutive_answers_maximum": {
"description": "maximum number of received answers before a lost answer in this span",
"type": "integer",
"format": "uint32",
"minimum": 0.0
},
"consecutive_answers_minimum": {
"description": "minimum number of received answers before a lost answer in this span",
"type": "integer",
"format": "uint32",
"minimum": 0.0
},
"consecutive_lost_answers_average": {
"description": "average number of timeouts before a received answer in this span",
"type": "integer",
"format": "uint32",
"minimum": 0.0
},
"consecutive_lost_answers_maximum": {
"description": "maximum number of timeouts before a received answer in this span",
"type": "integer",
"format": "uint32",
"minimum": 0.0
},
"consecutive_lost_answers_minimum": {
"description": "minimum number of timeouts before a received answer in this span",
"type": "integer",
"format": "uint32",
"minimum": 0.0
},
"lost_answers": {
"description": "number of lost answers in this span",
"type": "integer",
"format": "uint32",
"minimum": 0.0
},
"questions": {
"description": "number of questions sent in this span",
"type": "integer",
"format": "uint32",
"minimum": 0.0
},
"span": {
"description": "total amount of time measured",
"type": "string"
}
}
},
"AttachmentState": {
"description": "Attachment abstraction for network 'signal strength'.",
"type": "string",
@ -3006,6 +3082,7 @@
"minItems": 4
},
"LatencyStats": {
"description": "Measurement of communications latency to this node over all RPC questions",
"type": "object",
"required": [
"average",
@ -3014,12 +3091,15 @@
],
"properties": {
"average": {
"description": "average latency over the ROLLING_LATENCIES_SIZE last latencies",
"type": "string"
},
"fastest": {
"description": "fastest latency in the ROLLING_LATENCIES_SIZE last latencies",
"type": "string"
},
"slowest": {
"description": "slowest latency in the ROLLING_LATENCIES_SIZE last latencies",
"type": "string"
}
}
@ -3040,14 +3120,15 @@
}
},
"PeerStats": {
"description": "Statistics for a peer in the routing table",
"type": "object",
"required": [
"rpc_stats",
"time_added",
"transfer"
"time_added"
],
"properties": {
"latency": {
"description": "latency stats for this peer",
"default": null,
"anyOf": [
{
"$ref": "#/definitions/LatencyStats"
@ -3058,14 +3139,85 @@
]
},
"rpc_stats": {
"description": "information about RPCs",
"default": {
"answer": {
"answers": 0,
"consecutive_answers_average": 0,
"consecutive_answers_maximum": 0,
"consecutive_answers_minimum": 0,
"consecutive_lost_answers_average": 0,
"consecutive_lost_answers_maximum": 0,
"consecutive_lost_answers_minimum": 0,
"lost_answers": 0,
"questions": 0,
"span": "0"
},
"failed_to_send": 0,
"first_consecutive_seen_ts": null,
"last_question_ts": null,
"last_seen_ts": null,
"messages_rcvd": 0,
"messages_sent": 0,
"questions_in_flight": 0,
"recent_lost_answers": 0
},
"allOf": [
{
"$ref": "#/definitions/RPCStats"
}
]
},
"state": {
"description": "state stats for this peer",
"default": {
"dead": "0",
"punished": "0",
"reason": {
"can_not_send": "0",
"failed_to_send": "0",
"in_unreliable_ping_span": "0",
"lost_answers": "0",
"no_ping_response": "0",
"not_seen_consecutively": "0",
"too_many_lost_answers": "0"
},
"reliable": "0",
"span": "0",
"unreliable": "0"
},
"allOf": [
{
"$ref": "#/definitions/StateStats"
}
]
},
"time_added": {
"description": "when the peer was added to the routing table",
"type": "string"
},
"transfer": {
"description": "transfer stats for this peer",
"default": {
"down": {
"average": "0",
"maximum": "0",
"minimum": "0",
"total": "0"
},
"up": {
"average": "0",
"maximum": "0",
"minimum": "0",
"total": "0"
}
},
"allOf": [
{
"$ref": "#/definitions/TransferStatsDownUp"
}
]
}
}
},
"PeerTableData": {
@ -3099,6 +3251,7 @@
}
},
"RPCStats": {
"description": "Statistics for RPC operations performed on a node",
"type": "object",
"required": [
"failed_to_send",
@ -3108,45 +3261,73 @@
"recent_lost_answers"
],
"properties": {
"answer": {
"description": "rpc answer stats for this peer",
"default": {
"answers": 0,
"consecutive_answers_average": 0,
"consecutive_answers_maximum": 0,
"consecutive_answers_minimum": 0,
"consecutive_lost_answers_average": 0,
"consecutive_lost_answers_maximum": 0,
"consecutive_lost_answers_minimum": 0,
"lost_answers": 0,
"questions": 0,
"span": "0"
},
"allOf": [
{
"$ref": "#/definitions/AnswerStats"
}
]
},
"failed_to_send": {
"description": "number of messages that have failed to send or connections dropped since we last successfully sent one",
"type": "integer",
"format": "uint32",
"minimum": 0.0
},
"first_consecutive_seen_ts": {
"description": "the timestamp of the first consecutive proof-of-life for this node (an answer or received question)",
"type": [
"string",
"null"
]
},
"last_question_ts": {
"description": "when the peer was last questioned (either successfully or not) and we wanted an answer",
"type": [
"string",
"null"
]
},
"last_seen_ts": {
"description": "when the peer was last seen for any reason, including when we first attempted to reach out to it",
"type": [
"string",
"null"
]
},
"messages_rcvd": {
"description": "number of rpcs that have been received in the total entry time range",
"type": "integer",
"format": "uint32",
"minimum": 0.0
},
"messages_sent": {
"description": "number of rpcs that have been sent in the total entry time range",
"type": "integer",
"format": "uint32",
"minimum": 0.0
},
"questions_in_flight": {
"description": "number of questions issued that have yet to be answered",
"type": "integer",
"format": "uint32",
"minimum": 0.0
},
"recent_lost_answers": {
"description": "number of answers that have been lost consecutively",
"type": "integer",
"format": "uint32",
"minimum": 0.0
@ -3239,7 +3420,101 @@
"Reliable"
]
},
"StateReasonStats": {
"description": "Measurement of what state reasons the node has been in over a time span",
"type": "object",
"required": [
"can_not_send",
"failed_to_send",
"in_unreliable_ping_span",
"lost_answers",
"no_ping_response",
"not_seen_consecutively",
"too_many_lost_answers"
],
"properties": {
"can_not_send": {
"description": "time spent dead due to being unable to send",
"type": "string"
},
"failed_to_send": {
"description": "time spent unreliable because of failures to send",
"type": "string"
},
"in_unreliable_ping_span": {
"description": "time spent unreliable because we are in the unreliable ping span",
"type": "string"
},
"lost_answers": {
"description": "time spent unreliable because of lost answers",
"type": "string"
},
"no_ping_response": {
"description": "time spent dead because of no ping response",
"type": "string"
},
"not_seen_consecutively": {
"description": "time spent unreliable because of not being seen consecutively",
"type": "string"
},
"too_many_lost_answers": {
"description": "time spent dead because of too many lost answers",
"type": "string"
}
}
},
"StateStats": {
"description": "Measurement of what states the node has been in over a time span",
"type": "object",
"required": [
"dead",
"punished",
"reliable",
"span",
"unreliable"
],
"properties": {
"dead": {
"description": "amount of time spent in a dead state",
"type": "string"
},
"punished": {
"description": "amount of time spent in a punished state",
"type": "string"
},
"reason": {
"description": "state reason stats for this peer",
"default": {
"can_not_send": "0",
"failed_to_send": "0",
"in_unreliable_ping_span": "0",
"lost_answers": "0",
"no_ping_response": "0",
"not_seen_consecutively": "0",
"too_many_lost_answers": "0"
},
"allOf": [
{
"$ref": "#/definitions/StateReasonStats"
}
]
},
"reliable": {
"description": "amount of time spent in a reliable state",
"type": "string"
},
"span": {
"description": "total amount of time measured",
"type": "string"
},
"unreliable": {
"description": "amount of time spent in an unreliable state",
"type": "string"
}
}
},
"TransferStats": {
"description": "Measurement of how much data has transferred to or from this node over a time span",
"type": "object",
"required": [
"average",
@ -3249,20 +3524,25 @@
],
"properties": {
"average": {
"description": "average rate over the ROLLING_TRANSFERS_SIZE last amounts",
"type": "string"
},
"maximum": {
"description": "maximum rate over the ROLLING_TRANSFERS_SIZE last amounts",
"type": "string"
},
"minimum": {
"description": "minimum rate over the ROLLING_TRANSFERS_SIZE last amounts",
"type": "string"
},
"total": {
"description": "total amount transferred ever",
"type": "string"
}
}
},
"TransferStatsDownUp": {
"description": "Transfer statistics from a node to our own (down) and",
"type": "object",
"required": [
"down",
@ -3603,7 +3883,7 @@
}
},
"VeilidConfigDHT": {
"description": "Configure the Distributed Hash Table (DHT).",
"description": "Configure the Distributed Hash Table (DHT). Defaults should be used here unless you are absolutely sure you know what you're doing. If you change the count/fanout/timeout parameters, you may render your node inoperable for correct DHT operations.",
"type": "object",
"required": [
"get_value_count",
@ -3795,6 +4075,7 @@
}
},
"VeilidConfigInner": {
"description": "Top level of the Veilid configuration tree",
"type": "object",
"required": [
"block_store",
@ -3807,26 +4088,53 @@
],
"properties": {
"block_store": {
"description": "Configuring the block store (storage of large content-addressable content)",
"allOf": [
{
"$ref": "#/definitions/VeilidConfigBlockStore"
}
]
},
"capabilities": {
"description": "Capabilities to enable for your application/node",
"allOf": [
{
"$ref": "#/definitions/VeilidConfigCapabilities"
}
]
},
"namespace": {
|
||||
"description": "To run multiple Veilid nodes within the same application, either through a single process running api_startup/api_startup_json multiple times, or your application running mulitple times side-by-side there needs to be a key used to partition the application's storage (in the TableStore, ProtectedStore, etc). An empty value here is the default, but if you run multiple veilid nodes concurrently, you should set this to a string that uniquely identifies this -instance- within the same 'program_name'. Must be a valid filename for all Veilid-capable systems, which means no backslashes or forward slashes in the name. Stick to a-z,0-9,_ and space and you should be fine.",
|
||||
"type": "string"
|
||||
},
|
||||
"network": {
|
||||
"description": "Configuring how Veilid interacts with the low level network",
|
||||
"allOf": [
|
||||
{
|
||||
"$ref": "#/definitions/VeilidConfigNetwork"
|
||||
}
|
||||
]
|
||||
},
|
||||
"program_name": {
|
||||
"description": "An identifier used to describe the program using veilid-core. Used to partition storage locations in places like the ProtectedStore. Must be non-empty and a valid filename for all Veilid-capable systems, which means no backslashes or forward slashes in the name. Stick to a-z,0-9,_ and space and you should be fine.\n\nCaution: If you change this string, there is no migration support. Your app's protected store and table store will very likely experience data loss. Pick a program name and stick with it. This is not a 'visible' identifier and it should uniquely identify your application.",
|
||||
"type": "string"
|
||||
},
|
||||
"protected_store": {
|
||||
"description": "Configuring the protected store (keychain/keyring/etc)",
|
||||
"allOf": [
|
||||
{
|
||||
"$ref": "#/definitions/VeilidConfigProtectedStore"
|
||||
}
|
||||
]
|
||||
},
|
||||
"table_store": {
|
||||
"description": "Configuring the table store (persistent encrypted database)",
|
||||
"allOf": [
|
||||
{
|
||||
"$ref": "#/definitions/VeilidConfigTableStore"
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
},
|
||||
"VeilidConfigNetwork": {
|
||||
|
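The namespace and program_name descriptions above spell out how storage partitioning is meant to be used. A minimal Rust sketch of running two nodes in one program, assuming VeilidConfigInner implements Default and exposes these two String fields as the schema suggests (illustrative only, not part of this diff):

// Hypothetical sketch: two concurrently-running nodes sharing one
// program_name, partitioned by distinct namespaces.
let node1_config = VeilidConfigInner {
    program_name: "my_app".to_string(),
    namespace: "node_1".to_string(),
    ..Default::default()
};
let node2_config = VeilidConfigInner {
    program_name: "my_app".to_string(),
    namespace: "node_2".to_string(),
    ..Default::default()
};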
@ -52,6 +52,60 @@ class VeilidStateAttachment:
)



class AnswerStats:
span: TimestampDuration
questions: int
answers: int
lost_answers: int
consecutive_answers_maximum: int
consecutive_answers_average: int
consecutive_answers_minimum: int
consecutive_lost_answers_maximum: int
consecutive_lost_answers_average: int
consecutive_lost_answers_minimum: int

def __init__(
self,
span: TimestampDuration,
questions: int,
answers: int,
lost_answers: int,
consecutive_answers_maximum: int,
consecutive_answers_average: int,
consecutive_answers_minimum: int,
consecutive_lost_answers_maximum: int,
consecutive_lost_answers_average: int,
consecutive_lost_answers_minimum: int,
):
self.span = span
self.questions = questions
self.answers = answers
self.lost_answers = lost_answers
self.consecutive_answers_maximum = consecutive_answers_maximum
self.consecutive_answers_average = consecutive_answers_average
self.consecutive_answers_minimum = consecutive_answers_minimum
self.consecutive_lost_answers_maximum = consecutive_lost_answers_maximum
self.consecutive_lost_answers_average = consecutive_lost_answers_average
self.consecutive_lost_answers_minimum = consecutive_lost_answers_minimum


@classmethod
def from_json(cls, j: dict) -> Self:
"""JSON object hook"""
return cls(
j["span"],
j["questions"],
j["answers"],
j["lost_answers"],
j["consecutive_answers_maximum"],
j["consecutive_answers_average"],
j["consecutive_answers_minimum"],
j["consecutive_lost_answers_maximum"],
j["consecutive_lost_answers_average"],
j["consecutive_lost_answers_minimum"],
)

class RPCStats:
messages_sent: int
messages_rcvd: int
@ -61,6 +115,7 @@ class RPCStats:
first_consecutive_seen_ts: Optional[Timestamp]
recent_lost_answers: int
failed_to_send: int
answer: AnswerStats

def __init__(
self,
@ -72,6 +127,7 @@ class RPCStats:
first_consecutive_seen_ts: Optional[Timestamp],
recent_lost_answers: int,
failed_to_send: int,
answer: AnswerStats,
):
self.messages_sent = messages_sent
self.messages_rcvd = messages_rcvd
@ -81,6 +137,7 @@ class RPCStats:
self.first_consecutive_seen_ts = first_consecutive_seen_ts
self.recent_lost_answers = recent_lost_answers
self.failed_to_send = failed_to_send
self.answer = answer

@classmethod
def from_json(cls, j: dict) -> Self:
@ -96,6 +153,7 @@ class RPCStats:
else Timestamp(j["first_consecutive_seen_ts"]),
j["recent_lost_answers"],
j["failed_to_send"],
AnswerStats.from_json(j["answer"]),
)


@ -166,12 +224,89 @@ class TransferStatsDownUp:
"""JSON object hook"""
return cls(TransferStats.from_json(j["down"]), TransferStats.from_json(j["up"]))

class StateReasonStats:
can_not_send: TimestampDuration
too_many_lost_answers: TimestampDuration
no_ping_response: TimestampDuration
failed_to_send: TimestampDuration
lost_answers: TimestampDuration
not_seen_consecutively: TimestampDuration
in_unreliable_ping_span: TimestampDuration

def __init__(
self,
can_not_send: TimestampDuration,
too_many_lost_answers: TimestampDuration,
no_ping_response: TimestampDuration,
failed_to_send: TimestampDuration,
lost_answers: TimestampDuration,
not_seen_consecutively: TimestampDuration,
in_unreliable_ping_span: TimestampDuration,
):
self.can_not_send = can_not_send
self.too_many_lost_answers = too_many_lost_answers
self.no_ping_response = no_ping_response
self.failed_to_send = failed_to_send
self.lost_answers = lost_answers
self.not_seen_consecutively = not_seen_consecutively
self.in_unreliable_ping_span = in_unreliable_ping_span

@classmethod
def from_json(cls, j: dict) -> Self:
"""JSON object hook"""
return cls(
j["can_not_send"],
j["too_many_lost_answers"],
j["no_ping_response"],
j["failed_to_send"],
j["lost_answers"],
j["not_seen_consecutively"],
j["in_unreliable_ping_span"],
)

class StateStats:
span: TimestampDuration
reliable: TimestampDuration
unreliable: TimestampDuration
dead: TimestampDuration
punished: TimestampDuration
reason: StateReasonStats

def __init__(
self,
span: TimestampDuration,
reliable: TimestampDuration,
unreliable: TimestampDuration,
dead: TimestampDuration,
punished: TimestampDuration,
reason: StateReasonStats,
):
self.span = span
self.reliable = reliable
self.unreliable = unreliable
self.dead = dead
self.punished = punished
self.reason = reason

@classmethod
def from_json(cls, j: dict) -> Self:
"""JSON object hook"""
return cls(
j["span"],
j["reliable"],
j["unreliable"],
j["dead"],
j["punished"],
StateReasonStats.from_json(j["reason"]),
)


class PeerStats:
time_added: Timestamp
rpc_stats: RPCStats
latency: Optional[LatencyStats]
transfer: TransferStatsDownUp
state: StateStats

def __init__(
self,
@ -179,11 +314,13 @@ class PeerStats:
rpc_stats: RPCStats,
latency: Optional[LatencyStats],
transfer: TransferStatsDownUp,
state: StateStats,
):
self.time_added = time_added
self.rpc_stats = rpc_stats
self.latency = latency
self.transfer = transfer
self.state = state

@classmethod
def from_json(cls, j: dict) -> Self:
@ -193,6 +330,7 @@ class PeerStats:
RPCStats.from_json(j["rpc_stats"]),
None if j["latency"] is None else LatencyStats.from_json(j["latency"]),
TransferStatsDownUp.from_json(j["transfer"]),
StateStats.from_json(j["state"]),
)
@ -133,7 +133,13 @@ impl ClientApi {
// Make wait group for all incoming connections
let awg = AsyncWaitGroup::new();

let stop_token = self.inner.lock().stop.as_ref().unwrap().token();
let stop_token = match self.inner.lock().stop.as_ref() {
Some(stop) => stop.token(),
None => {
debug!(target: "client_api", "Already stopped");
return Ok(());
}
};
while let Ok(Some(stream_result)) =
incoming_stream.next().timeout_at(stop_token.clone()).await
{
@ -174,7 +180,13 @@ impl ClientApi {
// Make wait group for all incoming connections
let awg = AsyncWaitGroup::new();

let stop_token = self.inner.lock().stop.as_ref().unwrap().token();
let stop_token = match self.inner.lock().stop.as_ref() {
Some(stop) => stop.token(),
None => {
debug!(target: "client_api", "Already stopped");
return Ok(());
}
};
while let Ok(Some(stream_result)) =
incoming_stream.next().timeout_at(stop_token.clone()).await
{
@ -53,7 +53,7 @@ pub struct CmdlineArgs {
foreground: bool,

/// Specify a configuration file to use
#[arg(short, long, value_name = "FILE", default_value = OsString::from(Settings::get_default_config_path()))]
#[arg(short, long, value_name = "FILE", default_value = OsString::from(Settings::get_default_veilid_server_conf_path()))]
config_file: Option<OsString>,

/// Specify configuration value to set (key in dot format, value in json format), eg: logging.api.enabled=true
@ -196,23 +196,23 @@ core:
)
.replace(
"%TABLE_STORE_DIRECTORY%",
&VeilidConfigTableStore::default().directory,
&Settings::get_default_table_store_directory().to_string_lossy(),
)
.replace(
"%BLOCK_STORE_DIRECTORY%",
&VeilidConfigBlockStore::default().directory,
&Settings::get_default_block_store_directory().to_string_lossy(),
)
.replace(
"%DIRECTORY%",
&VeilidConfigProtectedStore::default().directory,
&Settings::get_default_protected_store_directory().to_string_lossy(),
)
.replace(
"%CERTIFICATE_PATH%",
&VeilidConfigTLS::default().certificate_path,
&Settings::get_default_tls_certificate_path().to_string_lossy(),
)
.replace(
"%PRIVATE_KEY_PATH%",
&VeilidConfigTLS::default().private_key_path,
&Settings::get_default_tls_private_key_path().to_string_lossy(),
)
.replace(
"%REMOTE_MAX_SUBKEY_CACHE_MEMORY_MB%",
@ -860,18 +860,23 @@ impl Settings {
/// `C:\Users\<user>\AppData\Roaming\Veilid\Veilid`, and for macOS, at
/// `/Users/<user>/Library/Application Support/org.Veilid.Veilid`
///
pub fn get_default_config_path() -> PathBuf {
pub fn get_default_config_path(subpath: &str) -> PathBuf {
#[cfg(unix)]
{
let default_path = PathBuf::from("/etc/veilid-server/veilid-server.conf");
if default_path.exists() {
return default_path;
let globalpath = PathBuf::from("/var/db/veilid-server");

if globalpath.exists() {
return globalpath.join(subpath);
}
}

ProjectDirs::from("org", "Veilid", "Veilid")
.map(|dirs| dirs.config_dir().join("veilid-server.conf"))
.unwrap_or_else(|| PathBuf::from("./veilid-server.conf"))
let mut ts_path = if let Some(my_proj_dirs) = ProjectDirs::from("org", "Veilid", "Veilid") {
PathBuf::from(my_proj_dirs.config_dir())
} else {
PathBuf::from("./")
};
ts_path.push(subpath);
ts_path
}

/// Determine default flamegraph output path
@ -935,6 +940,25 @@ impl Settings {
}
}

pub fn get_default_veilid_server_conf_path() -> PathBuf {
Settings::get_default_config_path("veilid-server.conf")
}
pub fn get_default_table_store_directory() -> PathBuf {
Settings::get_default_directory("table_store")
}
pub fn get_default_block_store_directory() -> PathBuf {
Settings::get_default_directory("block_store")
}
pub fn get_default_protected_store_directory() -> PathBuf {
Settings::get_default_directory("protected_store")
}
pub fn get_default_tls_certificate_path() -> PathBuf {
Settings::get_default_config_path("ssl/certs/server.crt")
}
pub fn get_default_tls_private_key_path() -> PathBuf {
Settings::get_default_config_path("ssl/keys/server.key")
}

pub fn get_default_remote_max_subkey_cache_memory_mb() -> u32 {
if sysinfo::IS_SUPPORTED_SYSTEM {
((SYSTEM.free_memory() / (1024u64 * 1024u64)) / 16) as u32
@ -1594,13 +1618,17 @@ mod tests {

assert_eq!(
s.core.table_store.directory,
VeilidConfigTableStore::default().directory,
Settings::get_default_table_store_directory()
.to_string_lossy()
.to_string()
);
assert!(!s.core.table_store.delete);

assert_eq!(
s.core.block_store.directory,
VeilidConfigBlockStore::default().directory,
Settings::get_default_block_store_directory()
.to_string_lossy()
.to_string()
);
assert!(!s.core.block_store.delete);

@ -1608,7 +1636,9 @@ mod tests {
assert!(s.core.protected_store.always_use_insecure_storage);
assert_eq!(
s.core.protected_store.directory,
VeilidConfigProtectedStore::default().directory
Settings::get_default_protected_store_directory()
.to_string_lossy()
.to_string()
);
assert!(!s.core.protected_store.delete);
assert_eq!(s.core.protected_store.device_encryption_key_password, "");
@ -1669,11 +1699,15 @@ mod tests {
//
assert_eq!(
s.core.network.tls.certificate_path,
VeilidConfigTLS::default().certificate_path
Settings::get_default_tls_certificate_path()
.to_string_lossy()
.to_string()
);
assert_eq!(
s.core.network.tls.private_key_path,
VeilidConfigTLS::default().private_key_path
Settings::get_default_tls_private_key_path()
.to_string_lossy()
.to_string()
);
assert_eq!(s.core.network.tls.connection_initial_timeout_ms, 2_000u32);
//
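A small illustration of how the new subpath-based helpers defined above compose; the printed values are platform-dependent, and this block is a sketch for orientation, not part of the diff:

// Each helper resolves its subpath against the same per-platform base.
let conf_path = Settings::get_default_veilid_server_conf_path();
let cert_path = Settings::get_default_tls_certificate_path();
let table_dir = Settings::get_default_table_store_directory();
println!("{} {} {}", conf_path.display(), cert_path.display(), table_dir.display());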
@ -9,6 +9,7 @@ use super::*;

/// Background processor for streams
/// Handles streams to completion, passing each item from the stream to a callback
#[derive(Debug)]
pub struct DeferredStreamProcessor {
pub opt_deferred_stream_channel: Option<flume::Sender<SendPinBoxFuture<()>>>,
pub opt_stopper: Option<StopSource>,
@ -98,9 +99,9 @@ impl DeferredStreamProcessor {
/// * 'handler' is the callback to handle each item from the stream
///
/// Returns 'true' if the stream was added for processing, and 'false' if the stream could not be added, possibly due to not being initialized.
pub fn add<T: Send + 'static>(
pub fn add<T: Send + 'static, S: futures_util::Stream<Item = T> + Unpin + Send + 'static>(
&mut self,
receiver: flume::Receiver<T>,
mut receiver: S,
mut handler: impl FnMut(T) -> SendPinBoxFuture<bool> + Send + 'static,
) -> bool {
let Some(st) = self.opt_stopper.as_ref().map(|s| s.token()) else {
@ -110,7 +111,7 @@ impl DeferredStreamProcessor {
return false;
};
let drp = Box::pin(async move {
while let Ok(Ok(res)) = receiver.recv_async().timeout_at(st.clone()).await {
while let Ok(Some(res)) = receiver.next().timeout_at(st.clone()).await {
if !handler(res).await {
break;
}
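Since add() now accepts any Unpin + Send Stream rather than a flume::Receiver, existing call sites can adapt by converting the receiver into a stream. A hedged sketch, assuming flume's async feature and an already-initialized processor (the variable names here are hypothetical):

// flume's into_stream() wraps the receiver in a futures Stream, so the
// old receiver-based call sites only need this one-line change.
let (tx, rx) = flume::unbounded::<u32>();
let _added = processor.add(rx.into_stream(), |item| {
    Box::pin(async move {
        println!("item: {}", item);
        true // returning false would stop processing this stream
    })
});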
21
veilid-tools/src/future_queue.rs
Normal file
@ -0,0 +1,21 @@
use super::*;
use futures_util::StreamExt as _;
use stop_token::future::FutureExt as _;

pub async fn process_batched_future_queue<I, C, F, R>(
future_queue: I,
batch_size: usize,
stop_token: StopToken,
result_callback: C,
) where
I: IntoIterator,
C: Fn(R) -> F,
F: Future<Output = ()>,
<I as std::iter::IntoIterator>::Item: core::future::Future<Output = R>,
{
let mut buffered_futures =
futures_util::stream::iter(future_queue).buffer_unordered(batch_size);
while let Ok(Some(res)) = buffered_futures.next().timeout_at(stop_token.clone()).await {
result_callback(res).await;
}
}
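For context, one way the new process_batched_future_queue helper could be driven, matching the signature above; the workload and batch size are invented for illustration:

// Runs at most 4 of the queued futures concurrently, delivering each
// result to the callback until the queue drains or the stop token fires.
async fn example(stop_token: StopToken) {
    let work = (0..16u32).map(|i| async move { i * 2 });
    process_batched_future_queue(work, 4, stop_token, |res| async move {
        println!("result: {res}");
    })
    .await;
}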
@ -34,6 +34,7 @@ pub mod eventual;
pub mod eventual_base;
pub mod eventual_value;
pub mod eventual_value_clone;
pub mod future_queue;
pub mod interval;
pub mod ip_addr_port;
pub mod ip_extra;
@ -201,6 +202,8 @@ pub use eventual_value::*;
#[doc(inline)]
pub use eventual_value_clone::*;
#[doc(inline)]
pub use future_queue::*;
#[doc(inline)]
pub use interval::*;
#[doc(inline)]
pub use ip_addr_port::*;
@ -24,26 +24,7 @@ pub trait IoNetworkResultExt<T> {
    fn into_network_result(self) -> io::Result<NetworkResult<T>>;
}

impl<T> IoNetworkResultExt<T> for io::Result<T> {
    fn into_network_result(self) -> io::Result<NetworkResult<T>> {
        match self {
            Ok(v) => Ok(NetworkResult::Value(v)),
            // #[cfg(feature = "io_error_more")]
            // Err(e) => match e.kind() {
            // io::ErrorKind::TimedOut => Ok(NetworkResult::Timeout),
            // io::ErrorKind::UnexpectedEof
            // | io::ErrorKind::NotConnected
            // | io::ErrorKind::BrokenPipe
            // | io::ErrorKind::ConnectionAborted
            // | io::ErrorKind::ConnectionRefused
            // | io::ErrorKind::ConnectionReset
            // | io::ErrorKind::HostUnreachable
            // | io::ErrorKind::NetworkUnreachable => Ok(NetworkResult::NoConnection(e)),
            // io::ErrorKind::AddrNotAvailable => Ok(NetworkResult::AlreadyExists(e)),
            // _ => Err(e),
            // },
            // #[cfg(not(feature = "io_error_more"))]
            Err(e) => {
fn io_error_kind_from_error<T>(e: io::Error) -> io::Result<NetworkResult<T>> {
    #[cfg(not(target_arch = "wasm32"))]
    if let Some(os_err) = e.raw_os_error() {
        if os_err == libc::EHOSTUNREACH || os_err == libc::ENETUNREACH {
@ -64,7 +45,28 @@ impl<T> IoNetworkResultExt<T> for io::Result<T> {
        io::ErrorKind::AddrNotAvailable => Ok(NetworkResult::AlreadyExists(e)),
        _ => Err(e),
    }
}
}

impl<T> IoNetworkResultExt<T> for io::Result<T> {
    fn into_network_result(self) -> io::Result<NetworkResult<T>> {
        match self {
            Ok(v) => Ok(NetworkResult::Value(v)),
            // #[cfg(feature = "io_error_more")]
            // Err(e) => match e.kind() {
            // io::ErrorKind::TimedOut => Ok(NetworkResult::Timeout),
            // io::ErrorKind::UnexpectedEof
            // | io::ErrorKind::NotConnected
            // | io::ErrorKind::BrokenPipe
            // | io::ErrorKind::ConnectionAborted
            // | io::ErrorKind::ConnectionRefused
            // | io::ErrorKind::ConnectionReset
            // | io::ErrorKind::HostUnreachable
            // | io::ErrorKind::NetworkUnreachable => Ok(NetworkResult::NoConnection(e)),
            // io::ErrorKind::AddrNotAvailable => Ok(NetworkResult::AlreadyExists(e)),
            // _ => Err(e),
            // },
            // #[cfg(not(feature = "io_error_more"))]
            Err(e) => io_error_kind_from_error(e),
        }
    }
}
@ -108,22 +110,7 @@ impl<T> FoldedNetworkResultExt<T> for io::Result<TimeoutOr<T>> {
            // _ => Err(e),
            // },
            // #[cfg(not(feature = "io_error_more"))]
            Err(e) => {
                #[cfg(not(target_arch = "wasm32"))]
                if let Some(os_err) = e.raw_os_error() {
                    if os_err == libc::EHOSTUNREACH || os_err == libc::ENETUNREACH {
                        return Ok(NetworkResult::NoConnection(e));
                    }
                }
                match e.kind() {
                    io::ErrorKind::TimedOut => Ok(NetworkResult::Timeout),
                    io::ErrorKind::ConnectionAborted
                    | io::ErrorKind::ConnectionRefused
                    | io::ErrorKind::ConnectionReset => Ok(NetworkResult::NoConnection(e)),
                    io::ErrorKind::AddrNotAvailable => Ok(NetworkResult::AlreadyExists(e)),
                    _ => Err(e),
                }
            }
            Err(e) => io_error_kind_from_error(e),
        }
    }
}
@ -144,22 +131,7 @@ impl<T> FoldedNetworkResultExt<T> for io::Result<NetworkResult<T>> {
            // _ => Err(e),
            // },
            // #[cfg(not(feature = "io_error_more"))]
            Err(e) => {
                #[cfg(not(target_arch = "wasm32"))]
                if let Some(os_err) = e.raw_os_error() {
                    if os_err == libc::EHOSTUNREACH || os_err == libc::ENETUNREACH {
                        return Ok(NetworkResult::NoConnection(e));
                    }
                }
                match e.kind() {
                    io::ErrorKind::TimedOut => Ok(NetworkResult::Timeout),
                    io::ErrorKind::ConnectionAborted
                    | io::ErrorKind::ConnectionRefused
                    | io::ErrorKind::ConnectionReset => Ok(NetworkResult::NoConnection(e)),
                    io::ErrorKind::AddrNotAvailable => Ok(NetworkResult::AlreadyExists(e)),
                    _ => Err(e),
                }
            }
            Err(e) => io_error_kind_from_error(e),
        }
    }
}
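The three nearly identical error-classification blocks above now funnel through the single `io_error_kind_from_error` helper. For orientation, a sketch of how `into_network_result()` is meant to be used at a call site, assuming the trait and `NetworkResult` are in scope; the connect target is a documentation address, not a real service:

```rust
// Illustrative call site for into_network_result(): "expected" network
// failures (refused, reset, unreachable) come back as NetworkResult values,
// while everything else stays an io::Error and propagates with `?`.
use std::io;
use std::net::TcpStream;

fn classify_connect() -> io::Result<()> {
    match TcpStream::connect("203.0.113.7:5150").into_network_result()? {
        NetworkResult::Value(_stream) => println!("connected"),
        NetworkResult::Timeout => println!("timed out"),
        NetworkResult::NoConnection(e) => println!("no connection: {e}"),
        _ => println!("other non-fatal network condition"),
    }
    Ok(())
}
```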
@ -12,7 +12,7 @@ cfg_if! {
    }
}

pub fn debug_ts(ts: u64) -> String {
pub fn display_ts(ts: u64) -> String {
    if is_browser() {
        let now = Date::new_0();
        now.set_time(Date::now());
@ -66,7 +66,7 @@ cfg_if! {
    }
}

pub fn debug_ts(ts: u64) -> String {
pub fn display_ts(ts: u64) -> String {
    let now = chrono::DateTime::<chrono::Utc>::from(SystemTime::now());
    let date = chrono::DateTime::<chrono::Utc>::from(UNIX_EPOCH + Duration::from_micros(ts));

@ -110,7 +110,7 @@ const MIN: u64 = 1_000_000u64 * 60;
const SEC: u64 = 1_000_000u64;
const MSEC: u64 = 1_000u64;

pub fn debug_duration(dur: u64) -> String {
pub fn display_duration(dur: u64) -> String {
    let days = dur / DAY;
    let dur = dur % DAY;
    let hours = dur / HOUR;
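The `debug_ts`/`debug_duration` helpers are renamed to `display_ts`/`display_duration`; as the constants show, both operate on microsecond counts (`SEC = 1_000_000`). A small self-contained check of the same div/mod decomposition that `display_duration` performs (`DAY` and `HOUR` are not shown in the hunk; they are assumed here to follow the same scheme):

```rust
// Decomposing a microsecond duration the way display_duration does.
// MIN/SEC/MSEC mirror the constants above; DAY/HOUR are assumed analogous.
const MSEC: u64 = 1_000;
const SEC: u64 = 1_000_000;
const MIN: u64 = 60 * SEC;
const HOUR: u64 = 60 * MIN;
const DAY: u64 = 24 * HOUR;

fn main() {
    // 1 day, 2 hours, 3 minutes, 4.5 seconds, expressed in microseconds.
    let dur = DAY + 2 * HOUR + 3 * MIN + 4 * SEC + 500 * MSEC;
    let days = dur / DAY;
    let rem = dur % DAY;
    let hours = rem / HOUR;
    let rem = rem % HOUR;
    let mins = rem / MIN;
    let rem = rem % MIN;
    assert_eq!((days, hours, mins), (1, 2, 3));
    assert_eq!(rem, 4 * SEC + 500 * MSEC); // remainder below one minute
}
```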
@ -25,8 +25,8 @@ use tracing_subscriber::prelude::*;
use tracing_subscriber::*;
use tracing_wasm::{WASMLayerConfigBuilder, *};
use tsify::*;
use veilid_core::tools::*;
use veilid_core::*;
use veilid_core::{tools::*, VeilidAPIError};
use wasm_bindgen::prelude::*;
use wasm_bindgen_futures::*;

@ -249,7 +249,9 @@ pub fn initialize_veilid_core(platform_config: String) {
#[wasm_bindgen()]
pub fn change_log_level(layer: String, log_level: String) {
    let layer = if layer == "all" { "".to_owned() } else { layer };
    let log_level: veilid_core::VeilidConfigLogLevel = deserialize_json(&log_level).unwrap();
    let Ok(log_level) = deserialize_json::<veilid_core::VeilidConfigLogLevel>(&log_level) else {
        return;
    };
    let filters = (*FILTERS).borrow();
    if layer.is_empty() {
        // Change all layers
@ -258,9 +260,10 @@ pub fn change_log_level(layer: String, log_level: String) {
        }
    } else {
        // Change a specific layer
        let f = filters.get(layer.as_str()).unwrap();
        if let Some(f) = filters.get(layer.as_str()) {
            f.set_max_level(log_level);
        }
    }
}
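`change_log_level` previously panicked on malformed JSON from the JS side; it now bails out with a `let ... else` early return, and the `filters.get(...).unwrap()` becomes an `if let Some(f)`. A minimal standalone sketch of the `let ... else` pattern, with `serde_json` standing in for `deserialize_json`:

```rust
// Minimal sketch of the `let ... else` early-return used above, with
// serde_json standing in for veilid_core's deserialize_json helper.
fn apply_level(json: &str) {
    let Ok(level) = serde_json::from_str::<String>(json) else {
        // Bad input from the JS side: ignore rather than panic.
        return;
    };
    println!("setting level to {level}");
}

fn main() {
    apply_level("\"debug\""); // parses, applies
    apply_level("not json");  // silently ignored
}
```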

#[wasm_bindgen()]
@ -278,12 +281,13 @@ pub fn change_log_ignore(layer: String, log_ignore: String) {
        }
    } else {
        // Change a specific layer
        let f = filters.get(layer.as_str()).unwrap();
        if let Some(f) = filters.get(layer.as_str()) {
            f.set_ignore_list(Some(VeilidLayerFilter::apply_ignore_change(
                &f.ignore_list(),
                log_ignore.clone(),
            )));
        }
    }
}

#[wasm_bindgen()]
@ -446,10 +450,11 @@ pub fn routing_context_safety(id: u32) -> Promise {

#[wasm_bindgen()]
pub fn routing_context_app_call(id: u32, target_string: String, request: String) -> Promise {
    wrap_api_future_plain(async move {
    let request: Vec<u8> = data_encoding::BASE64URL_NOPAD
        .decode(request.as_bytes())
        .unwrap();
    wrap_api_future_plain(async move {
        .map_err(VeilidAPIError::generic)?;

        let routing_context = get_routing_context(id, "routing_context_app_call")?;

        let veilid_api = get_veilid_api()?;
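This hunk sets the template for most of the veilid-wasm changes that follow: the fallible base64url decode moves inside the `wrap_api_future_*` async block and `.unwrap()` becomes `.map_err(VeilidAPIError::generic)?`, so bad input rejects the returned `Promise` instead of aborting the wasm module. A simplified sketch of the shape of the pattern (the error type here is a stand-in, not the real veilid-wasm definition):

```rust
// Stand-ins for the pattern above: a fallible decode inside the async block
// propagates with `?`, and the wrapper can turn the Err into a rejected
// Promise. ApiError::generic mirrors the role of VeilidAPIError::generic.
#[derive(Debug)]
struct ApiError(String);

impl ApiError {
    fn generic<E: std::fmt::Display>(e: E) -> Self {
        ApiError(e.to_string())
    }
}

async fn app_call(request: String) -> Result<Vec<u8>, ApiError> {
    let request: Vec<u8> = data_encoding::BASE64URL_NOPAD
        .decode(request.as_bytes())
        .map_err(ApiError::generic)?; // was: .unwrap()
    // ... forward `request` over the routing context here ...
    Ok(request)
}
```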
@ -462,10 +467,10 @@ pub fn routing_context_app_message(id: u32, target_string: String, message: Stri

#[wasm_bindgen()]
pub fn routing_context_app_message(id: u32, target_string: String, message: String) -> Promise {
    wrap_api_future_void(async move {
    let message: Vec<u8> = data_encoding::BASE64URL_NOPAD
        .decode(message.as_bytes())
        .unwrap();
    wrap_api_future_void(async move {
        .map_err(VeilidAPIError::generic)?;
        let routing_context = get_routing_context(id, "routing_context_app_message")?;

        let veilid_api = get_veilid_api()?;
@ -477,14 +482,15 @@ pub fn routing_context_app_message(id: u32, target_string: String, message: Stri

#[wasm_bindgen()]
pub fn routing_context_create_dht_record(id: u32, schema: String, kind: u32) -> Promise {
    wrap_api_future_json(async move {
    let crypto_kind = if kind == 0 {
        None
    } else {
        Some(veilid_core::FourCC::from(kind))
    };
    let schema: veilid_core::DHTSchema = veilid_core::deserialize_json(&schema).unwrap();
        let schema: veilid_core::DHTSchema =
            veilid_core::deserialize_json(&schema).map_err(VeilidAPIError::generic)?;

    wrap_api_future_json(async move {
        let routing_context = get_routing_context(id, "routing_context_create_dht_record")?;

        let dht_record_descriptor = routing_context
@ -496,10 +502,14 @@ pub fn routing_context_create_dht_record(id: u32, schema: String, kind: u32) ->

#[wasm_bindgen()]
pub fn routing_context_open_dht_record(id: u32, key: String, writer: Option<String>) -> Promise {
    let key: veilid_core::TypedKey = veilid_core::deserialize_json(&key).unwrap();
    let writer: Option<veilid_core::KeyPair> =
        writer.map(|s| veilid_core::deserialize_json(&s).unwrap());
    wrap_api_future_json(async move {
        let key: veilid_core::TypedKey =
            veilid_core::deserialize_json(&key).map_err(VeilidAPIError::generic)?;
        let writer: Option<veilid_core::KeyPair> = match writer {
            Some(s) => Some(veilid_core::deserialize_json(&s).map_err(VeilidAPIError::generic)?),
            None => None,
        };

        let routing_context = get_routing_context(id, "routing_context_open_dht_record")?;

        let dht_record_descriptor = routing_context.open_dht_record(key, writer).await?;
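`writer.map(|s| ... .unwrap())` cannot propagate an error out of the closure, so it becomes a `match` whose arms can use `?`. A standalone sketch of the two equivalent fallible-`Option` conversions (types simplified to a string parse):

```rust
// Two equivalent ways to fallibly convert an Option<String> without unwrap():
// an explicit match (as in the diff above) and Option::map + transpose.
fn parse_opt(writer: Option<String>) -> Result<Option<u64>, std::num::ParseIntError> {
    // Explicit match, mirroring the diff:
    let a: Option<u64> = match writer.clone() {
        Some(s) => Some(s.parse()?),
        None => None,
    };
    // Or, equivalently, map + transpose:
    let b: Option<u64> = writer.map(|s| s.parse()).transpose()?;
    assert_eq!(a, b);
    Ok(a)
}
```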
@ -509,8 +519,10 @@ pub fn routing_context_open_dht_record(id: u32, key: String, writer: Option<Stri

#[wasm_bindgen()]
pub fn routing_context_close_dht_record(id: u32, key: String) -> Promise {
    let key: veilid_core::TypedKey = veilid_core::deserialize_json(&key).unwrap();
    wrap_api_future_void(async move {
        let key: veilid_core::TypedKey =
            veilid_core::deserialize_json(&key).map_err(VeilidAPIError::generic)?;

        let routing_context = get_routing_context(id, "routing_context_close_dht_record")?;

        routing_context.close_dht_record(key).await?;
@ -520,8 +532,10 @@ pub fn routing_context_close_dht_record(id: u32, key: String) -> Promise {

#[wasm_bindgen()]
pub fn routing_context_delete_dht_record(id: u32, key: String) -> Promise {
    let key: veilid_core::TypedKey = veilid_core::deserialize_json(&key).unwrap();
    wrap_api_future_void(async move {
        let key: veilid_core::TypedKey =
            veilid_core::deserialize_json(&key).map_err(VeilidAPIError::generic)?;

        let routing_context = get_routing_context(id, "routing_context_delete_dht_record")?;

        routing_context.delete_dht_record(key).await?;
@ -536,8 +550,10 @@ pub fn routing_context_get_dht_value(
    subkey: u32,
    force_refresh: bool,
) -> Promise {
    let key: veilid_core::TypedKey = veilid_core::deserialize_json(&key).unwrap();
    wrap_api_future_json(async move {
        let key: veilid_core::TypedKey =
            veilid_core::deserialize_json(&key).map_err(VeilidAPIError::generic)?;

        let routing_context = get_routing_context(id, "routing_context_get_dht_value")?;

        let res = routing_context
@ -555,14 +571,17 @@ pub fn routing_context_set_dht_value(
    data: String,
    writer: Option<String>,
) -> Promise {
    let key: veilid_core::TypedKey = veilid_core::deserialize_json(&key).unwrap();
    wrap_api_future_json(async move {
        let key: veilid_core::TypedKey =
            veilid_core::deserialize_json(&key).map_err(VeilidAPIError::generic)?;
        let data: Vec<u8> = data_encoding::BASE64URL_NOPAD
            .decode(data.as_bytes())
            .unwrap();
    let writer: Option<veilid_core::KeyPair> =
        writer.map(|s| veilid_core::deserialize_json(&s).unwrap());
            .map_err(VeilidAPIError::generic)?;
        let writer: Option<veilid_core::KeyPair> = match writer {
            Some(s) => veilid_core::deserialize_json(&s).map_err(VeilidAPIError::generic)?,
            None => None,
        };

    wrap_api_future_json(async move {
        let routing_context = get_routing_context(id, "routing_context_set_dht_value")?;

        let res = routing_context
@ -580,12 +599,15 @@ pub fn routing_context_watch_dht_values(
    expiration: String,
    count: u32,
) -> Promise {
    let key: veilid_core::TypedKey = veilid_core::deserialize_json(&key).unwrap();
    let subkeys: veilid_core::ValueSubkeyRangeSet =
        veilid_core::deserialize_json(&subkeys).unwrap();
    let expiration = veilid_core::Timestamp::from_str(&expiration).unwrap();

    wrap_api_future_plain(async move {
        let key: veilid_core::TypedKey =
            veilid_core::deserialize_json(&key).map_err(VeilidAPIError::generic)?;
        let subkeys: veilid_core::ValueSubkeyRangeSet =
            veilid_core::deserialize_json(&subkeys).map_err(VeilidAPIError::generic)?;
        let expiration = veilid_core::Timestamp::new(
            u64::from_str(&expiration).map_err(VeilidAPIError::generic)?,
        );

        let routing_context = get_routing_context(id, "routing_context_watch_dht_values")?;

        let res = routing_context
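Note the expiration change: instead of `Timestamp::from_str`, the string is now parsed as a bare decimal microsecond count with `u64::from_str` and wrapped in `Timestamp::new`, so JS callers can pass a plain integer string. A standalone sketch of that parse, with a stand-in newtype for `veilid_core::Timestamp`:

```rust
use std::str::FromStr;

// Stand-in newtype for veilid_core::Timestamp (microseconds since the epoch).
#[derive(Debug, PartialEq)]
struct Timestamp(u64);

impl Timestamp {
    fn new(us: u64) -> Self {
        Timestamp(us)
    }
}

fn parse_expiration(s: &str) -> Result<Timestamp, std::num::ParseIntError> {
    // Accept a bare decimal string such as "1700000000000000".
    Ok(Timestamp::new(u64::from_str(s)?))
}

fn main() {
    assert_eq!(parse_expiration("42").unwrap(), Timestamp::new(42));
    assert!(parse_expiration("not-a-number").is_err());
}
```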
@ -597,11 +619,12 @@

#[wasm_bindgen()]
pub fn routing_context_cancel_dht_watch(id: u32, key: String, subkeys: String) -> Promise {
    let key: veilid_core::TypedKey = veilid_core::deserialize_json(&key).unwrap();
    let subkeys: veilid_core::ValueSubkeyRangeSet =
        veilid_core::deserialize_json(&subkeys).unwrap();

    wrap_api_future_plain(async move {
        let key: veilid_core::TypedKey =
            veilid_core::deserialize_json(&key).map_err(VeilidAPIError::generic)?;
        let subkeys: veilid_core::ValueSubkeyRangeSet =
            veilid_core::deserialize_json(&subkeys).map_err(VeilidAPIError::generic)?;

        let routing_context = get_routing_context(id, "routing_context_cancel_dht_watch")?;

        let res = routing_context.cancel_dht_watch(key, subkeys).await?;
@ -616,12 +639,14 @@ pub fn routing_context_inspect_dht_record(
    subkeys: String,
    scope: String,
) -> Promise {
    let key: veilid_core::TypedKey = veilid_core::deserialize_json(&key).unwrap();
    let subkeys: veilid_core::ValueSubkeyRangeSet =
        veilid_core::deserialize_json(&subkeys).unwrap();
    let scope: veilid_core::DHTReportScope = veilid_core::deserialize_json(&scope).unwrap();

    wrap_api_future_json(async move {
        let key: veilid_core::TypedKey =
            veilid_core::deserialize_json(&key).map_err(VeilidAPIError::generic)?;
        let subkeys: veilid_core::ValueSubkeyRangeSet =
            veilid_core::deserialize_json(&subkeys).map_err(VeilidAPIError::generic)?;
        let scope: veilid_core::DHTReportScope =
            veilid_core::deserialize_json(&scope).map_err(VeilidAPIError::generic)?;

        let routing_context = get_routing_context(id, "routing_context_inspect_dht_record")?;

        let res = routing_context
@ -647,10 +672,12 @@ pub fn new_private_route() -> Promise {

#[wasm_bindgen()]
pub fn new_custom_private_route(stability: String, sequencing: String) -> Promise {
    let stability: veilid_core::Stability = veilid_core::deserialize_json(&stability).unwrap();
    let sequencing: veilid_core::Sequencing = veilid_core::deserialize_json(&sequencing).unwrap();

    wrap_api_future_json(async move {
        let stability: veilid_core::Stability =
            veilid_core::deserialize_json(&stability).map_err(VeilidAPIError::generic)?;
        let sequencing: veilid_core::Sequencing =
            veilid_core::deserialize_json(&sequencing).map_err(VeilidAPIError::generic)?;

        let veilid_api = get_veilid_api()?;

        let (route_id, blob) = veilid_api
@ -665,10 +692,10 @@ pub fn new_custom_private_route(stability: String, sequencing: String) -> Promis

#[wasm_bindgen()]
pub fn import_remote_private_route(blob: String) -> Promise {
    wrap_api_future_plain(async move {
    let blob: Vec<u8> = data_encoding::BASE64URL_NOPAD
        .decode(blob.as_bytes())
        .unwrap();
    wrap_api_future_plain(async move {
        .map_err(VeilidAPIError::generic)?;
        let veilid_api = get_veilid_api()?;

        let key = veilid_api.import_remote_private_route(blob)?;
@ -679,8 +706,9 @@ pub fn import_remote_private_route(blob: String) -> Promise {

#[wasm_bindgen()]
pub fn release_private_route(route_id: String) -> Promise {
    let route_id: veilid_core::RouteId = veilid_core::RouteId::try_decode(&route_id).unwrap();
    wrap_api_future_void(async move {
        let route_id: veilid_core::RouteId =
            veilid_core::RouteId::try_decode(&route_id).map_err(VeilidAPIError::generic)?;
        let veilid_api = get_veilid_api()?;
        veilid_api.release_private_route(route_id)?;
        APIRESULT_UNDEFINED
@ -689,10 +717,10 @@ pub fn release_private_route(route_id: String) -> Promise {

#[wasm_bindgen()]
pub fn app_call_reply(call_id: String, message: String) -> Promise {
    wrap_api_future_void(async move {
    let message: Vec<u8> = data_encoding::BASE64URL_NOPAD
        .decode(message.as_bytes())
        .unwrap();
    wrap_api_future_void(async move {
        .map_err(VeilidAPIError::generic)?;
        let call_id = match call_id.parse() {
            Ok(v) => v,
            Err(e) => {
@ -853,13 +881,14 @@ pub fn table_db_transaction_rollback(id: u32) -> Promise {

#[wasm_bindgen()]
pub fn table_db_transaction_store(id: u32, col: u32, key: String, value: String) -> Promise {
    wrap_api_future_void(async move {
    let key: Vec<u8> = data_encoding::BASE64URL_NOPAD
        .decode(key.as_bytes())
        .unwrap();
        .map_err(VeilidAPIError::generic)?;
    let value: Vec<u8> = data_encoding::BASE64URL_NOPAD
        .decode(value.as_bytes())
        .unwrap();
    wrap_api_future_void(async move {
        .map_err(VeilidAPIError::generic)?;

        let tdbt = get_table_db_transaction(id, "table_db_transaction_store")?;

        tdbt.store(col, &key, &value)?;
@ -869,10 +898,11 @@ pub fn table_db_transaction_store(id: u32, col: u32, key: String, value: String)

#[wasm_bindgen()]
pub fn table_db_transaction_delete(id: u32, col: u32, key: String) -> Promise {
    wrap_api_future_void(async move {
    let key: Vec<u8> = data_encoding::BASE64URL_NOPAD
        .decode(key.as_bytes())
        .unwrap();
    wrap_api_future_void(async move {
        .map_err(VeilidAPIError::generic)?;

        let tdbt = get_table_db_transaction(id, "table_db_transaction_delete")?;

        tdbt.delete(col, &key)?;
@ -882,13 +912,13 @@ pub fn table_db_transaction_delete(id: u32, col: u32, key: String) -> Promise {

#[wasm_bindgen()]
pub fn table_db_store(id: u32, col: u32, key: String, value: String) -> Promise {
    wrap_api_future_void(async move {
    let key: Vec<u8> = data_encoding::BASE64URL_NOPAD
        .decode(key.as_bytes())
        .unwrap();
        .map_err(VeilidAPIError::generic)?;
    let value: Vec<u8> = data_encoding::BASE64URL_NOPAD
        .decode(value.as_bytes())
        .unwrap();
    wrap_api_future_void(async move {
        .map_err(VeilidAPIError::generic)?;
        let table_db = get_table_db(id, "table_db_store")?;

        table_db.store(col, &key, &value).await?;
@ -898,10 +928,10 @@ pub fn table_db_store(id: u32, col: u32, key: String, value: String) -> Promise

#[wasm_bindgen()]
pub fn table_db_load(id: u32, col: u32, key: String) -> Promise {
    wrap_api_future_plain(async move {
    let key: Vec<u8> = data_encoding::BASE64URL_NOPAD
        .decode(key.as_bytes())
        .unwrap();
    wrap_api_future_plain(async move {
        .map_err(VeilidAPIError::generic)?;
        let table_db = get_table_db(id, "table_db_load")?;

        let out = table_db.load(col, &key).await?;
@ -912,10 +942,10 @@ pub fn table_db_load(id: u32, col: u32, key: String) -> Promise {

#[wasm_bindgen()]
pub fn table_db_delete(id: u32, col: u32, key: String) -> Promise {
    wrap_api_future_plain(async move {
    let key: Vec<u8> = data_encoding::BASE64URL_NOPAD
        .decode(key.as_bytes())
        .unwrap();
    wrap_api_future_plain(async move {
        .map_err(VeilidAPIError::generic)?;
        let table_db = get_table_db(id, "table_db_delete")?;

        let out = table_db.delete(col, &key).await?;
@ -941,16 +971,16 @@ pub fn best_crypto_kind() -> u32 {

#[wasm_bindgen()]
pub fn verify_signatures(node_ids: String, data: String, signatures: String) -> Promise {
    let node_ids: Vec<veilid_core::TypedKey> = veilid_core::deserialize_json(&node_ids).unwrap();
    wrap_api_future_json(async move {
        let node_ids: Vec<veilid_core::TypedKey> =
            veilid_core::deserialize_json(&node_ids).map_err(VeilidAPIError::generic)?;

        let data: Vec<u8> = data_encoding::BASE64URL_NOPAD
            .decode(data.as_bytes())
            .unwrap();
            .map_err(VeilidAPIError::generic)?;

        let typed_signatures: Vec<veilid_core::TypedSignature> =
            veilid_core::deserialize_json(&signatures).unwrap();

    wrap_api_future_json(async move {
            veilid_core::deserialize_json(&signatures).map_err(VeilidAPIError::generic)?;
        let veilid_api = get_veilid_api()?;
        let crypto = veilid_api.crypto()?;
        let out = crypto.verify_signatures(&node_ids, &data, &typed_signatures)?;
@ -960,14 +990,13 @@ pub fn verify_signatures(node_ids: String, data: String, signatures: String) ->

#[wasm_bindgen()]
pub fn generate_signatures(data: String, key_pairs: String) -> Promise {
    wrap_api_future_json(async move {
    let data: Vec<u8> = data_encoding::BASE64URL_NOPAD
        .decode(data.as_bytes())
        .unwrap();
        .map_err(VeilidAPIError::generic)?;

    let key_pairs: Vec<veilid_core::TypedKeyPair> =
        veilid_core::deserialize_json(&key_pairs).unwrap();

    wrap_api_future_json(async move {
        veilid_core::deserialize_json(&key_pairs).map_err(VeilidAPIError::generic)?;
        let veilid_api = get_veilid_api()?;
        let crypto = veilid_api.crypto()?;
        let out = crypto.generate_signatures(&data, &key_pairs, |k, s| {
@ -989,12 +1018,14 @@ pub fn generate_key_pair(kind: u32) -> Promise {

#[wasm_bindgen()]
pub fn crypto_cached_dh(kind: u32, key: String, secret: String) -> Promise {
    wrap_api_future_json(async move {
    let kind: veilid_core::CryptoKind = veilid_core::FourCC::from(kind);

    let key: veilid_core::PublicKey = veilid_core::deserialize_json(&key).unwrap();
    let secret: veilid_core::SecretKey = veilid_core::deserialize_json(&secret).unwrap();
        let key: veilid_core::PublicKey =
            veilid_core::deserialize_json(&key).map_err(VeilidAPIError::generic)?;
        let secret: veilid_core::SecretKey =
            veilid_core::deserialize_json(&secret).map_err(VeilidAPIError::generic)?;

    wrap_api_future_json(async move {
        let veilid_api = get_veilid_api()?;
        let crypto = veilid_api.crypto()?;
        let csv = crypto.get(kind).ok_or_else(|| {
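Throughout the crypto bindings, the `kind: u32` argument is converted with `veilid_core::FourCC::from(kind)` and used to select a crypto system version via `crypto.get(kind)`. A hypothetical sketch of the four-character-code packing this implies; the actual byte order is whatever `FourCC`'s `From<u32>` implementation defines, and big-endian is only assumed here:

```rust
// Hypothetical sketch of packing a four-character code such as "VLD0" into
// the u32 that the wasm bindings pass across the JS boundary. Big-endian
// byte order is an assumption, not taken from the diff.
fn fourcc(code: &[u8; 4]) -> u32 {
    u32::from_be_bytes(*code)
}

fn main() {
    let kind = fourcc(b"VLD0");
    println!("{kind:#010x}"); // one u32 per crypto system version
}
```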
@ -1011,12 +1042,14 @@ pub fn crypto_cached_dh(kind: u32, key: String, secret: String) -> Promise {

#[wasm_bindgen()]
pub fn crypto_compute_dh(kind: u32, key: String, secret: String) -> Promise {
    wrap_api_future_json(async move {
    let kind: veilid_core::CryptoKind = veilid_core::FourCC::from(kind);

    let key: veilid_core::PublicKey = veilid_core::deserialize_json(&key).unwrap();
    let secret: veilid_core::SecretKey = veilid_core::deserialize_json(&secret).unwrap();
        let key: veilid_core::PublicKey =
            veilid_core::deserialize_json(&key).map_err(VeilidAPIError::generic)?;
        let secret: veilid_core::SecretKey =
            veilid_core::deserialize_json(&secret).map_err(VeilidAPIError::generic)?;

    wrap_api_future_json(async move {
        let veilid_api = get_veilid_api()?;
        let crypto = veilid_api.crypto()?;
        let csv = crypto.get(kind).ok_or_else(|| {
@ -1038,15 +1071,17 @@ pub fn crypto_generate_shared_secret(
    secret: String,
    domain: String,
) -> Promise {
    wrap_api_future_json(async move {
    let kind: veilid_core::CryptoKind = veilid_core::FourCC::from(kind);

    let key: veilid_core::PublicKey = veilid_core::deserialize_json(&key).unwrap();
    let secret: veilid_core::SecretKey = veilid_core::deserialize_json(&secret).unwrap();
        let key: veilid_core::PublicKey =
            veilid_core::deserialize_json(&key).map_err(VeilidAPIError::generic)?;
        let secret: veilid_core::SecretKey =
            veilid_core::deserialize_json(&secret).map_err(VeilidAPIError::generic)?;
        let domain: Vec<u8> = data_encoding::BASE64URL_NOPAD
            .decode(domain.as_bytes())
            .unwrap();
            .map_err(VeilidAPIError::generic)?;

    wrap_api_future_json(async move {
        let veilid_api = get_veilid_api()?;
        let crypto = veilid_api.crypto()?;
        let csv = crypto.get(kind).ok_or_else(|| {
@ -1102,15 +1137,15 @@ pub fn crypto_default_salt_length(kind: u32) -> Promise {

#[wasm_bindgen()]
pub fn crypto_hash_password(kind: u32, password: String, salt: String) -> Promise {
    wrap_api_future_plain(async move {
    let kind: veilid_core::CryptoKind = veilid_core::FourCC::from(kind);
    let password: Vec<u8> = data_encoding::BASE64URL_NOPAD
        .decode(password.as_bytes())
        .unwrap();
        .map_err(VeilidAPIError::generic)?;
    let salt: Vec<u8> = data_encoding::BASE64URL_NOPAD
        .decode(salt.as_bytes())
        .unwrap();
        .map_err(VeilidAPIError::generic)?;

    wrap_api_future_plain(async move {
        let veilid_api = get_veilid_api()?;
        let crypto = veilid_api.crypto()?;
        let csv = crypto.get(kind).ok_or_else(|| {
@ -1127,12 +1162,11 @@ pub fn crypto_hash_password(kind: u32, password: String, salt: String) -> Promis

#[wasm_bindgen()]
pub fn crypto_verify_password(kind: u32, password: String, password_hash: String) -> Promise {
    wrap_api_future_plain(async move {
    let kind: veilid_core::CryptoKind = veilid_core::FourCC::from(kind);
    let password: Vec<u8> = data_encoding::BASE64URL_NOPAD
        .decode(password.as_bytes())
        .unwrap();

    wrap_api_future_plain(async move {
        .map_err(VeilidAPIError::generic)?;
        let veilid_api = get_veilid_api()?;
        let crypto = veilid_api.crypto()?;
        let csv = crypto.get(kind).ok_or_else(|| {
@ -1149,15 +1183,15 @@ pub fn crypto_verify_password(kind: u32, password: String, password_hash: String

#[wasm_bindgen()]
pub fn crypto_derive_shared_secret(kind: u32, password: String, salt: String) -> Promise {
    wrap_api_future_json(async move {
    let kind: veilid_core::CryptoKind = veilid_core::FourCC::from(kind);
    let password: Vec<u8> = data_encoding::BASE64URL_NOPAD
        .decode(password.as_bytes())
        .unwrap();
        .map_err(VeilidAPIError::generic)?;
    let salt: Vec<u8> = data_encoding::BASE64URL_NOPAD
        .decode(salt.as_bytes())
        .unwrap();
        .map_err(VeilidAPIError::generic)?;

    wrap_api_future_json(async move {
        let veilid_api = get_veilid_api()?;
        let crypto = veilid_api.crypto()?;
        let csv = crypto.get(kind).ok_or_else(|| {
@ -1174,9 +1208,9 @@ pub fn crypto_derive_shared_secret(kind: u32, password: String, salt: String) ->

#[wasm_bindgen()]
pub fn crypto_random_nonce(kind: u32) -> Promise {
    wrap_api_future_json(async move {
    let kind: veilid_core::CryptoKind = veilid_core::FourCC::from(kind);

    wrap_api_future_json(async move {
        let veilid_api = get_veilid_api()?;
        let crypto = veilid_api.crypto()?;
        let csv = crypto.get(kind).ok_or_else(|| {
@ -1231,13 +1265,13 @@ pub fn crypto_generate_key_pair(kind: u32) -> Promise {

#[wasm_bindgen()]
pub fn crypto_generate_hash(kind: u32, data: String) -> Promise {
    wrap_api_future_json(async move {
    let kind: veilid_core::CryptoKind = veilid_core::FourCC::from(kind);

    let data: Vec<u8> = data_encoding::BASE64URL_NOPAD
        .decode(data.as_bytes())
        .unwrap();
        .map_err(VeilidAPIError::generic)?;

    wrap_api_future_json(async move {
        let veilid_api = get_veilid_api()?;
        let crypto = veilid_api.crypto()?;
        let csv = crypto.get(kind).ok_or_else(|| {
@ -1254,12 +1288,14 @@ pub fn crypto_generate_hash(kind: u32, data: String) -> Promise {

#[wasm_bindgen()]
pub fn crypto_validate_key_pair(kind: u32, key: String, secret: String) -> Promise {
    wrap_api_future_plain(async move {
    let kind: veilid_core::CryptoKind = veilid_core::FourCC::from(kind);

    let key: veilid_core::PublicKey = veilid_core::deserialize_json(&key).unwrap();
    let secret: veilid_core::SecretKey = veilid_core::deserialize_json(&secret).unwrap();
        let key: veilid_core::PublicKey =
            veilid_core::deserialize_json(&key).map_err(VeilidAPIError::generic)?;
        let secret: veilid_core::SecretKey =
            veilid_core::deserialize_json(&secret).map_err(VeilidAPIError::generic)?;

    wrap_api_future_plain(async move {
        let veilid_api = get_veilid_api()?;
        let crypto = veilid_api.crypto()?;
        let csv = crypto.get(kind).ok_or_else(|| {
@ -1276,15 +1312,16 @@ pub fn crypto_validate_key_pair(kind: u32, key: String, secret: String) -> Promi

#[wasm_bindgen()]
pub fn crypto_validate_hash(kind: u32, data: String, hash: String) -> Promise {
    wrap_api_future_plain(async move {
    let kind: veilid_core::CryptoKind = veilid_core::FourCC::from(kind);

    let data: Vec<u8> = data_encoding::BASE64URL_NOPAD
        .decode(data.as_bytes())
        .unwrap();
        .map_err(VeilidAPIError::generic)?;

    let hash: veilid_core::HashDigest = veilid_core::deserialize_json(&hash).unwrap();
        let hash: veilid_core::HashDigest =
            veilid_core::deserialize_json(&hash).map_err(VeilidAPIError::generic)?;

    wrap_api_future_plain(async move {
        let veilid_api = get_veilid_api()?;
        let crypto = veilid_api.crypto()?;
        let csv = crypto.get(kind).ok_or_else(|| {
@ -1301,12 +1338,14 @@ pub fn crypto_validate_hash(kind: u32, data: String, hash: String) -> Promise {

#[wasm_bindgen()]
pub fn crypto_distance(kind: u32, key1: String, key2: String) -> Promise {
    wrap_api_future_json(async move {
    let kind: veilid_core::CryptoKind = veilid_core::FourCC::from(kind);

    let key1: veilid_core::CryptoKey = veilid_core::deserialize_json(&key1).unwrap();
    let key2: veilid_core::CryptoKey = veilid_core::deserialize_json(&key2).unwrap();
        let key1: veilid_core::CryptoKey =
            veilid_core::deserialize_json(&key1).map_err(VeilidAPIError::generic)?;
        let key2: veilid_core::CryptoKey =
            veilid_core::deserialize_json(&key2).map_err(VeilidAPIError::generic)?;

    wrap_api_future_json(async move {
        let veilid_api = get_veilid_api()?;
        let crypto = veilid_api.crypto()?;
        let csv = crypto.get(kind).ok_or_else(|| {
@ -1323,16 +1362,18 @@ pub fn crypto_distance(kind: u32, key1: String, key2: String) -> Promise {

#[wasm_bindgen()]
pub fn crypto_sign(kind: u32, key: String, secret: String, data: String) -> Promise {
    wrap_api_future_json(async move {
    let kind: veilid_core::CryptoKind = veilid_core::FourCC::from(kind);

    let key: veilid_core::CryptoKey = veilid_core::deserialize_json(&key).unwrap();
    let secret: veilid_core::CryptoKey = veilid_core::deserialize_json(&secret).unwrap();
        let key: veilid_core::CryptoKey =
            veilid_core::deserialize_json(&key).map_err(VeilidAPIError::generic)?;
        let secret: veilid_core::CryptoKey =
            veilid_core::deserialize_json(&secret).map_err(VeilidAPIError::generic)?;

    let data: Vec<u8> = data_encoding::BASE64URL_NOPAD
        .decode(data.as_bytes())
        .unwrap();
        .map_err(VeilidAPIError::generic)?;

    wrap_api_future_json(async move {
        let veilid_api = get_veilid_api()?;
        let crypto = veilid_api.crypto()?;
        let csv = crypto.get(kind).ok_or_else(|| {
@ -1345,15 +1386,17 @@ pub fn crypto_sign(kind: u32, key: String, secret: String, data: String) -> Prom

#[wasm_bindgen()]
pub fn crypto_verify(kind: u32, key: String, data: String, signature: String) -> Promise {
    wrap_api_future_plain(async move {
    let kind: veilid_core::CryptoKind = veilid_core::FourCC::from(kind);

    let key: veilid_core::CryptoKey = veilid_core::deserialize_json(&key).unwrap();
        let key: veilid_core::CryptoKey =
            veilid_core::deserialize_json(&key).map_err(VeilidAPIError::generic)?;
    let data: Vec<u8> = data_encoding::BASE64URL_NOPAD
        .decode(data.as_bytes())
        .unwrap();
    let signature: veilid_core::Signature = veilid_core::deserialize_json(&signature).unwrap();
        .map_err(VeilidAPIError::generic)?;
        let signature: veilid_core::Signature =
            veilid_core::deserialize_json(&signature).map_err(VeilidAPIError::generic)?;

    wrap_api_future_plain(async move {
        let veilid_api = get_veilid_api()?;
        let crypto = veilid_api.crypto()?;
        let csv = crypto.get(kind).ok_or_else(|| {
@ -1366,9 +1409,9 @@ pub fn crypto_verify(kind: u32, key: String, data: String, signature: String) ->

#[wasm_bindgen()]
pub fn crypto_aead_overhead(kind: u32) -> Promise {
    wrap_api_future_plain(async move {
    let kind: veilid_core::CryptoKind = veilid_core::FourCC::from(kind);

    wrap_api_future_plain(async move {
        let veilid_api = get_veilid_api()?;
        let crypto = veilid_api.crypto()?;
        let csv = crypto.get(kind).ok_or_else(|| {
@ -1391,24 +1434,28 @@ pub fn crypto_decrypt_aead(
    shared_secret: String,
    associated_data: Option<String>,
) -> Promise {
    wrap_api_future_plain(async move {
    let kind: veilid_core::CryptoKind = veilid_core::FourCC::from(kind);

    let body: Vec<u8> = data_encoding::BASE64URL_NOPAD
        .decode(body.as_bytes())
        .unwrap();
        .map_err(VeilidAPIError::generic)?;

    let nonce: veilid_core::Nonce = veilid_core::deserialize_json(&nonce).unwrap();
        let nonce: veilid_core::Nonce =
            veilid_core::deserialize_json(&nonce).map_err(VeilidAPIError::generic)?;

    let shared_secret: veilid_core::SharedSecret =
        veilid_core::deserialize_json(&shared_secret).unwrap();
        veilid_core::deserialize_json(&shared_secret).map_err(VeilidAPIError::generic)?;

    let associated_data: Option<Vec<u8>> = associated_data.map(|ad| {
        let associated_data: Option<Vec<u8>> = match associated_data {
            Some(ad) => Some(
                data_encoding::BASE64URL_NOPAD
                    .decode(ad.as_bytes())
                    .unwrap()
    });
                    .map_err(VeilidAPIError::generic)?,
            ),
            None => None,
        };

    wrap_api_future_plain(async move {
        let veilid_api = get_veilid_api()?;
        let crypto = veilid_api.crypto()?;
        let csv = crypto.get(kind).ok_or_else(|| {
@ -1440,24 +1487,28 @@ pub fn crypto_encrypt_aead(
    shared_secret: String,
    associated_data: Option<String>,
) -> Promise {
    wrap_api_future_plain(async move {
    let kind: veilid_core::CryptoKind = veilid_core::FourCC::from(kind);

    let body: Vec<u8> = data_encoding::BASE64URL_NOPAD
        .decode(body.as_bytes())
        .unwrap();
        .map_err(VeilidAPIError::generic)?;

    let nonce: veilid_core::Nonce = veilid_core::deserialize_json(&nonce).unwrap();
        let nonce: veilid_core::Nonce =
            veilid_core::deserialize_json(&nonce).map_err(VeilidAPIError::generic)?;

    let shared_secret: veilid_core::SharedSecret =
        veilid_core::deserialize_json(&shared_secret).unwrap();
        veilid_core::deserialize_json(&shared_secret).map_err(VeilidAPIError::generic)?;

    let associated_data: Option<Vec<u8>> = associated_data.map(|ad| {
        let associated_data: Option<Vec<u8>> = match associated_data {
            Some(ad) => Some(
                data_encoding::BASE64URL_NOPAD
                    .decode(ad.as_bytes())
                    .unwrap()
    });
                    .map_err(VeilidAPIError::generic)?,
            ),
            None => None,
        };

    wrap_api_future_plain(async move {
        let veilid_api = get_veilid_api()?;
        let crypto = veilid_api.crypto()?;
        let csv = crypto.get(kind).ok_or_else(|| {
@ -1488,18 +1539,19 @@ pub fn crypto_crypt_no_auth(
    nonce: String,
    shared_secret: String,
) -> Promise {
    wrap_api_future_plain(async move {
    let kind: veilid_core::CryptoKind = veilid_core::FourCC::from(kind);

    let mut body: Vec<u8> = data_encoding::BASE64URL_NOPAD
        .decode(body.as_bytes())
        .unwrap();
        .map_err(VeilidAPIError::generic)?;

    let nonce: veilid_core::Nonce = veilid_core::deserialize_json(&nonce).unwrap();
        let nonce: veilid_core::Nonce =
            veilid_core::deserialize_json(&nonce).map_err(VeilidAPIError::generic)?;

    let shared_secret: veilid_core::SharedSecret =
        veilid_core::deserialize_json(&shared_secret).unwrap();
        veilid_core::deserialize_json(&shared_secret).map_err(VeilidAPIError::generic)?;

    wrap_api_future_plain(async move {
        let veilid_api = get_veilid_api()?;
        let crypto = veilid_api.crypto()?;
        let csv = crypto.get(kind).ok_or_else(|| {

@ -124,10 +124,11 @@ impl VeilidClient {
        }
    } else {
        // Change a specific layer
        let f = filters.get(layer.as_str()).unwrap();
        if let Some(f) = filters.get(layer.as_str()) {
            f.set_max_level(log_level);
        }
    }
}

// TODO: can we refine the TS type of `layer`?
pub fn changeLogIgnore(layer: String, changes: Vec<String>) {
@ -142,12 +143,13 @@ impl VeilidClient {
        }
    } else {
        // Change a specific layer
        let f = filters.get(layer.as_str()).unwrap();
        if let Some(f) = filters.get(layer.as_str()) {
            let mut ignore_list = f.ignore_list();
            VeilidLayerFilter::apply_ignore_change_list(&mut ignore_list, &changes);
            f.set_ignore_list(Some(ignore_list));
        }
    }
}
/// Shut down Veilid and terminate the API.
pub async fn shutdownCore() -> APIResult<()> {
    let veilid_api = take_veilid_api()?;

@ -331,7 +331,9 @@ impl VeilidRoutingContext {
    let key = TypedKey::from_str(&key)?;
    let subkeys = subkeys.unwrap_or_default();
    let expiration = if let Some(expiration) = expiration {
        veilid_core::Timestamp::from_str(&expiration).map_err(VeilidAPIError::generic)?
        veilid_core::Timestamp::new(
            u64::from_str(&expiration).map_err(VeilidAPIError::generic)?,
        )
    } else {
        veilid_core::Timestamp::default()
    };