Merge branch 'wasm-test-work' into 'main'

Fixes for WASM DHT and other connection-oriented protocol issues

See merge request veilid/veilid!234
Christien Rioux 2023-11-05 01:22:26 +00:00
commit 88389a1b78
109 changed files with 1550 additions and 1394 deletions

View File

@ -253,13 +253,15 @@ tracing-oslog = { version = "0.1.2", optional = true }
### DEV DEPENDENCIES
[dev-dependencies]
serial_test = "2.0.0"
[target.'cfg(not(target_arch = "wasm32"))'.dev-dependencies]
simplelog = { version = "0.12.1", features = ["test"] }
serial_test = "2.0.0"
tracing-subscriber = { version = "0.3.17", features = ["env-filter"] }
[target.'cfg(target_arch = "wasm32")'.dev-dependencies]
serial_test = { version = "2.0.0", default-features = false, features = [
"async",
] }
wasm-bindgen-test = "0.3.37"
console_error_panic_hook = "0.1.7"
wee_alloc = "0.4.5"
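Note on the dev-dependency split above: native builds keep simplelog and tracing-subscriber for test logging, while wasm32 builds pull in wasm-bindgen-test, console_error_panic_hook and wee_alloc (typically installed as the global allocator), and use serial_test with default features off plus its async feature so serialized async tests work without threads. A minimal sketch of a wasm test module that relies on these dependencies; the test name and body are illustrative, not part of this change:

use wasm_bindgen_test::*;

// Run the wasm tests inside a browser environment rather than Node.
wasm_bindgen_test_configure!(run_in_browser);

#[wasm_bindgen_test]
async fn test_wasm_connection_behavior() {
    // Surface panics in the browser console instead of an opaque trap.
    console_error_panic_hook::set_once();
    // ... drive the async, connection-oriented code under test here ...
}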

View File

@ -4,7 +4,7 @@ use network_manager::*;
use routing_table::*;
use storage_manager::*;
pub struct AttachmentManagerInner {
struct AttachmentManagerInner {
last_attachment_state: AttachmentState,
last_routing_table_health: Option<RoutingTableHealth>,
maintain_peers: bool,
@ -13,13 +13,13 @@ pub struct AttachmentManagerInner {
attachment_maintainer_jh: Option<MustJoinHandle<()>>,
}
pub struct AttachmentManagerUnlockedInner {
struct AttachmentManagerUnlockedInner {
config: VeilidConfig,
network_manager: NetworkManager,
}
#[derive(Clone)]
pub struct AttachmentManager {
pub(crate) struct AttachmentManager {
inner: Arc<Mutex<AttachmentManagerInner>>,
unlocked_inner: Arc<AttachmentManagerUnlockedInner>,
}
@ -28,7 +28,6 @@ impl AttachmentManager {
fn new_unlocked_inner(
config: VeilidConfig,
storage_manager: StorageManager,
protected_store: ProtectedStore,
table_store: TableStore,
#[cfg(feature = "unstable-blockstore")] block_store: BlockStore,
crypto: Crypto,
@ -38,7 +37,6 @@ impl AttachmentManager {
network_manager: NetworkManager::new(
config,
storage_manager,
protected_store,
table_store,
#[cfg(feature = "unstable-blockstore")]
block_store,
@ -59,7 +57,6 @@ impl AttachmentManager {
pub fn new(
config: VeilidConfig,
storage_manager: StorageManager,
protected_store: ProtectedStore,
table_store: TableStore,
#[cfg(feature = "unstable-blockstore")] block_store: BlockStore,
crypto: Crypto,
@ -69,7 +66,6 @@ impl AttachmentManager {
unlocked_inner: Arc::new(Self::new_unlocked_inner(
config,
storage_manager,
protected_store,
table_store,
#[cfg(feature = "unstable-blockstore")]
block_store,

View File

@ -140,7 +140,6 @@ impl ServicesContext {
let attachment_manager = AttachmentManager::new(
self.config.clone(),
storage_manager,
protected_store,
table_store,
#[cfg(feature = "unstable-blockstore")]
block_store,
@ -199,7 +198,7 @@ impl ServicesContext {
/////////////////////////////////////////////////////////////////////////////
///
pub struct VeilidCoreContext {
pub(crate) struct VeilidCoreContext {
pub config: VeilidConfig,
pub update_callback: UpdateCallback,
// Services

View File

@ -49,7 +49,6 @@ mod core_context;
mod crypto;
mod intf;
mod network_manager;
mod receipt_manager;
mod routing_table;
mod rpc_processor;
mod storage_manager;

View File

@ -69,7 +69,7 @@ impl fmt::Debug for AddressFilterUnlockedInner {
}
#[derive(Clone, Debug)]
pub struct AddressFilter {
pub(crate) struct AddressFilter {
unlocked_inner: Arc<AddressFilterUnlockedInner>,
inner: Arc<Mutex<AddressFilterInner>>,
}

View File

@ -2,8 +2,8 @@ use super::*;
#[derive(Clone, Debug)]
pub struct ConnectionHandle {
id: NetworkConnectionId,
descriptor: ConnectionDescriptor,
connection_id: NetworkConnectionId,
flow: Flow,
channel: flume::Sender<(Option<Id>, Vec<u8>)>,
}
@ -15,32 +15,42 @@ pub enum ConnectionHandleSendResult {
impl ConnectionHandle {
pub(super) fn new(
id: NetworkConnectionId,
descriptor: ConnectionDescriptor,
connection_id: NetworkConnectionId,
flow: Flow,
channel: flume::Sender<(Option<Id>, Vec<u8>)>,
) -> Self {
Self {
id,
descriptor,
connection_id,
flow,
channel,
}
}
#[allow(dead_code)]
pub fn connection_id(&self) -> NetworkConnectionId {
self.id
self.connection_id
}
pub fn connection_descriptor(&self) -> ConnectionDescriptor {
self.descriptor
#[allow(dead_code)]
pub fn flow(&self) -> Flow {
self.flow
}
#[cfg_attr(feature="verbose-tracing", instrument(level="trace", skip(self, message), fields(message.len = message.len())))]
pub fn send(&self, message: Vec<u8>) -> ConnectionHandleSendResult {
match self.channel.send((Span::current().id(), message)) {
Ok(()) => ConnectionHandleSendResult::Sent,
Err(e) => ConnectionHandleSendResult::NotSent(e.0 .1),
pub fn unique_flow(&self) -> UniqueFlow {
UniqueFlow {
flow: self.flow,
connection_id: Some(self.connection_id),
}
}
// #[cfg_attr(feature="verbose-tracing", instrument(level="trace", skip(self, message), fields(message.len = message.len())))]
// pub fn send(&self, message: Vec<u8>) -> ConnectionHandleSendResult {
// match self.channel.send((Span::current().id(), message)) {
// Ok(()) => ConnectionHandleSendResult::Sent,
// Err(e) => ConnectionHandleSendResult::NotSent(e.0 .1),
// }
// }
#[cfg_attr(feature="verbose-tracing", instrument(level="trace", skip(self, message), fields(message.len = message.len())))]
pub async fn send_async(&self, message: Vec<u8>) -> ConnectionHandleSendResult {
match self
@ -56,7 +66,7 @@ impl ConnectionHandle {
impl PartialEq for ConnectionHandle {
fn eq(&self, other: &Self) -> bool {
self.descriptor == other.descriptor
self.connection_id == other.connection_id && self.flow == other.flow
}
}
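For reference while reading the rest of this diff: ConnectionDescriptor has been renamed to Flow, and the new unique_flow() accessor pairs that Flow with the id of the specific connection it belongs to. Judging from the constructions above, UniqueFlow presumably looks roughly like the following stand-in sketch (in veilid-core, NetworkConnectionId is an AlignedU64 and Flow is the renamed local/remote address pair; the real definition lives elsewhere in this change):

// Stand-in types for illustration only.
type NetworkConnectionId = u64;

#[derive(Copy, Clone, Debug, PartialEq, Eq)]
struct Flow; // placeholder for the remote/local peer-address pair

#[derive(Copy, Clone, Debug, PartialEq, Eq)]
struct UniqueFlow {
    flow: Flow,
    connection_id: Option<NetworkConnectionId>, // None for connectionless (UDP) sends
}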

View File

@ -1,4 +1,5 @@
use super::*;
pub(crate) use connection_table::ConnectionRefKind;
use connection_table::*;
use network_connection::*;
use stop_token::future::FutureExt;
@ -12,6 +13,31 @@ enum ConnectionManagerEvent {
Dead(NetworkConnection),
}
#[derive(Debug)]
pub(crate) struct ConnectionRefScope {
connection_manager: ConnectionManager,
id: NetworkConnectionId,
}
impl ConnectionRefScope {
pub fn try_new(connection_manager: ConnectionManager, id: NetworkConnectionId) -> Option<Self> {
if !connection_manager.connection_ref(id, ConnectionRefKind::AddRef) {
return None;
}
Some(Self {
connection_manager,
id,
})
}
}
impl Drop for ConnectionRefScope {
fn drop(&mut self) {
self.connection_manager
.connection_ref(self.id, ConnectionRefKind::RemoveRef);
}
}
#[derive(Debug)]
struct ConnectionManagerInner {
next_id: NetworkConnectionId,
@ -37,7 +63,7 @@ impl core::fmt::Debug for ConnectionManagerArc {
}
#[derive(Debug, Clone)]
pub struct ConnectionManager {
pub(crate) struct ConnectionManager {
arc: Arc<ConnectionManagerArc>,
}
@ -84,10 +110,6 @@ impl ConnectionManager {
self.arc.network_manager.clone()
}
pub fn connection_initial_timeout_ms(&self) -> u32 {
self.arc.connection_initial_timeout_ms
}
pub fn connection_inactivity_timeout_ms(&self) -> u32 {
self.arc.connection_inactivity_timeout_ms
}
@ -140,29 +162,29 @@ impl ConnectionManager {
// Internal routine to see if we should keep this connection
// from being LRU removed. Used on our initiated relay connections.
fn should_protect_connection(&self, conn: &NetworkConnection) -> bool {
fn should_protect_connection(&self, conn: &NetworkConnection) -> Option<NodeRef> {
let netman = self.network_manager();
let routing_table = netman.routing_table();
let remote_address = conn.connection_descriptor().remote_address().address();
let remote_address = conn.flow().remote_address().address();
let Some(routing_domain) = routing_table.routing_domain_for_address(remote_address) else {
return false;
return None;
};
let Some(rn) = routing_table.relay_node(routing_domain) else {
return false;
return None;
};
let relay_nr = rn.filtered_clone(
NodeRefFilter::new()
.with_routing_domain(routing_domain)
.with_address_type(conn.connection_descriptor().address_type())
.with_protocol_type(conn.connection_descriptor().protocol_type()),
.with_address_type(conn.flow().address_type())
.with_protocol_type(conn.flow().protocol_type()),
);
let dids = relay_nr.all_filtered_dial_info_details();
for did in dids {
if did.dial_info.address() == remote_address {
return true;
return Some(relay_nr);
}
}
false
None
}
// Internal routine to register new connection atomically.
@ -193,10 +215,9 @@ impl ConnectionManager {
let handle = conn.get_handle();
// See if this should be a protected connection
let protect = self.should_protect_connection(&conn);
if protect {
log_net!(debug "== PROTECTING connection: {} -> {}", id, conn.debug_print(get_aligned_timestamp()));
conn.protect();
if let Some(protect_nr) = self.should_protect_connection(&conn) {
log_net!(debug "== PROTECTING connection: {} -> {} for node {}", id, conn.debug_print(get_aligned_timestamp()), protect_nr);
conn.protect(protect_nr);
}
// Add to the connection table
@ -207,12 +228,12 @@ impl ConnectionManager {
Ok(Some(conn)) => {
// Connection added and a different one LRU'd out
// Send it to be terminated
// log_net!(debug "== LRU kill connection due to limit: {:?}", conn);
log_net!(debug "== LRU kill connection due to limit: {:?}", conn.debug_print(get_aligned_timestamp()));
let _ = inner.sender.send(ConnectionManagerEvent::Dead(conn));
}
Err(ConnectionTableAddError::AddressFilter(conn, e)) => {
// Connection filtered
let desc = conn.connection_descriptor();
let desc = conn.flow();
let _ = inner.sender.send(ConnectionManagerEvent::Dead(conn));
return Ok(NetworkResult::no_connection_other(format!(
"connection filtered: {:?} ({})",
@ -221,23 +242,44 @@ impl ConnectionManager {
}
Err(ConnectionTableAddError::AlreadyExists(conn)) => {
// Connection already exists
let desc = conn.connection_descriptor();
let desc = conn.flow();
log_net!(debug "== Connection already exists: {:?}", conn.debug_print(get_aligned_timestamp()));
let _ = inner.sender.send(ConnectionManagerEvent::Dead(conn));
return Ok(NetworkResult::no_connection_other(format!(
"connection already exists: {:?}",
desc
)));
}
Err(ConnectionTableAddError::TableFull(conn)) => {
// Connection table is full
let desc = conn.flow();
log_net!(debug "== Connection table full: {:?}", conn.debug_print(get_aligned_timestamp()));
let _ = inner.sender.send(ConnectionManagerEvent::Dead(conn));
return Ok(NetworkResult::no_connection_other(format!(
"connection table is full: {:?}",
desc
)));
}
};
Ok(NetworkResult::Value(handle))
}
// Returns a network connection if one already is established
//#[instrument(level = "trace", skip(self), ret)]
pub fn get_connection(&self, descriptor: ConnectionDescriptor) -> Option<ConnectionHandle> {
self.arc
.connection_table
.get_connection_by_descriptor(descriptor)
pub fn get_connection(&self, flow: Flow) -> Option<ConnectionHandle> {
self.arc.connection_table.peek_connection_by_flow(flow)
}
// Returns a network connection if one already is established
pub(super) fn touch_connection_by_id(&self, id: NetworkConnectionId) {
self.arc.connection_table.touch_connection_by_id(id)
}
// Protects a network connection if one already is established
fn connection_ref(&self, id: NetworkConnectionId, kind: ConnectionRefKind) -> bool {
self.arc.connection_table.ref_connection_by_id(id, kind)
}
pub fn try_connection_ref_scope(&self, id: NetworkConnectionId) -> Option<ConnectionRefScope> {
ConnectionRefScope::try_new(self.clone(), id)
}
/// Called when we want to create a new connection or get the current one that already exists
@ -341,14 +383,22 @@ impl ConnectionManager {
// Process async commands
while let Ok(Ok(event)) = receiver.recv_async().timeout_at(stop_token.clone()).await {
match event {
ConnectionManagerEvent::Accepted(conn) => {
ConnectionManagerEvent::Accepted(prot_conn) => {
// Async lock on the remote address for atomicity per remote
let _lock_guard = self
.arc
.address_lock_table
.lock_tag(prot_conn.flow().remote_address().socket_addr())
.await;
let mut inner = self.arc.inner.lock();
match &mut *inner {
Some(inner) => {
// Register the connection
// We don't care if this fails, since nobody here asked for the inbound connection.
// If it does, we just drop the connection
let _ = self.on_new_protocol_network_connection(inner, conn);
let _ = self.on_new_protocol_network_connection(inner, prot_conn);
}
None => {
// If this somehow happens, we're shutting down
@ -356,6 +406,12 @@ impl ConnectionManager {
};
}
ConnectionManagerEvent::Dead(mut conn) => {
let _lock_guard = self
.arc
.address_lock_table
.lock_tag(conn.flow().remote_address().socket_addr())
.await;
conn.close();
conn.await;
}
@ -415,6 +471,11 @@ impl ConnectionManager {
// Inform the processor of the event
if let Some(conn) = conn {
// If the connection closed while it was protected, report it on the node the connection was established on
// In-use connections will already get reported because they will cause a 'question_lost' stat on the remote node
if let Some(protect_nr) = conn.protected_node_ref() {
protect_nr.report_protected_connection_dropped();
}
let _ = sender.send_async(ConnectionManagerEvent::Dead(conn)).await;
}
}
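The new ConnectionRefScope is an RAII guard: try_connection_ref_scope() adds a reference to a live connection and the Drop impl removes it, so the connection stays out of LRU eviction for exactly as long as the guard is held. A hedged in-crate usage sketch; only try_connection_ref_scope() comes from this change, the surrounding helper is illustrative:

fn with_held_connection(connection_manager: &ConnectionManager, id: NetworkConnectionId) {
    let Some(_ref_scope) = connection_manager.try_connection_ref_scope(id) else {
        // The connection died before we could reference it; nothing to hold.
        return;
    };
    // While _ref_scope is alive the connection counts as referenced, so the
    // connection table will not evict it to make room for new connections.

    // ... do work that relies on the connection staying alive ...

    // _ref_scope drops here and removes the reference again.
}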

View File

@ -4,11 +4,13 @@ use hashlink::LruCache;
///////////////////////////////////////////////////////////////////////////////
#[derive(ThisError, Debug)]
pub enum ConnectionTableAddError {
pub(in crate::network_manager) enum ConnectionTableAddError {
#[error("Connection already added to table")]
AlreadyExists(NetworkConnection),
#[error("Connection address was filtered")]
AddressFilter(NetworkConnection, AddressFilterError),
#[error("Connection table is full")]
TableFull(NetworkConnection),
}
impl ConnectionTableAddError {
@ -18,22 +20,31 @@ impl ConnectionTableAddError {
pub fn address_filter(conn: NetworkConnection, err: AddressFilterError) -> Self {
ConnectionTableAddError::AddressFilter(conn, err)
}
pub fn table_full(conn: NetworkConnection) -> Self {
ConnectionTableAddError::TableFull(conn)
}
}
#[derive(Clone, Copy, Debug)]
pub(crate) enum ConnectionRefKind {
AddRef,
RemoveRef,
}
///////////////////////////////////////////////////////////////////////////////
#[derive(Debug)]
pub struct ConnectionTableInner {
struct ConnectionTableInner {
max_connections: Vec<usize>,
conn_by_id: Vec<LruCache<NetworkConnectionId, NetworkConnection>>,
protocol_index_by_id: BTreeMap<NetworkConnectionId, usize>,
id_by_descriptor: BTreeMap<ConnectionDescriptor, NetworkConnectionId>,
id_by_flow: BTreeMap<Flow, NetworkConnectionId>,
ids_by_remote: BTreeMap<PeerAddress, Vec<NetworkConnectionId>>,
address_filter: AddressFilter,
}
#[derive(Debug)]
pub struct ConnectionTable {
pub(in crate::network_manager) struct ConnectionTable {
inner: Arc<Mutex<ConnectionTableInner>>,
}
@ -56,7 +67,7 @@ impl ConnectionTable {
LruCache::new_unbounded(),
],
protocol_index_by_id: BTreeMap::new(),
id_by_descriptor: BTreeMap::new(),
id_by_flow: BTreeMap::new(),
ids_by_remote: BTreeMap::new(),
address_filter,
})),
@ -87,13 +98,14 @@ impl ConnectionTable {
let mut inner = self.inner.lock();
let unord = FuturesUnordered::new();
for table in &mut inner.conn_by_id {
for (_, v) in table.drain() {
for (_, mut v) in table.drain() {
trace!("connection table join: {:?}", v);
v.close();
unord.push(v);
}
}
inner.protocol_index_by_id.clear();
inner.id_by_descriptor.clear();
inner.id_by_flow.clear();
inner.ids_by_remote.clear();
unord
};
@ -139,14 +151,14 @@ impl ConnectionTable {
) -> Result<Option<NetworkConnection>, ConnectionTableAddError> {
// Get indices for network connection table
let id = network_connection.connection_id();
let descriptor = network_connection.connection_descriptor();
let protocol_index = Self::protocol_to_index(descriptor.protocol_type());
let remote = descriptor.remote();
let flow = network_connection.flow();
let protocol_index = Self::protocol_to_index(flow.protocol_type());
let remote = flow.remote();
let mut inner = self.inner.lock();
// Two connections to the same descriptor should be rejected (soft rejection)
if inner.id_by_descriptor.contains_key(&descriptor) {
// Two connections to the same flow should be rejected (soft rejection)
if inner.id_by_flow.contains_key(&flow) {
return Err(ConnectionTableAddError::already_exists(network_connection));
}
@ -157,14 +169,14 @@ impl ConnectionTable {
if inner.protocol_index_by_id.get(&id).is_some() {
panic!("duplicate id to protocol index: {:#?}", network_connection);
}
if let Some(ids) = inner.ids_by_remote.get(&descriptor.remote()) {
if let Some(ids) = inner.ids_by_remote.get(&flow.remote()) {
if ids.contains(&id) {
panic!("duplicate id by remote: {:#?}", network_connection);
}
}
// Filter by ip for connection limits
let ip_addr = descriptor.remote_address().ip_addr();
let ip_addr = flow.remote_address().ip_addr();
match inner.address_filter.add_connection(ip_addr) {
Ok(()) => {}
Err(e) => {
@ -176,59 +188,81 @@ impl ConnectionTable {
}
};
// if we have reached the maximum number of connections per protocol type
// then drop the least recently used connection that is not protected or referenced
let mut out_conn = None;
if inner.conn_by_id[protocol_index].len() >= inner.max_connections[protocol_index] {
// Find a free connection to terminate to make room
let dead_k = {
let Some(lruk) = inner.conn_by_id[protocol_index].iter().find_map(|(k, v)| {
if !v.is_in_use() && v.protected_node_ref().is_none() {
Some(*k)
} else {
None
}
}) else {
// Can't make room, connection table is full
return Err(ConnectionTableAddError::table_full(network_connection));
};
lruk
};
let dead_conn = Self::remove_connection_records(&mut inner, dead_k);
out_conn = Some(dead_conn);
}
// Add the connection to the table
let res = inner.conn_by_id[protocol_index].insert(id, network_connection);
assert!(res.is_none());
// if we have reached the maximum number of connections per protocol type
// then drop the least recently used connection
let mut out_conn = None;
if inner.conn_by_id[protocol_index].len() > inner.max_connections[protocol_index] {
while let Some((lruk, lru_conn)) = inner.conn_by_id[protocol_index].peek_lru() {
let lruk = *lruk;
// Don't LRU protected connections
if lru_conn.is_protected() {
// Mark as recently used
log_net!(debug "== No LRU Out for PROTECTED connection: {} -> {}", lruk, lru_conn.debug_print(get_aligned_timestamp()));
inner.conn_by_id[protocol_index].get(&lruk);
continue;
}
log_net!(debug "== LRU Connection Killed: {} -> {}", lruk, lru_conn.debug_print(get_aligned_timestamp()));
out_conn = Some(Self::remove_connection_records(&mut inner, lruk));
break;
}
}
// add connection records
inner.protocol_index_by_id.insert(id, protocol_index);
inner.id_by_descriptor.insert(descriptor, id);
inner.id_by_flow.insert(flow, id);
inner.ids_by_remote.entry(remote).or_default().push(id);
Ok(out_conn)
}
//#[instrument(level = "trace", skip(self), ret)]
#[allow(dead_code)]
pub fn get_connection_by_id(&self, id: NetworkConnectionId) -> Option<ConnectionHandle> {
let mut inner = self.inner.lock();
let protocol_index = *inner.protocol_index_by_id.get(&id)?;
let out = inner.conn_by_id[protocol_index].get(&id).unwrap();
pub fn peek_connection_by_flow(&self, flow: Flow) -> Option<ConnectionHandle> {
if flow.protocol_type() == ProtocolType::UDP {
return None;
}
let inner = self.inner.lock();
let id = *inner.id_by_flow.get(&flow)?;
let protocol_index = Self::protocol_to_index(flow.protocol_type());
let out = inner.conn_by_id[protocol_index].peek(&id).unwrap();
Some(out.get_handle())
}
//#[instrument(level = "trace", skip(self), ret)]
pub fn get_connection_by_descriptor(
&self,
descriptor: ConnectionDescriptor,
) -> Option<ConnectionHandle> {
pub fn touch_connection_by_id(&self, id: NetworkConnectionId) {
let mut inner = self.inner.lock();
let Some(protocol_index) = inner.protocol_index_by_id.get(&id).copied() else {
return;
};
let _ = inner.conn_by_id[protocol_index].get(&id).unwrap();
}
let id = *inner.id_by_descriptor.get(&descriptor)?;
let protocol_index = Self::protocol_to_index(descriptor.protocol_type());
let out = inner.conn_by_id[protocol_index].get(&id).unwrap();
Some(out.get_handle())
//#[instrument(level = "trace", skip(self), ret)]
pub fn ref_connection_by_id(
&self,
id: NetworkConnectionId,
ref_type: ConnectionRefKind,
) -> bool {
let mut inner = self.inner.lock();
let Some(protocol_index) = inner.protocol_index_by_id.get(&id).copied() else {
// Sometimes network connections die before we can ref/unref them
return false;
};
let out = inner.conn_by_id[protocol_index].get_mut(&id).unwrap();
match ref_type {
ConnectionRefKind::AddRef => out.add_ref(),
ConnectionRefKind::RemoveRef => out.remove_ref(),
}
true
}
// #[instrument(level = "trace", skip(self), ret)]
@ -255,7 +289,7 @@ impl ConnectionTable {
if let Some(best_port) = best_port {
for id in all_ids_by_remote {
let nc = inner.conn_by_id[protocol_index].peek(id).unwrap();
if let Some(local_addr) = nc.connection_descriptor().local() {
if let Some(local_addr) = nc.flow().local() {
if local_addr.port() == best_port {
let nc = inner.conn_by_id[protocol_index].get(id).unwrap();
return Some(nc.get_handle());
@ -282,13 +316,13 @@ impl ConnectionTable {
// pub fn drain_filter<F>(&self, mut filter: F) -> Vec<NetworkConnection>
// where
// F: FnMut(ConnectionDescriptor) -> bool,
// F: FnMut(Flow) -> bool,
// {
// let mut inner = self.inner.lock();
// let mut filtered_ids = Vec::new();
// for cbi in &mut inner.conn_by_id {
// for (id, conn) in cbi {
// if filter(conn.connection_descriptor()) {
// if filter(conn.flow()) {
// filtered_ids.push(*id);
// }
// }
@ -315,11 +349,11 @@ impl ConnectionTable {
let protocol_index = inner.protocol_index_by_id.remove(&id).unwrap();
// conn_by_id
let conn = inner.conn_by_id[protocol_index].remove(&id).unwrap();
// id_by_descriptor
let descriptor = conn.connection_descriptor();
inner.id_by_descriptor.remove(&descriptor).unwrap();
// id_by_flow
let flow = conn.flow();
inner.id_by_flow.remove(&flow).unwrap();
// ids_by_remote
let remote = descriptor.remote();
let remote = flow.remote();
let ids = inner.ids_by_remote.get_mut(&remote).unwrap();
for (n, elem) in ids.iter().enumerate() {
if *elem == id {
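The add_connection() change above reorders the capacity check: before inserting, the table looks for a least-recently-used connection that is neither referenced nor protected and evicts it to make room; if every existing connection is referenced or protected, the insert is rejected with the new TableFull error instead of evicting something that is still needed. A simplified, self-contained model of that policy (toy types, not the veilid-core ones):

// Slot stands in for NetworkConnection; the Vec is ordered least-recently-used first.
struct Slot { id: u64, in_use: bool, protected: bool }

enum AddError { TableFull }

/// Evict one free LRU entry if the table is at capacity; error if nothing is evictable.
fn make_room(table: &mut Vec<Slot>, max: usize) -> Result<Option<Slot>, AddError> {
    if table.len() < max {
        return Ok(None);
    }
    match table.iter().position(|slot| !slot.in_use && !slot.protected) {
        Some(pos) => Ok(Some(table.remove(pos))),
        None => Err(AddError::TableFull),
    }
}

fn main() {
    let mut table = vec![
        Slot { id: 1, in_use: false, protected: true },
        Slot { id: 2, in_use: false, protected: false },
    ];
    // At capacity: slot 2 (unprotected, unreferenced) is the one evicted to make room.
    assert!(matches!(make_room(&mut table, 2), Ok(Some(Slot { id: 2, .. }))));
}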

View File

@ -3,10 +3,7 @@ use super::*;
impl NetworkManager {
// Direct bootstrap request handler (separate fallback mechanism from cheaper TXT bootstrap mechanism)
#[instrument(level = "trace", skip(self), ret, err)]
pub(crate) async fn handle_boot_request(
&self,
descriptor: ConnectionDescriptor,
) -> EyreResult<NetworkResult<()>> {
pub(crate) async fn handle_boot_request(&self, flow: Flow) -> EyreResult<NetworkResult<()>> {
let routing_table = self.routing_table();
// Get a bunch of nodes with the various
@ -22,14 +19,14 @@ impl NetworkManager {
// Reply with a chunk of signed routing table
match self
.net()
.send_data_to_existing_connection(descriptor, json_bytes)
.send_data_to_existing_flow(flow, json_bytes)
.await?
{
None => {
SendDataToExistingFlowResult::Sent(_) => {
// Bootstrap reply was sent
Ok(NetworkResult::value(()))
}
Some(_) => Ok(NetworkResult::no_connection_other(
SendDataToExistingFlowResult::NotSent(_) => Ok(NetworkResult::no_connection_other(
"bootstrap reply could not be sent",
)),
}

View File

@ -11,6 +11,7 @@ mod connection_manager;
mod connection_table;
mod direct_boot;
mod network_connection;
mod receipt_manager;
mod send_data;
mod stats;
mod tasks;
@ -21,11 +22,11 @@ pub mod tests;
////////////////////////////////////////////////////////////////////////////////////////
pub use connection_manager::*;
pub use direct_boot::*;
pub use network_connection::*;
pub use send_data::*;
pub use stats::*;
pub(crate) use connection_manager::*;
pub(crate) use network_connection::*;
pub(crate) use receipt_manager::*;
pub(crate) use stats::*;
pub use types::*;
////////////////////////////////////////////////////////////////////////////////////////
@ -34,12 +35,10 @@ use connection_handle::*;
use crypto::*;
use futures_util::stream::FuturesUnordered;
use hashlink::LruCache;
use intf::*;
#[cfg(not(target_arch = "wasm32"))]
use native::*;
#[cfg(not(target_arch = "wasm32"))]
pub use native::{LOCAL_NETWORK_CAPABILITIES, MAX_CAPABILITIES, PUBLIC_INTERNET_CAPABILITIES};
use receipt_manager::*;
use routing_table::*;
use rpc_processor::*;
use storage_manager::*;
@ -90,11 +89,14 @@ struct ClientWhitelistEntry {
last_seen_ts: Timestamp,
}
#[derive(Copy, Clone, Debug)]
pub enum SendDataKind {
Direct(ConnectionDescriptor),
Indirect,
Existing(ConnectionDescriptor),
#[derive(Clone, Debug)]
pub(crate) struct SendDataMethod {
/// How the data was sent, possibly to a relay
pub contact_method: NodeContactMethod,
/// Pre-relayed contact method
pub opt_relayed_contact_method: Option<NodeContactMethod>,
/// The specific flow used to send the data
pub unique_flow: UniqueFlow,
}
/// Mechanism required to contact another node
@ -126,6 +128,11 @@ struct NodeContactMethodCacheKey {
#[derive(Copy, Clone, Debug, PartialEq, Eq, Ord, PartialOrd, Hash)]
struct PublicAddressCheckCacheKey(ProtocolType, AddressType);
enum SendDataToExistingFlowResult {
Sent(UniqueFlow),
NotSent(Vec<u8>),
}
// The mutable state of the network manager
struct NetworkManagerInner {
stats: NetworkManagerStats,
@ -141,7 +148,6 @@ struct NetworkManagerUnlockedInner {
// Handles
config: VeilidConfig,
storage_manager: StorageManager,
protected_store: ProtectedStore,
table_store: TableStore,
#[cfg(feature = "unstable-blockstore")]
block_store: BlockStore,
@ -160,7 +166,7 @@ struct NetworkManagerUnlockedInner {
}
#[derive(Clone)]
pub struct NetworkManager {
pub(crate) struct NetworkManager {
inner: Arc<Mutex<NetworkManagerInner>>,
unlocked_inner: Arc<NetworkManagerUnlockedInner>,
}
@ -178,7 +184,6 @@ impl NetworkManager {
fn new_unlocked_inner(
config: VeilidConfig,
storage_manager: StorageManager,
protected_store: ProtectedStore,
table_store: TableStore,
#[cfg(feature = "unstable-blockstore")] block_store: BlockStore,
crypto: Crypto,
@ -187,7 +192,6 @@ impl NetworkManager {
NetworkManagerUnlockedInner {
config: config.clone(),
storage_manager,
protected_store,
table_store,
#[cfg(feature = "unstable-blockstore")]
block_store,
@ -206,7 +210,6 @@ impl NetworkManager {
pub fn new(
config: VeilidConfig,
storage_manager: StorageManager,
protected_store: ProtectedStore,
table_store: TableStore,
#[cfg(feature = "unstable-blockstore")] block_store: BlockStore,
crypto: Crypto,
@ -243,7 +246,6 @@ impl NetworkManager {
unlocked_inner: Arc::new(Self::new_unlocked_inner(
config,
storage_manager,
protected_store,
table_store,
#[cfg(feature = "unstable-blockstore")]
block_store,
@ -268,9 +270,6 @@ impl NetworkManager {
pub fn storage_manager(&self) -> StorageManager {
self.unlocked_inner.storage_manager.clone()
}
pub fn protected_store(&self) -> ProtectedStore {
self.unlocked_inner.protected_store.clone()
}
pub fn table_store(&self) -> TableStore {
self.unlocked_inner.table_store.clone()
}
@ -297,7 +296,7 @@ impl NetworkManager {
.unwrap()
.clone()
}
pub fn net(&self) -> Network {
fn net(&self) -> Network {
self.unlocked_inner
.components
.read()
@ -306,6 +305,15 @@ impl NetworkManager {
.net
.clone()
}
fn receipt_manager(&self) -> ReceiptManager {
self.unlocked_inner
.components
.read()
.as_ref()
.unwrap()
.receipt_manager
.clone()
}
pub fn rpc_processor(&self) -> RPCProcessor {
self.unlocked_inner
.components
@ -315,15 +323,6 @@ impl NetworkManager {
.rpc_processor
.clone()
}
pub fn receipt_manager(&self) -> ReceiptManager {
self.unlocked_inner
.components
.read()
.as_ref()
.unwrap()
.receipt_manager
.clone()
}
pub fn connection_manager(&self) -> ConnectionManager {
self.unlocked_inner
.components
@ -667,7 +666,7 @@ impl NetworkManager {
#[instrument(level = "trace", skip(self), err)]
pub async fn handle_signal(
&self,
signal_connection_descriptor: ConnectionDescriptor,
signal_flow: Flow,
signal_info: SignalInfo,
) -> EyreResult<NetworkResult<()>> {
match signal_info {
@ -676,7 +675,7 @@ impl NetworkManager {
let rpc = self.rpc_processor();
// Add the peer info to our routing table
let peer_nr = match routing_table.register_node_with_peer_info(
let mut peer_nr = match routing_table.register_node_with_peer_info(
RoutingDomain::PublicInternet,
peer_info,
false,
@ -690,10 +689,10 @@ impl NetworkManager {
}
};
// Restrict reverse connection to same protocol as inbound signal
let peer_nr = peer_nr.filtered_clone(NodeRefFilter::from(
signal_connection_descriptor.protocol_type(),
));
// Restrict reverse connection to same sequencing requirement as inbound signal
if signal_flow.protocol_type().is_ordered() {
peer_nr.set_sequencing(Sequencing::EnsureOrdered);
}
// Make a reverse connection to the peer and send the receipt to it
rpc.rpc_call_return_receipt(Destination::direct(peer_nr), receipt)
@ -736,7 +735,7 @@ impl NetworkManager {
// Do our half of the hole punch by sending an empty packet
// Both sides will do this and then the receipt will get sent over the punched hole
let connection_descriptor = network_result_try!(
let unique_flow = network_result_try!(
self.net()
.send_data_to_dial_info(
hole_punch_dial_info_detail.dial_info.clone(),
@ -748,7 +747,7 @@ impl NetworkManager {
// XXX: do we need a delay here? or another hole punch packet?
// Set the hole punch as our 'last connection' to ensure we return the receipt over the direct hole punch
peer_nr.set_last_connection(connection_descriptor, get_aligned_timestamp());
peer_nr.set_last_flow(unique_flow.flow, get_aligned_timestamp());
// Return the receipt using the same dial info send the receipt to it
rpc.rpc_call_return_receipt(Destination::direct(peer_nr), receipt)
@ -814,7 +813,7 @@ impl NetworkManager {
node_ref: NodeRef,
destination_node_ref: Option<NodeRef>,
body: B,
) -> EyreResult<NetworkResult<SendDataKind>> {
) -> EyreResult<NetworkResult<SendDataMethod>> {
let destination_node_ref = destination_node_ref.as_ref().unwrap_or(&node_ref).clone();
let best_node_id = destination_node_ref.best_node_id();
@ -873,28 +872,20 @@ impl NetworkManager {
// network protocol handler. Processes the envelope, authenticates and decrypts the RPC message
// and passes it to the RPC handler
#[cfg_attr(feature="verbose-tracing", instrument(level = "trace", ret, err, skip(self, data), fields(data.len = data.len())))]
async fn on_recv_envelope(
&self,
data: &mut [u8],
connection_descriptor: ConnectionDescriptor,
) -> EyreResult<bool> {
async fn on_recv_envelope(&self, data: &mut [u8], flow: Flow) -> EyreResult<bool> {
#[cfg(feature = "verbose-tracing")]
let root = span!(
parent: None,
Level::TRACE,
"on_recv_envelope",
"data.len" = data.len(),
"descriptor" = ?connection_descriptor
"flow" = ?flow
);
#[cfg(feature = "verbose-tracing")]
let _root_enter = root.enter();
log_net!(
"envelope of {} bytes received from {:?}",
data.len(),
connection_descriptor
);
let remote_addr = connection_descriptor.remote_address().ip_addr();
log_net!("envelope of {} bytes received from {:?}", data.len(), flow);
let remote_addr = flow.remote_address().ip_addr();
// Network accounting
self.stats_packet_rcvd(remote_addr, ByteCount::new(data.len() as u64));
@ -916,18 +907,18 @@ impl NetworkManager {
// Get the routing domain for this data
let routing_domain = match self
.routing_table()
.routing_domain_for_address(connection_descriptor.remote_address().address())
.routing_domain_for_address(flow.remote_address().address())
{
Some(rd) => rd,
None => {
log_net!(debug "no routing domain for envelope received from {:?}", connection_descriptor);
log_net!(debug "no routing domain for envelope received from {:?}", flow);
return Ok(false);
}
};
// Is this a direct bootstrap request instead of an envelope?
if data[0..4] == *BOOT_MAGIC {
network_result_value_or_log!(self.handle_boot_request(connection_descriptor).await? => [ format!(": connection_descriptor={:?}", connection_descriptor) ] {});
network_result_value_or_log!(self.handle_boot_request(flow).await? => [ format!(": flow={:?}", flow) ] {});
return Ok(true);
}
@ -974,7 +965,7 @@ impl NetworkManager {
log_net!(debug
"Timestamp behind: {}ms ({})",
timestamp_to_secs(ts.saturating_sub(ets).as_u64()) * 1000f64,
connection_descriptor.remote()
flow.remote()
);
return Ok(false);
}
@ -984,7 +975,7 @@ impl NetworkManager {
log_net!(debug
"Timestamp ahead: {}ms ({})",
timestamp_to_secs(ets.saturating_sub(ts).as_u64()) * 1000f64,
connection_descriptor.remote()
flow.remote()
);
return Ok(false);
}
@ -1036,17 +1027,11 @@ impl NetworkManager {
}
};
if let Some(relay_nr) = some_relay_nr {
if let Some(mut relay_nr) = some_relay_nr {
// Ensure the protocol used to forward is of the same sequencing requirement
// Address type is allowed to change if connectivity is better
let relay_nr = if connection_descriptor.protocol_type().is_ordered() {
// XXX: this is a little redundant
let (_, nrf) = NodeRefFilter::new().with_sequencing(Sequencing::EnsureOrdered);
let mut relay_nr = relay_nr.filtered_clone(nrf);
if flow.protocol_type().is_ordered() {
relay_nr.set_sequencing(Sequencing::EnsureOrdered);
relay_nr
} else {
relay_nr
};
// Relay the packet to the desired destination
@ -1092,7 +1077,7 @@ impl NetworkManager {
// Cache the envelope information in the routing table
let source_noderef = match routing_table.register_node_with_existing_connection(
envelope.get_sender_typed_id(),
connection_descriptor,
flow,
ts,
) {
Ok(v) => v,
@ -1105,15 +1090,13 @@ impl NetworkManager {
source_noderef.add_envelope_version(envelope.get_version());
// Pass message to RPC system
rpc.enqueue_direct_message(
envelope,
source_noderef,
connection_descriptor,
routing_domain,
body,
)?;
rpc.enqueue_direct_message(envelope, source_noderef, flow, routing_domain, body)?;
// Inform caller that we dealt with the envelope locally
Ok(true)
}
pub fn debug_restart_network(&self) {
self.net().restart_network();
}
}
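SendDataKind (Direct/Indirect/Existing over a ConnectionDescriptor) is replaced by the richer SendDataMethod above, which records the contact method actually used, the pre-relay contact method when the data was relayed, and the UniqueFlow it went out on. A hedged sketch of how a caller might report it; only the struct fields come from this change, the formatting helper is illustrative:

fn describe_send(send_data_method: &SendDataMethod) -> String {
    match &send_data_method.opt_relayed_contact_method {
        Some(pre_relay) => format!(
            "relayed: contact={:?}, pre-relay contact={:?}, flow={:?}",
            send_data_method.contact_method, pre_relay, send_data_method.unique_flow
        ),
        None => format!(
            "direct: contact={:?}, flow={:?}",
            send_data_method.contact_method, send_data_method.unique_flow
        ),
    }
}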

View File

@ -49,12 +49,12 @@ struct DiscoveryContextUnlockedInner {
}
#[derive(Clone)]
pub struct DiscoveryContext {
pub(super) struct DiscoveryContext {
unlocked_inner: Arc<DiscoveryContextUnlockedInner>,
inner: Arc<Mutex<DiscoveryContextInner>>,
}
pub type ClearNetworkCallback = Arc<dyn Fn() -> SendPinBoxFuture<()> + Send + Sync>;
pub(super) type ClearNetworkCallback = Arc<dyn Fn() -> SendPinBoxFuture<()> + Send + Sync>;
impl DiscoveryContext {
pub fn new(

View File

@ -14,7 +14,7 @@ use network_tcp::*;
use protocol::tcp::RawTcpProtocolHandler;
use protocol::udp::RawUdpProtocolHandler;
use protocol::ws::WebsocketProtocolHandler;
pub use protocol::*;
pub(in crate::network_manager) use protocol::*;
use async_tls::TlsAcceptor;
use futures_util::StreamExt;
@ -137,7 +137,7 @@ struct NetworkUnlockedInner {
}
#[derive(Clone)]
pub struct Network {
pub(in crate::network_manager) struct Network {
config: VeilidConfig,
inner: Arc<Mutex<NetworkInner>>,
unlocked_inner: Arc<NetworkUnlockedInner>,
@ -578,75 +578,79 @@ impl Network {
}
#[cfg_attr(feature="verbose-tracing", instrument(level="trace", err, skip(self, data), fields(data.len = data.len())))]
pub async fn send_data_to_existing_connection(
pub async fn send_data_to_existing_flow(
&self,
descriptor: ConnectionDescriptor,
flow: Flow,
data: Vec<u8>,
) -> EyreResult<Option<Vec<u8>>> {
) -> EyreResult<SendDataToExistingFlowResult> {
let data_len = data.len();
// Handle connectionless protocol
if descriptor.protocol_type() == ProtocolType::UDP {
if flow.protocol_type() == ProtocolType::UDP {
// send over the best udp socket we have bound since UDP is not connection oriented
let peer_socket_addr = descriptor.remote().socket_addr();
let peer_socket_addr = flow.remote().socket_addr();
if let Some(ph) = self.find_best_udp_protocol_handler(
&peer_socket_addr,
&descriptor.local().map(|sa| sa.socket_addr()),
&flow.local().map(|sa| sa.socket_addr()),
) {
network_result_value_or_log!(ph.clone()
.send_message(data.clone(), peer_socket_addr)
.await
.wrap_err("sending data to existing connection")? => [ format!(": data.len={}, descriptor={:?}", data.len(), descriptor) ]
{ return Ok(Some(data)); } );
.wrap_err("sending data to existing connection")? => [ format!(": data.len={}, flow={:?}", data.len(), flow) ]
{ return Ok(SendDataToExistingFlowResult::NotSent(data)); } );
// Network accounting
self.network_manager()
.stats_packet_sent(peer_socket_addr.ip(), ByteCount::new(data_len as u64));
// Data was consumed
return Ok(None);
let unique_flow = UniqueFlow {
flow,
connection_id: None,
};
return Ok(SendDataToExistingFlowResult::Sent(unique_flow));
}
}
// Handle connection-oriented protocols
// Try to send to the exact existing connection if one exists
if let Some(conn) = self.connection_manager().get_connection(descriptor) {
if let Some(conn) = self.connection_manager().get_connection(flow) {
// connection exists, send over it
match conn.send_async(data).await {
ConnectionHandleSendResult::Sent => {
// Network accounting
self.network_manager().stats_packet_sent(
descriptor.remote().socket_addr().ip(),
flow.remote().socket_addr().ip(),
ByteCount::new(data_len as u64),
);
// Data was consumed
return Ok(None);
return Ok(SendDataToExistingFlowResult::Sent(conn.unique_flow()));
}
ConnectionHandleSendResult::NotSent(data) => {
// Couldn't send
// Pass the data back out so we don't own it any more
return Ok(Some(data));
return Ok(SendDataToExistingFlowResult::NotSent(data));
}
}
}
// Connection didn't exist
// Pass the data back out so we don't own it any more
Ok(Some(data))
Ok(SendDataToExistingFlowResult::NotSent(data))
}
// Send data directly to a dial info, possibly without knowing which node it is going to
// Returns a descriptor for the connection used to send the data
// Returns a flow for the connection used to send the data
#[cfg_attr(feature="verbose-tracing", instrument(level="trace", err, skip(self, data), fields(data.len = data.len())))]
pub async fn send_data_to_dial_info(
&self,
dial_info: DialInfo,
data: Vec<u8>,
) -> EyreResult<NetworkResult<ConnectionDescriptor>> {
) -> EyreResult<NetworkResult<UniqueFlow>> {
self.record_dial_info_failure(dial_info.clone(), async move {
let data_len = data.len();
let connection_descriptor;
let unique_flow;
if dial_info.protocol_type() == ProtocolType::UDP {
// Handle connectionless protocol
let peer_socket_addr = dial_info.to_socket_addr();
@ -658,10 +662,14 @@ impl Network {
));
}
};
connection_descriptor = network_result_try!(ph
let flow = network_result_try!(ph
.send_message(data, peer_socket_addr)
.await
.wrap_err("failed to send data to dial info")?);
unique_flow = UniqueFlow {
flow,
connection_id: None,
};
} else {
// Handle connection-oriented protocols
let conn = network_result_try!(
@ -676,24 +684,20 @@ impl Network {
"failed to send",
)));
}
connection_descriptor = conn.connection_descriptor();
unique_flow = conn.unique_flow();
}
// Network accounting
self.network_manager()
.stats_packet_sent(dial_info.ip_addr(), ByteCount::new(data_len as u64));
Ok(NetworkResult::value(connection_descriptor))
Ok(NetworkResult::value(unique_flow))
})
.await
}
/////////////////////////////////////////////////////////////////
pub fn get_protocol_config(&self) -> ProtocolConfig {
self.inner.lock().protocol_config.clone()
}
#[instrument(level = "debug", err, skip_all)]
pub async fn startup(&self) -> EyreResult<()> {
// initialize interfaces
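The Option<Vec<u8>> contract of send_data_to_existing_connection() (None meant sent, Some handed the buffer back) is replaced above by the explicit SendDataToExistingFlowResult, which also reports the UniqueFlow the data actually went out on. A hedged sketch of the calling pattern inside the network_manager module; the helper and its fallback comment are illustrative, the real call sites are the boot-request handler and the send-data path:

async fn try_existing_flow_first(
    net: &Network,
    flow: Flow,
    data: Vec<u8>,
) -> EyreResult<Option<UniqueFlow>> {
    match net.send_data_to_existing_flow(flow, data).await? {
        SendDataToExistingFlowResult::Sent(unique_flow) => {
            // The data went out on this specific flow/connection.
            Ok(Some(unique_flow))
        }
        SendDataToExistingFlowResult::NotSent(_data) => {
            // No usable flow existed; the caller gets its buffer back and must
            // pick another path (e.g. send_data_to_dial_info or a relay).
            Ok(None)
        }
    }
}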

View File

@ -305,6 +305,8 @@ impl Network {
// All done
log_net!(debug "Network class discovery finished with address_types {:?}", all_address_types);
// Set the address types we've seen
editor.setup_network(
protocol_config.outbound,

View File

@ -6,7 +6,7 @@ use stop_token::future::FutureExt;
/////////////////////////////////////////////////////////////////
#[derive(Clone)]
pub struct ListenerState {
pub(in crate::network_manager) struct ListenerState {
pub protocol_accept_handlers: Vec<Box<dyn ProtocolAcceptHandler + 'static>>,
pub tls_protocol_handlers: Vec<Box<dyn ProtocolAcceptHandler + 'static>>,
pub tls_acceptor: Option<TlsAcceptor>,
@ -132,33 +132,33 @@ impl Network {
}
};
#[cfg(all(feature = "rt-async-std", unix))]
{
// async-std does not directly support linger on TcpStream yet
use std::os::fd::{AsRawFd, FromRawFd};
if let Err(e) = unsafe { socket2::Socket::from_raw_fd(tcp_stream.as_raw_fd()) }
.set_linger(Some(core::time::Duration::from_secs(0)))
{
log_net!(debug "Couldn't set TCP linger: {}", e);
return;
}
}
#[cfg(all(feature = "rt-async-std", windows))]
{
// async-std does not directly support linger on TcpStream yet
use std::os::windows::io::{AsRawSocket, FromRawSocket};
if let Err(e) = unsafe { socket2::Socket::from_raw_socket(tcp_stream.as_raw_socket()) }
.set_linger(Some(core::time::Duration::from_secs(0)))
{
log_net!(debug "Couldn't set TCP linger: {}", e);
return;
}
}
#[cfg(not(feature = "rt-async-std"))]
if let Err(e) = tcp_stream.set_linger(Some(core::time::Duration::from_secs(0))) {
log_net!(debug "Couldn't set TCP linger: {}", e);
return;
}
// #[cfg(all(feature = "rt-async-std", unix))]
// {
// // async-std does not directly support linger on TcpStream yet
// use std::os::fd::{AsRawFd, FromRawFd};
// if let Err(e) = unsafe { socket2::Socket::from_raw_fd(tcp_stream.as_raw_fd()) }
// .set_linger(Some(core::time::Duration::from_secs(0)))
// {
// log_net!(debug "Couldn't set TCP linger: {}", e);
// return;
// }
// }
// #[cfg(all(feature = "rt-async-std", windows))]
// {
// // async-std does not directly support linger on TcpStream yet
// use std::os::windows::io::{AsRawSocket, FromRawSocket};
// if let Err(e) = unsafe { socket2::Socket::from_raw_socket(tcp_stream.as_raw_socket()) }
// .set_linger(Some(core::time::Duration::from_secs(0)))
// {
// log_net!(debug "Couldn't set TCP linger: {}", e);
// return;
// }
// }
// #[cfg(not(feature = "rt-async-std"))]
// if let Err(e) = tcp_stream.set_linger(Some(core::time::Duration::from_secs(0))) {
// log_net!(debug "Couldn't set TCP linger: {}", e);
// return;
// }
if let Err(e) = tcp_stream.set_nodelay(true) {
log_net!(debug "Couldn't set TCP nodelay: {}", e);
return;

View File

@ -65,16 +65,16 @@ impl Network {
.timeout_at(stop_token.clone())
.await
{
Ok(Ok((size, descriptor))) => {
Ok(Ok((size, flow))) => {
// Network accounting
network_manager.stats_packet_rcvd(
descriptor.remote_address().ip_addr(),
flow.remote_address().ip_addr(),
ByteCount::new(size as u64),
);
// Pass it up for processing
if let Err(e) = network_manager
.on_recv_envelope(&mut data[..size], descriptor)
.on_recv_envelope(&mut data[..size], flow)
.await
{
log_net!(debug "failed to process received udp envelope: {}", e);

View File

@ -8,8 +8,8 @@ use super::*;
use std::io;
#[derive(Debug)]
pub enum ProtocolNetworkConnection {
Dummy(DummyNetworkConnection),
pub(in crate::network_manager) enum ProtocolNetworkConnection {
// Dummy(DummyNetworkConnection),
RawTcp(tcp::RawTcpNetworkConnection),
WsAccepted(ws::WebSocketNetworkConnectionAccepted),
Ws(ws::WebsocketNetworkConnectionWS),
@ -45,29 +45,29 @@ impl ProtocolNetworkConnection {
}
}
pub fn descriptor(&self) -> ConnectionDescriptor {
pub fn flow(&self) -> Flow {
match self {
Self::Dummy(d) => d.descriptor(),
Self::RawTcp(t) => t.descriptor(),
Self::WsAccepted(w) => w.descriptor(),
Self::Ws(w) => w.descriptor(),
Self::Wss(w) => w.descriptor(),
// Self::Dummy(d) => d.flow(),
Self::RawTcp(t) => t.flow(),
Self::WsAccepted(w) => w.flow(),
Self::Ws(w) => w.flow(),
Self::Wss(w) => w.flow(),
}
}
// pub async fn close(&self) -> io::Result<NetworkResult<()>> {
// match self {
// Self::Dummy(d) => d.close(),
// Self::RawTcp(t) => t.close().await,
// Self::WsAccepted(w) => w.close().await,
// Self::Ws(w) => w.close().await,
// Self::Wss(w) => w.close().await,
// }
// }
pub async fn close(&self) -> io::Result<NetworkResult<()>> {
match self {
// Self::Dummy(d) => d.close(),
Self::RawTcp(t) => t.close().await,
Self::WsAccepted(w) => w.close().await,
Self::Ws(w) => w.close().await,
Self::Wss(w) => w.close().await,
}
}
pub async fn send(&self, message: Vec<u8>) -> io::Result<NetworkResult<()>> {
match self {
Self::Dummy(d) => d.send(message),
// Self::Dummy(d) => d.send(message),
Self::RawTcp(t) => t.send(message).await,
Self::WsAccepted(w) => w.send(message).await,
Self::Ws(w) => w.send(message).await,
@ -76,7 +76,7 @@ impl ProtocolNetworkConnection {
}
pub async fn recv(&self) -> io::Result<NetworkResult<Vec<u8>>> {
match self {
Self::Dummy(d) => d.recv(),
// Self::Dummy(d) => d.recv(),
Self::RawTcp(t) => t.recv().await,
Self::WsAccepted(w) => w.recv().await,
Self::Ws(w) => w.recv().await,

View File

@ -112,9 +112,9 @@ pub fn new_unbound_tcp_socket(domain: Domain) -> io::Result<Socket> {
#[instrument(level = "trace", ret)]
pub fn new_unbound_shared_tcp_socket(domain: Domain) -> io::Result<Socket> {
let socket = Socket::new(domain, Type::STREAM, Some(Protocol::TCP))?;
if let Err(e) = socket.set_linger(Some(core::time::Duration::from_secs(0))) {
log_net!(error "Couldn't set TCP linger: {}", e);
}
// if let Err(e) = socket.set_linger(Some(core::time::Duration::from_secs(0))) {
// log_net!(error "Couldn't set TCP linger: {}", e);
// }
if let Err(e) = socket.set_nodelay(true) {
log_net!(error "Couldn't set TCP nodelay: {}", e);
}
@ -148,9 +148,9 @@ pub fn new_bound_first_tcp_socket(local_address: SocketAddr) -> io::Result<Socke
let domain = Domain::for_address(local_address);
let socket = Socket::new(domain, Type::STREAM, Some(Protocol::TCP))?;
if let Err(e) = socket.set_linger(Some(core::time::Duration::from_secs(0))) {
log_net!(error "Couldn't set TCP linger: {}", e);
}
// if let Err(e) = socket.set_linger(Some(core::time::Duration::from_secs(0))) {
// log_net!(error "Couldn't set TCP linger: {}", e);
// }
if let Err(e) = socket.set_nodelay(true) {
log_net!(error "Couldn't set TCP nodelay: {}", e);
}

View File

@ -3,7 +3,7 @@ use futures_util::{AsyncReadExt, AsyncWriteExt};
use sockets::*;
pub struct RawTcpNetworkConnection {
descriptor: ConnectionDescriptor,
flow: Flow,
stream: AsyncPeekStream,
}
@ -14,33 +14,38 @@ impl fmt::Debug for RawTcpNetworkConnection {
}
impl RawTcpNetworkConnection {
pub fn new(descriptor: ConnectionDescriptor, stream: AsyncPeekStream) -> Self {
Self { descriptor, stream }
pub fn new(flow: Flow, stream: AsyncPeekStream) -> Self {
Self { flow, stream }
}
pub fn descriptor(&self) -> ConnectionDescriptor {
self.descriptor
pub fn flow(&self) -> Flow {
self.flow
}
// #[instrument(level = "trace", err, skip(self))]
// pub async fn close(&mut self) -> io::Result<NetworkResult<()>> {
// // Make an attempt to flush the stream
// self.stream.clone().close().await?;
// // Then shut down the write side of the socket to effect a clean close
// cfg_if! {
// if #[cfg(feature="rt-async-std")] {
// self.tcp_stream
// .shutdown(async_std::net::Shutdown::Write)
// } else if #[cfg(feature="rt-tokio")] {
// use tokio::io::AsyncWriteExt;
// self.tcp_stream.get_mut()
// .shutdown()
// .await
// } else {
// compile_error!("needs executor implementation")
// }
// }
// }
#[cfg_attr(
feature = "verbose-tracing",
instrument(level = "trace", err, skip(self))
)]
pub async fn close(&self) -> io::Result<NetworkResult<()>> {
let mut stream = self.stream.clone();
let _ = stream.close().await;
Ok(NetworkResult::value(()))
// // Then shut down the write side of the socket to effect a clean close
// cfg_if! {
// if #[cfg(feature="rt-async-std")] {
// self.tcp_stream
// .shutdown(async_std::net::Shutdown::Write)
// } else if #[cfg(feature="rt-tokio")] {
// use tokio::io::AsyncWriteExt;
// self.tcp_stream.get_mut()
// .shutdown()
// .await
// } else {
// compile_error!("needs executor implementation")
// }
// }
}
async fn send_internal(
stream: &mut AsyncPeekStream,
@ -107,7 +112,7 @@ impl RawTcpNetworkConnection {
///
#[derive(Clone)]
pub struct RawTcpProtocolHandler
pub(in crate::network_manager) struct RawTcpProtocolHandler
where
Self: ProtocolAcceptHandler,
{
@ -147,7 +152,7 @@ impl RawTcpProtocolHandler {
ProtocolType::TCP,
);
let conn = ProtocolNetworkConnection::RawTcp(RawTcpNetworkConnection::new(
ConnectionDescriptor::new(peer_addr, SocketAddress::from_socket_addr(local_addr)),
Flow::new(peer_addr, SocketAddress::from_socket_addr(local_addr)),
ps,
));
@ -181,7 +186,7 @@ impl RawTcpProtocolHandler {
// Wrap the stream in a network connection and return it
let conn = ProtocolNetworkConnection::RawTcp(RawTcpNetworkConnection::new(
ConnectionDescriptor::new(
Flow::new(
PeerAddress::new(
SocketAddress::from_socket_addr(socket_addr),
ProtocolType::TCP,

View File

@ -2,7 +2,7 @@ use super::*;
use sockets::*;
#[derive(Clone)]
pub struct RawUdpProtocolHandler {
pub(in crate::network_manager) struct RawUdpProtocolHandler {
socket: Arc<UdpSocket>,
assembly_buffer: AssemblyBuffer,
address_filter: Option<AddressFilter>,
@ -17,9 +17,9 @@ impl RawUdpProtocolHandler {
}
}
#[cfg_attr(feature="verbose-tracing", instrument(level = "trace", err, skip(self, data), fields(data.len = data.len(), ret.len, ret.descriptor)))]
pub async fn recv_message(&self, data: &mut [u8]) -> io::Result<(usize, ConnectionDescriptor)> {
let (message_len, descriptor) = loop {
#[cfg_attr(feature="verbose-tracing", instrument(level = "trace", err, skip(self, data), fields(data.len = data.len(), ret.len, ret.flow)))]
pub async fn recv_message(&self, data: &mut [u8]) -> io::Result<(usize, Flow)> {
let (message_len, flow) = loop {
// Get a packet
let (size, remote_addr) = network_result_value_or_log!(self.socket.recv_from(data).await.into_network_result()? => continue);
@ -64,33 +64,33 @@ impl RawUdpProtocolHandler {
// Copy assemble message out if we got one
data[0..message.len()].copy_from_slice(&message);
// Return a connection descriptor and the amount of data in the message
// Return a flow and the amount of data in the message
let peer_addr = PeerAddress::new(
SocketAddress::from_socket_addr(remote_addr),
ProtocolType::UDP,
);
let local_socket_addr = self.socket.local_addr()?;
let descriptor = ConnectionDescriptor::new(
let flow = Flow::new(
peer_addr,
SocketAddress::from_socket_addr(local_socket_addr),
);
break (message.len(), descriptor);
break (message.len(), flow);
};
#[cfg(feature = "verbose-tracing")]
tracing::Span::current().record("ret.len", message_len);
#[cfg(feature = "verbose-tracing")]
tracing::Span::current().record("ret.descriptor", format!("{:?}", descriptor).as_str());
Ok((message_len, descriptor))
tracing::Span::current().record("ret.flow", format!("{:?}", flow).as_str());
Ok((message_len, flow))
}
#[cfg_attr(feature="verbose-tracing", instrument(level = "trace", err, skip(self, data), fields(data.len = data.len(), ret.descriptor)))]
#[cfg_attr(feature="verbose-tracing", instrument(level = "trace", err, skip(self, data), fields(data.len = data.len(), ret.flow)))]
pub async fn send_message(
&self,
data: Vec<u8>,
remote_addr: SocketAddr,
) -> io::Result<NetworkResult<ConnectionDescriptor>> {
) -> io::Result<NetworkResult<Flow>> {
if data.len() > MAX_MESSAGE_SIZE {
bail_io_error_other!("sending too large UDP message");
}
@ -121,21 +121,21 @@ impl RawUdpProtocolHandler {
.await?
);
// Return a connection descriptor for the sent message
// Return a flow for the sent message
let peer_addr = PeerAddress::new(
SocketAddress::from_socket_addr(remote_addr),
ProtocolType::UDP,
);
let local_socket_addr = self.socket.local_addr()?;
let descriptor = ConnectionDescriptor::new(
let flow = Flow::new(
peer_addr,
SocketAddress::from_socket_addr(local_socket_addr),
);
#[cfg(feature = "verbose-tracing")]
tracing::Span::current().record("ret.descriptor", format!("{:?}", descriptor).as_str());
Ok(NetworkResult::value(descriptor))
tracing::Span::current().record("ret.flow", format!("{:?}", flow).as_str());
Ok(NetworkResult::value(flow))
}
#[instrument(level = "trace", err)]

View File

@ -1,21 +1,25 @@
use super::*;
use async_tls::TlsConnector;
use async_tungstenite::tungstenite::error::ProtocolError;
use async_tungstenite::tungstenite::handshake::server::{
Callback, ErrorResponse, Request, Response,
};
use async_tungstenite::tungstenite::http::StatusCode;
use async_tungstenite::tungstenite::protocol::Message;
use async_tungstenite::tungstenite::protocol::{frame::coding::CloseCode, CloseFrame, Message};
use async_tungstenite::tungstenite::Error;
use async_tungstenite::{accept_hdr_async, client_async, WebSocketStream};
use futures_util::{AsyncRead, AsyncWrite, SinkExt};
use sockets::*;
/// Maximum number of websocket request headers to permit
// Maximum number of websocket request headers to permit
const MAX_WS_HEADERS: usize = 24;
/// Maximum size of any one specific websocket header
// Maximum size of any one specific websocket header
const MAX_WS_HEADER_LENGTH: usize = 512;
/// Maximum total size of headers and request including newlines
// Maximum total size of headers and request including newlines
const MAX_WS_BEFORE_BODY: usize = 2048;
// Wait time for connection close
// const MAX_CONNECTION_CLOSE_WAIT_US: u64 = 5_000_000;
cfg_if! {
if #[cfg(feature="rt-async-std")] {
@ -31,14 +35,15 @@ cfg_if! {
}
}
fn err_to_network_result<T>(err: async_tungstenite::tungstenite::Error) -> NetworkResult<T> {
fn err_to_network_result<T>(err: Error) -> NetworkResult<T> {
match err {
async_tungstenite::tungstenite::Error::ConnectionClosed
| async_tungstenite::tungstenite::Error::AlreadyClosed
| async_tungstenite::tungstenite::Error::Io(_)
| async_tungstenite::tungstenite::Error::Protocol(
async_tungstenite::tungstenite::error::ProtocolError::ResetWithoutClosingHandshake,
) => NetworkResult::NoConnection(to_io_error_other(err)),
Error::ConnectionClosed
| Error::AlreadyClosed
| Error::Io(_)
| Error::Protocol(ProtocolError::ResetWithoutClosingHandshake)
| Error::Protocol(ProtocolError::SendAfterClosing) => {
NetworkResult::NoConnection(to_io_error_other(err))
}
_ => NetworkResult::InvalidMessage(err.to_string()),
}
}
@ -49,7 +54,7 @@ pub struct WebsocketNetworkConnection<T>
where
T: AsyncRead + AsyncWrite + Send + Unpin + 'static,
{
descriptor: ConnectionDescriptor,
flow: Flow,
stream: CloneStream<WebSocketStream<T>>,
}
@ -66,45 +71,71 @@ impl<T> WebsocketNetworkConnection<T>
where
T: AsyncRead + AsyncWrite + Send + Unpin + 'static,
{
pub fn new(descriptor: ConnectionDescriptor, stream: WebSocketStream<T>) -> Self {
pub fn new(flow: Flow, stream: WebSocketStream<T>) -> Self {
Self {
descriptor,
flow,
stream: CloneStream::new(stream),
}
}
pub fn descriptor(&self) -> ConnectionDescriptor {
self.descriptor
pub fn flow(&self) -> Flow {
self.flow
}
// #[instrument(level = "trace", err, skip(self))]
// pub async fn close(&self) -> io::Result<()> {
// // Make an attempt to flush the stream
// self.stream.clone().close().await.map_err(to_io_error_other)?;
// // Then forcibly close the socket
// self.tcp_stream
// .shutdown(Shutdown::Both)
// .map_err(to_io_error_other)
// }
#[cfg_attr(
feature = "verbose-tracing",
instrument(level = "trace", err, skip(self))
)]
pub async fn close(&self) -> io::Result<NetworkResult<()>> {
// Make an attempt to close the stream normally
let mut stream = self.stream.clone();
let out = match stream
.send(Message::Close(Some(CloseFrame {
code: CloseCode::Normal,
reason: "".into(),
})))
.await
{
Ok(v) => NetworkResult::value(v),
Err(e) => err_to_network_result(e),
};
let _ = stream.close().await;
Ok(out)
// Drive connection to close
/*
let cur_ts = get_timestamp();
loop {
match stream.flush().await {
Ok(()) => {}
Err(Error::Io(ioerr)) => {
break Err(ioerr).into_network_result();
}
Err(Error::ConnectionClosed) => {
break Ok(NetworkResult::value(()));
}
Err(e) => {
break Err(to_io_error_other(e));
}
}
if get_timestamp().saturating_sub(cur_ts) >= MAX_CONNECTION_CLOSE_WAIT_US {
return Ok(NetworkResult::Timeout);
}
}
*/
}
#[cfg_attr(feature="verbose-tracing", instrument(level = "trace", err, skip(self, message), fields(network_result, message.len = message.len())))]
pub async fn send(&self, message: Vec<u8>) -> io::Result<NetworkResult<()>> {
if message.len() > MAX_MESSAGE_SIZE {
bail_io_error_other!("received too large WS message");
bail_io_error_other!("sending too large WS message");
}
let out = match self.stream.clone().send(Message::binary(message)).await {
Ok(v) => NetworkResult::value(v),
Err(e) => err_to_network_result(e),
};
if !out.is_value() {
#[cfg(feature = "verbose-tracing")]
tracing::Span::current().record("network_result", &tracing::field::display(&out));
return Ok(out);
}
let out = match self.stream.clone().flush().await {
Ok(v) => NetworkResult::value(v),
Err(e) => err_to_network_result(e),
};
#[cfg(feature = "verbose-tracing")]
tracing::Span::current().record("network_result", &tracing::field::display(&out));
@ -153,7 +184,7 @@ struct WebsocketProtocolHandlerArc {
}
#[derive(Clone)]
pub struct WebsocketProtocolHandler
pub(in crate::network_manager) struct WebsocketProtocolHandler
where
Self: ProtocolAcceptHandler,
{
@ -255,7 +286,7 @@ impl WebsocketProtocolHandler {
PeerAddress::new(SocketAddress::from_socket_addr(socket_addr), protocol_type);
let conn = ProtocolNetworkConnection::WsAccepted(WebsocketNetworkConnection::new(
ConnectionDescriptor::new(peer_addr, SocketAddress::from_socket_addr(local_addr)),
Flow::new(peer_addr, SocketAddress::from_socket_addr(local_addr)),
ws_stream,
));
@ -304,8 +335,8 @@ impl WebsocketProtocolHandler {
#[cfg(feature = "rt-tokio")]
let tcp_stream = tcp_stream.compat();
// Make our connection descriptor
let descriptor = ConnectionDescriptor::new(
// Make our flow
let flow = Flow::new(
dial_info.peer_address(),
SocketAddress::from_socket_addr(actual_local_addr),
);
@ -319,14 +350,14 @@ impl WebsocketProtocolHandler {
.map_err(to_io_error_other)?;
Ok(NetworkResult::Value(ProtocolNetworkConnection::Wss(
WebsocketNetworkConnection::new(descriptor, ws_stream),
WebsocketNetworkConnection::new(flow, ws_stream),
)))
} else {
let (ws_stream, _response) = client_async(request, tcp_stream)
.await
.map_err(to_io_error_other)?;
Ok(NetworkResult::Value(ProtocolNetworkConnection::Ws(
WebsocketNetworkConnection::new(descriptor, ws_stream),
WebsocketNetworkConnection::new(flow, ws_stream),
)))
}
}

View File

@ -11,7 +11,7 @@ cfg_if::cfg_if! {
///////////////////////////////////////////////////////////
// Accept
pub trait ProtocolAcceptHandler: ProtocolAcceptHandlerClone + Send + Sync {
pub(in crate::network_manager) trait ProtocolAcceptHandler: ProtocolAcceptHandlerClone + Send + Sync {
fn on_accept(
&self,
stream: AsyncPeekStream,
@ -20,7 +20,7 @@ cfg_if::cfg_if! {
) -> SendPinBoxFuture<io::Result<Option<ProtocolNetworkConnection>>>;
}
pub trait ProtocolAcceptHandlerClone {
pub(in crate::network_manager) trait ProtocolAcceptHandlerClone {
fn clone_box(&self) -> Box<dyn ProtocolAcceptHandler>;
}
@ -38,32 +38,32 @@ cfg_if::cfg_if! {
}
}
pub type NewProtocolAcceptHandler =
pub(in crate::network_manager) type NewProtocolAcceptHandler =
dyn Fn(VeilidConfig, bool) -> Box<dyn ProtocolAcceptHandler> + Send;
}
}
///////////////////////////////////////////////////////////
// Dummy protocol network connection for testing
#[derive(Debug)]
pub struct DummyNetworkConnection {
descriptor: ConnectionDescriptor,
}
// #[derive(Debug)]
// pub struct DummyNetworkConnection {
// flow: Flow,
// }
impl DummyNetworkConnection {
pub fn descriptor(&self) -> ConnectionDescriptor {
self.descriptor
}
// pub fn close(&self) -> io::Result<()> {
// Ok(())
// }
pub fn send(&self, _message: Vec<u8>) -> io::Result<NetworkResult<()>> {
Ok(NetworkResult::Value(()))
}
pub fn recv(&self) -> io::Result<NetworkResult<Vec<u8>>> {
Ok(NetworkResult::Value(Vec::new()))
}
}
// impl DummyNetworkConnection {
// pub fn flow(&self) -> Flow {
// self.flow
// }
// pub fn close(&self) -> io::Result<NetworkResult<()>> {
// Ok(NetworkResult::Value(()))
// }
// pub fn send(&self, _message: Vec<u8>) -> io::Result<NetworkResult<()>> {
// Ok(NetworkResult::Value(()))
// }
// pub fn recv(&self) -> io::Result<NetworkResult<Vec<u8>>> {
// Ok(NetworkResult::Value(Vec::new()))
// }
// }
///////////////////////////////////////////////////////////
// Top-level protocol independent network connection object
@ -83,28 +83,36 @@ pub struct NetworkConnectionStats {
}
pub type NetworkConnectionId = AlignedU64;
#[derive(Debug)]
pub struct NetworkConnection {
pub(in crate::network_manager) struct NetworkConnection {
connection_id: NetworkConnectionId,
descriptor: ConnectionDescriptor,
flow: Flow,
processor: Option<MustJoinHandle<()>>,
established_time: Timestamp,
stats: Arc<Mutex<NetworkConnectionStats>>,
sender: flume::Sender<(Option<Id>, Vec<u8>)>,
stop_source: Option<StopSource>,
protected: bool,
protected_nr: Option<NodeRef>,
ref_count: usize,
}
impl Drop for NetworkConnection {
fn drop(&mut self) {
if self.ref_count != 0 && self.stop_source.is_some() {
log_net!(error "ref_count for network connection should be zero: {:?}", self);
}
}
}
impl NetworkConnection {
pub(super) fn dummy(id: NetworkConnectionId, descriptor: ConnectionDescriptor) -> Self {
pub(super) fn dummy(id: NetworkConnectionId, flow: Flow) -> Self {
// Create handle for sending (dummy is immediately disconnected)
let (sender, _receiver) = flume::bounded(get_concurrency() as usize);
Self {
connection_id: id,
descriptor,
flow,
processor: None,
established_time: get_aligned_timestamp(),
stats: Arc::new(Mutex::new(NetworkConnectionStats {
@ -113,7 +121,8 @@ impl NetworkConnection {
})),
sender,
stop_source: None,
protected: false,
protected_nr: None,
ref_count: 0,
}
}
@ -123,8 +132,8 @@ impl NetworkConnection {
protocol_connection: ProtocolNetworkConnection,
connection_id: NetworkConnectionId,
) -> Self {
// Get descriptor
let descriptor = protocol_connection.descriptor();
// Get flow
let flow = protocol_connection.flow();
// Create handle for sending
let (sender, receiver) = flume::bounded(get_concurrency() as usize);
@ -144,7 +153,7 @@ impl NetworkConnection {
local_stop_token,
manager_stop_token,
connection_id,
descriptor,
flow,
receiver,
protocol_connection,
stats.clone(),
@ -153,13 +162,14 @@ impl NetworkConnection {
// Return the connection
Self {
connection_id,
descriptor,
flow,
processor: Some(processor),
established_time: get_aligned_timestamp(),
stats,
sender,
stop_source: Some(stop_source),
protected: false,
protected_nr: None,
ref_count: 0,
}
}
@ -167,20 +177,40 @@ impl NetworkConnection {
self.connection_id
}
pub fn connection_descriptor(&self) -> ConnectionDescriptor {
self.descriptor
pub fn flow(&self) -> Flow {
self.flow
}
#[allow(dead_code)]
pub fn unique_flow(&self) -> UniqueFlow {
UniqueFlow {
flow: self.flow,
connection_id: Some(self.connection_id),
}
}
pub fn get_handle(&self) -> ConnectionHandle {
ConnectionHandle::new(self.connection_id, self.descriptor, self.sender.clone())
ConnectionHandle::new(self.connection_id, self.flow, self.sender.clone())
}
pub fn is_protected(&self) -> bool {
self.protected
pub fn is_in_use(&self) -> bool {
self.ref_count > 0
}
pub fn protect(&mut self) {
self.protected = true;
pub fn protected_node_ref(&self) -> Option<NodeRef> {
self.protected_nr.clone()
}
pub fn protect(&mut self, protect_nr: NodeRef) {
self.protected_nr = Some(protect_nr);
}
pub fn add_ref(&mut self) {
self.ref_count += 1;
}
pub fn remove_ref(&mut self) {
self.ref_count -= 1;
}
pub fn close(&mut self) {
@ -240,7 +270,7 @@ impl NetworkConnection {
local_stop_token: StopToken,
manager_stop_token: StopToken,
connection_id: NetworkConnectionId,
descriptor: ConnectionDescriptor,
flow: Flow,
receiver: flume::Receiver<(Option<Id>, Vec<u8>)>,
protocol_connection: ProtocolNetworkConnection,
stats: Arc<Mutex<NetworkConnectionStats>>,
@ -248,7 +278,7 @@ impl NetworkConnection {
Box::pin(async move {
log_net!(
"== Starting process_connection loop for id={}, {:?}", connection_id,
descriptor
flow
);
let network_manager = connection_manager.network_manager();
@ -262,7 +292,7 @@ impl NetworkConnection {
let new_timer = || {
sleep(connection_manager.connection_inactivity_timeout_ms()).then(|_| async {
// timeout
log_net!("== Connection timeout on {:?}", descriptor);
log_net!("== Connection timeout on {:?}", flow);
RecvLoopAction::Timeout
})
};
@ -282,6 +312,9 @@ impl NetworkConnection {
// xxx: causes crash (Missing otel data span extensions)
// recv_span.follows_from(span_id);
// Touch the LRU for this connection
connection_manager.touch_connection_by_id(connection_id);
// send the packet
if let Err(e) = Self::send_internal(
&protocol_connection,
@ -314,7 +347,7 @@ impl NetworkConnection {
.then(|res| async {
match res {
Ok(v) => {
let peer_address = protocol_connection.descriptor().remote();
let peer_address = protocol_connection.flow().remote();
// Check to see if it is punished
if address_filter.is_ip_addr_punished(peer_address.socket_addr().ip()) {
@ -340,12 +373,16 @@ impl NetworkConnection {
// Pass received messages up to the network manager for processing
if let Err(e) = network_manager
.on_recv_envelope(message.as_mut_slice(), descriptor)
.on_recv_envelope(message.as_mut_slice(), flow)
.await
{
log_net!(debug "failed to process received envelope: {}", e);
RecvLoopAction::Finish
} else {
// Touch the LRU for this connection
connection_manager.touch_connection_by_id(connection_id);
RecvLoopAction::Recv
}
}
@ -393,25 +430,37 @@ impl NetworkConnection {
}
log_net!(
"== Connection loop finished descriptor={:?}",
descriptor
"== Connection loop finished flow={:?}",
flow
);
// Let the connection manager know the receive loop exited
connection_manager
.report_connection_finished(connection_id)
.await;
// Close the low level socket
if let Err(e) = protocol_connection.close().await {
log_net!(debug "Protocol connection close error: {}", e);
}
}.instrument(trace_span!("process_connection")))
}
pub fn debug_print(&self, cur_ts: Timestamp) -> String {
format!("{} <- {} | {} | est {} sent {} rcvd {}",
self.descriptor.remote_address(),
self.descriptor.local().map(|x| x.to_string()).unwrap_or("---".to_owned()),
format!("{} <- {} | {} | est {} sent {} rcvd {} refcount {}{}",
self.flow.remote_address(),
self.flow.local().map(|x| x.to_string()).unwrap_or("---".to_owned()),
self.connection_id.as_u64(),
debug_duration(cur_ts.as_u64().saturating_sub(self.established_time.as_u64())),
self.stats().last_message_sent_time.map(|ts| debug_duration(cur_ts.as_u64().saturating_sub(ts.as_u64())) ).unwrap_or("---".to_owned()),
self.stats().last_message_recv_time.map(|ts| debug_duration(cur_ts.as_u64().saturating_sub(ts.as_u64())) ).unwrap_or("---".to_owned()),
self.ref_count,
if let Some(pnr) = &self.protected_nr {
format!(" PROTECTED:{}",pnr)
} else {
"".to_owned()
}
)
}
}

View File

@ -7,7 +7,8 @@ use routing_table::*;
use stop_token::future::FutureExt;
#[derive(Clone, Debug)]
pub enum ReceiptEvent {
#[allow(dead_code)]
pub(crate) enum ReceiptEvent {
ReturnedOutOfBand,
ReturnedInBand { inbound_noderef: NodeRef },
ReturnedSafety,
@ -17,14 +18,14 @@ pub enum ReceiptEvent {
}
#[derive(Clone, Debug)]
pub enum ReceiptReturned {
pub(super) enum ReceiptReturned {
OutOfBand,
InBand { inbound_noderef: NodeRef },
Safety,
Private { private_route: PublicKey },
}
pub trait ReceiptCallback: Send + 'static {
pub(crate) trait ReceiptCallback: Send + 'static {
fn call(
&self,
event: ReceiptEvent,
@ -52,6 +53,7 @@ where
type ReceiptCallbackType = Box<dyn ReceiptCallback>;
type ReceiptSingleShotType = SingleShotEventual<ReceiptEvent>;
#[allow(dead_code)]
enum ReceiptRecordCallbackType {
Normal(ReceiptCallbackType),
SingleShot(Option<ReceiptSingleShotType>),
@ -69,7 +71,7 @@ impl fmt::Debug for ReceiptRecordCallbackType {
}
}
pub struct ReceiptRecord {
struct ReceiptRecord {
expiration_ts: Timestamp,
receipt: Receipt,
expected_returns: u32,
@ -90,6 +92,7 @@ impl fmt::Debug for ReceiptRecord {
}
impl ReceiptRecord {
#[allow(dead_code)]
pub fn new(
receipt: Receipt,
expiration_ts: Timestamp,
@ -147,7 +150,7 @@ impl PartialOrd for ReceiptRecordTimestampSort {
///////////////////////////////////
pub struct ReceiptManagerInner {
struct ReceiptManagerInner {
network_manager: NetworkManager,
records_by_nonce: BTreeMap<Nonce, Arc<Mutex<ReceiptRecord>>>,
next_oldest_ts: Option<Timestamp>,
@ -156,7 +159,7 @@ pub struct ReceiptManagerInner {
}
#[derive(Clone)]
pub struct ReceiptManager {
pub(super) struct ReceiptManager {
inner: Arc<Mutex<ReceiptManagerInner>>,
}
@ -314,6 +317,7 @@ impl ReceiptManager {
debug!("finished receipt manager shutdown");
}
#[allow(dead_code)]
pub fn record_receipt(
&self,
receipt: Receipt,
@ -369,6 +373,7 @@ impl ReceiptManager {
inner.next_oldest_ts = new_next_oldest_ts;
}
#[allow(dead_code)]
pub async fn cancel_receipt(&self, nonce: &Nonce) -> EyreResult<()> {
log_rpc!(debug "== Cancel Receipt {}", nonce.encode());

View File

@ -3,40 +3,42 @@ use super::*;
impl NetworkManager {
/// Send raw data to a node
///
/// We may not have dial info for a node, but have an existing connection for it
/// because an inbound connection happened first, and no FindNodeQ has happened to that
/// node yet to discover its dial info. The existing connection should be tried first
/// in this case, if it matches the node ref's filters and no more permissive connection
/// We may not have dial info for a node, but have an existing flow for it
/// because an inbound flow happened first, and no FindNodeQ has happened to that
/// node yet to discover its dial info. The existing flow should be tried first
/// in this case, if it matches the node ref's filters and no more permissive flow
/// could be established.
///
/// Sending to a node requires determining a NetworkClass compatible mechanism
pub fn send_data(
/// Sending to a node requires determining a NetworkClass compatible contact method
/// between the source and destination node
pub(crate) fn send_data(
&self,
destination_node_ref: NodeRef,
data: Vec<u8>,
) -> SendPinBoxFuture<EyreResult<NetworkResult<SendDataKind>>> {
) -> SendPinBoxFuture<EyreResult<NetworkResult<SendDataMethod>>> {
let this = self.clone();
Box::pin(
async move {
// First try to send data to the last socket we've seen this peer on
let data = if let Some(connection_descriptor) = destination_node_ref.last_connection() {
// First try to send data to the last flow we've seen this peer on
let data = if let Some(flow) = destination_node_ref.last_flow() {
match this
.net()
.send_data_to_existing_connection(connection_descriptor, data)
.send_data_to_existing_flow(flow, data)
.await?
{
None => {
// Update timestamp for this last connection since we just sent to it
SendDataToExistingFlowResult::Sent(unique_flow) => {
// Update timestamp for this last flow since we just sent to it
destination_node_ref
.set_last_connection(connection_descriptor, get_aligned_timestamp());
.set_last_flow(unique_flow.flow, get_aligned_timestamp());
return Ok(NetworkResult::value(SendDataKind::Existing(
connection_descriptor,
)));
return Ok(NetworkResult::value(SendDataMethod {
opt_relayed_contact_method: None,
contact_method: NodeContactMethod::Existing,
unique_flow,
}));
}
Some(data) => {
// Couldn't send data to existing connection
SendDataToExistingFlowResult::NotSent(data) => {
// Couldn't send data to existing flow
// so pass the data back out
data
}
@ -49,16 +51,16 @@ impl NetworkManager {
// No existing connection was found or usable, so we proceed to see how to make a new one
// Get the best way to contact this node
let contact_method = this.get_node_contact_method(destination_node_ref.clone())?;
let possibly_relayed_contact_method = this.get_node_contact_method(destination_node_ref.clone())?;
// If we need to relay, do it
let (contact_method, target_node_ref, relayed) = match contact_method {
let (contact_method, target_node_ref, opt_relayed_contact_method) = match possibly_relayed_contact_method.clone() {
NodeContactMethod::OutboundRelay(relay_nr)
| NodeContactMethod::InboundRelay(relay_nr) => {
let cm = this.get_node_contact_method(relay_nr.clone())?;
(cm, relay_nr, true)
(cm, relay_nr, Some(possibly_relayed_contact_method))
}
cm => (cm, destination_node_ref.clone(), false),
cm => (cm, destination_node_ref.clone(), None),
};
#[cfg(feature = "verbose-tracing")]
@ -68,7 +70,7 @@ impl NetworkManager {
);
// Try the contact method
let sdk = match contact_method {
let mut send_data_method = match contact_method {
NodeContactMethod::OutboundRelay(relay_nr) => {
// Relay loop or multiple relays
bail!(
@ -117,11 +119,9 @@ impl NetworkManager {
)
}
};
send_data_method.opt_relayed_contact_method = opt_relayed_contact_method;
if relayed {
return Ok(NetworkResult::value(SendDataKind::Indirect));
}
Ok(NetworkResult::value(sdk))
Ok(NetworkResult::value(send_data_method))
}
.instrument(trace_span!("send_data")),
)
@ -132,31 +132,35 @@ impl NetworkManager {
&self,
target_node_ref: NodeRef,
data: Vec<u8>,
) -> EyreResult<NetworkResult<SendDataKind>> {
) -> EyreResult<NetworkResult<SendDataMethod>> {
// First try to send data to the last connection we've seen this peer on
let Some(connection_descriptor) = target_node_ref.last_connection() else {
let Some(flow) = target_node_ref.last_flow() else {
return Ok(NetworkResult::no_connection_other(
format!("should have found an existing connection: {}", target_node_ref)
));
};
if self
let unique_flow = match self
.net()
.send_data_to_existing_connection(connection_descriptor, data)
.send_data_to_existing_flow(flow, data)
.await?
.is_some()
{
return Ok(NetworkResult::no_connection_other(
"failed to send to existing connection",
));
}
SendDataToExistingFlowResult::Sent(unique_flow) => unique_flow,
SendDataToExistingFlowResult::NotSent(_) => {
return Ok(NetworkResult::no_connection_other(
"failed to send to existing flow",
));
}
};
// Update timestamp for this last connection since we just sent to it
target_node_ref.set_last_connection(connection_descriptor, get_aligned_timestamp());
target_node_ref.set_last_flow(flow, get_aligned_timestamp());
Ok(NetworkResult::value(SendDataKind::Existing(
connection_descriptor,
)))
Ok(NetworkResult::value(SendDataMethod{
contact_method: NodeContactMethod::Existing,
opt_relayed_contact_method: None,
unique_flow
}))
}
/// Send data using NodeContactMethod::Unreachable
@ -164,31 +168,35 @@ impl NetworkManager {
&self,
target_node_ref: NodeRef,
data: Vec<u8>,
) -> EyreResult<NetworkResult<SendDataKind>> {
) -> EyreResult<NetworkResult<SendDataMethod>> {
// Try to send data to the last socket we've seen this peer on
let Some(connection_descriptor) = target_node_ref.last_connection() else {
let Some(flow) = target_node_ref.last_flow() else {
return Ok(NetworkResult::no_connection_other(
format!("Node is not reachable and has no existing connection: {}", target_node_ref)
));
};
if self
let unique_flow = match self
.net()
.send_data_to_existing_connection(connection_descriptor, data)
.await?
.is_some()
.send_data_to_existing_flow(flow, data)
.await?
{
return Ok(NetworkResult::no_connection_other(
format!("failed to send to unreachable node over existing connection: {:?}", connection_descriptor)
));
}
SendDataToExistingFlowResult::Sent(unique_flow) => unique_flow,
SendDataToExistingFlowResult::NotSent(_) => {
return Ok(NetworkResult::no_connection_other(
format!("failed to send to unreachable node over existing connection: {:?}", flow)
));
}
};
// Update timestamp for this last connection since we just sent to it
target_node_ref.set_last_connection(connection_descriptor, get_aligned_timestamp());
target_node_ref.set_last_flow(flow, get_aligned_timestamp());
Ok(NetworkResult::value(SendDataKind::Existing(
connection_descriptor,
)))
Ok(NetworkResult::value(SendDataMethod {
contact_method: NodeContactMethod::Existing,
opt_relayed_contact_method: None,
unique_flow,
}))
}
/// Send data using NodeContactMethod::SignalReverse
@ -197,24 +205,26 @@ impl NetworkManager {
relay_nr: NodeRef,
target_node_ref: NodeRef,
data: Vec<u8>,
) -> EyreResult<NetworkResult<SendDataKind>> {
) -> EyreResult<NetworkResult<SendDataMethod>> {
// First try to send data to the last socket we've seen this peer on
let data = if let Some(connection_descriptor) = target_node_ref.last_connection() {
let data = if let Some(flow) = target_node_ref.last_flow() {
match self
.net()
.send_data_to_existing_connection(connection_descriptor, data)
.send_data_to_existing_flow(flow, data)
.await?
{
None => {
SendDataToExistingFlowResult::Sent(unique_flow) => {
// Update timestamp for this last connection since we just sent to it
target_node_ref
.set_last_connection(connection_descriptor, get_aligned_timestamp());
.set_last_flow(flow, get_aligned_timestamp());
return Ok(NetworkResult::value(SendDataKind::Existing(
connection_descriptor,
)));
return Ok(NetworkResult::value(SendDataMethod{
contact_method: NodeContactMethod::Existing,
opt_relayed_contact_method: None,
unique_flow
}));
}
Some(data) => {
SendDataToExistingFlowResult::NotSent(data) => {
// Couldn't send data to existing connection
// so pass the data back out
data
@ -225,13 +235,15 @@ impl NetworkManager {
data
};
let connection_descriptor = network_result_try!(
self.do_reverse_connect(relay_nr, target_node_ref, data)
let unique_flow = network_result_try!(
self.do_reverse_connect(relay_nr.clone(), target_node_ref.clone(), data)
.await?
);
Ok(NetworkResult::value(SendDataKind::Direct(
connection_descriptor,
)))
Ok(NetworkResult::value(SendDataMethod {
contact_method: NodeContactMethod::SignalReverse(relay_nr, target_node_ref),
opt_relayed_contact_method: None,
unique_flow,
}))
}
/// Send data using NodeContactMethod::SignalHolePunch
@ -240,24 +252,26 @@ impl NetworkManager {
relay_nr: NodeRef,
target_node_ref: NodeRef,
data: Vec<u8>,
) -> EyreResult<NetworkResult<SendDataKind>> {
) -> EyreResult<NetworkResult<SendDataMethod>> {
// First try to send data to the last socket we've seen this peer on
let data = if let Some(connection_descriptor) = target_node_ref.last_connection() {
let data = if let Some(flow) = target_node_ref.last_flow() {
match self
.net()
.send_data_to_existing_connection(connection_descriptor, data)
.send_data_to_existing_flow(flow, data)
.await?
{
None => {
SendDataToExistingFlowResult::Sent(unique_flow) => {
// Update timestamp for this last connection since we just sent to it
target_node_ref
.set_last_connection(connection_descriptor, get_aligned_timestamp());
.set_last_flow(flow, get_aligned_timestamp());
return Ok(NetworkResult::value(SendDataKind::Existing(
connection_descriptor,
)));
return Ok(NetworkResult::value(SendDataMethod{
contact_method: NodeContactMethod::Existing,
opt_relayed_contact_method: None,
unique_flow
}));
}
Some(data) => {
SendDataToExistingFlowResult::NotSent(data) => {
// Couldn't send data to existing connection
// so pass the data back out
data
@ -268,11 +282,13 @@ impl NetworkManager {
data
};
let connection_descriptor =
network_result_try!(self.do_hole_punch(relay_nr, target_node_ref, data).await?);
Ok(NetworkResult::value(SendDataKind::Direct(
connection_descriptor,
)))
let unique_flow =
network_result_try!(self.do_hole_punch(relay_nr.clone(), target_node_ref.clone(), data).await?);
Ok(NetworkResult::value(SendDataMethod {
contact_method: NodeContactMethod::SignalHolePunch(relay_nr, target_node_ref),
opt_relayed_contact_method: None,
unique_flow,
}))
}
/// Send data using NodeContactMethod::Direct
@ -281,34 +297,36 @@ impl NetworkManager {
node_ref: NodeRef,
dial_info: DialInfo,
data: Vec<u8>,
) -> EyreResult<NetworkResult<SendDataKind>> {
) -> EyreResult<NetworkResult<SendDataMethod>> {
// Since we have the best dial info already, we can find a connection to use by protocol type
let node_ref = node_ref.filtered_clone(NodeRefFilter::from(dial_info.make_filter()));
// First try to send data to the last socket we've seen this peer on
let data = if let Some(connection_descriptor) = node_ref.last_connection() {
let data = if let Some(flow) = node_ref.last_flow() {
#[cfg(feature = "verbose-tracing")]
debug!(
"ExistingConnection: {:?} for {:?}",
connection_descriptor, node_ref
flow, node_ref
);
match self
.net()
.send_data_to_existing_connection(connection_descriptor, data)
.send_data_to_existing_flow(flow, data)
.await?
{
None => {
SendDataToExistingFlowResult::Sent(unique_flow) => {
// Update timestamp for this last connection since we just sent to it
node_ref.set_last_connection(connection_descriptor, get_aligned_timestamp());
node_ref.set_last_flow(flow, get_aligned_timestamp());
return Ok(NetworkResult::value(SendDataKind::Existing(
connection_descriptor,
)));
return Ok(NetworkResult::value(SendDataMethod{
contact_method: NodeContactMethod::Existing,
opt_relayed_contact_method: None,
unique_flow
}));
}
Some(d) => {
SendDataToExistingFlowResult::NotSent(d) => {
// Connection couldn't send, kill it
node_ref.clear_last_connection(connection_descriptor);
node_ref.clear_last_connection(flow);
d
}
}
@ -317,15 +335,17 @@ impl NetworkManager {
};
// New direct connection was necessary for this dial info
let connection_descriptor =
network_result_try!(self.net().send_data_to_dial_info(dial_info, data).await?);
let unique_flow =
network_result_try!(self.net().send_data_to_dial_info(dial_info.clone(), data).await?);
// If we connected to this node directly, save off the last connection so we can use it again
node_ref.set_last_connection(connection_descriptor, get_aligned_timestamp());
node_ref.set_last_flow(unique_flow.flow, get_aligned_timestamp());
Ok(NetworkResult::value(SendDataKind::Direct(
connection_descriptor,
)))
Ok(NetworkResult::value(SendDataMethod {
contact_method: NodeContactMethod::Direct(dial_info),
opt_relayed_contact_method: None,
unique_flow,
}))
}
/// Figure out how to reach a node from our own node over the best routing domain and reference the nodes we want to access
@ -381,8 +401,7 @@ impl NetworkManager {
.with_address_type_set(peer_a.signed_node_info().node_info().address_types())
.with_protocol_type_set(peer_a.signed_node_info().node_info().outbound_protocols()));
let sequencing = target_node_ref.sequencing();
// If the node has had lost questions or failures to send, prefer sequencing
// to improve reliability. The node may be experiencing UDP fragmentation drops
// or other firewalling issues and may perform better with TCP.
@ -439,6 +458,7 @@ impl NetworkManager {
bail!("signalreverse target noderef didn't match target key: {:?} != {} for relay {}", target_node_ref, target_key, relay_key );
}
relay_nr.set_sequencing(sequencing);
let target_node_ref = target_node_ref.filtered_clone(NodeRefFilter::from(dial_info_filter));
NodeContactMethod::SignalReverse(relay_nr, target_node_ref)
}
ContactMethod::SignalHolePunch(relay_key, target_key) => {
@ -459,7 +479,7 @@ impl NetworkManager {
// if any other protocol were possible here we could update this and do_hole_punch
// but tcp hole punch is very very unreliable it seems
let udp_target_node_ref = target_node_ref
.filtered_clone(NodeRefFilter::new().with_protocol_type(ProtocolType::UDP));
.filtered_clone(NodeRefFilter::new().with_dial_info_filter(dial_info_filter).with_protocol_type(ProtocolType::UDP));
NodeContactMethod::SignalHolePunch(relay_nr, udp_target_node_ref)
}
@ -511,7 +531,7 @@ impl NetworkManager {
relay_nr: NodeRef,
target_nr: NodeRef,
data: Vec<u8>,
) -> EyreResult<NetworkResult<ConnectionDescriptor>> {
) -> EyreResult<NetworkResult<UniqueFlow>> {
// Build a return receipt for the signal
let receipt_timeout = ms_to_us(
self.unlocked_inner
@ -575,14 +595,14 @@ impl NetworkManager {
}
// And now use the existing connection to send over
if let Some(descriptor) = inbound_nr.last_connection() {
if let Some(flow) = inbound_nr.last_flow() {
match self
.net()
.send_data_to_existing_connection(descriptor, data)
.send_data_to_existing_flow(flow, data)
.await?
{
None => Ok(NetworkResult::value(descriptor)),
Some(_) => Ok(NetworkResult::no_connection_other(
SendDataToExistingFlowResult::Sent(unique_flow) => Ok(NetworkResult::value(unique_flow)),
SendDataToExistingFlowResult::NotSent(_) => Ok(NetworkResult::no_connection_other(
"unable to send over reverse connection",
)),
}
@ -603,7 +623,7 @@ impl NetworkManager {
relay_nr: NodeRef,
target_nr: NodeRef,
data: Vec<u8>,
) -> EyreResult<NetworkResult<ConnectionDescriptor>> {
) -> EyreResult<NetworkResult<UniqueFlow>> {
// Ensure we are filtered down to UDP (the only hole punch protocol supported today)
assert!(target_nr
.filter_ref()
@ -643,7 +663,7 @@ impl NetworkManager {
// Do our half of the hole punch by sending an empty packet
// Both sides will do this and then the receipt will get sent over the punched hole
// Don't bother storing the returned connection descriptor as the 'last connection' because the other side of the hole
// Don't bother storing the returned flow as the 'last flow' because the other side of the hole
// punch should come through and create a real 'last connection' for us if this succeeds
network_result_try!(
self.net()
@ -693,14 +713,14 @@ impl NetworkManager {
}
// And now use the existing connection to send over
if let Some(descriptor) = inbound_nr.last_connection() {
if let Some(flow) = inbound_nr.last_flow() {
match self
.net()
.send_data_to_existing_connection(descriptor, data)
.send_data_to_existing_flow(flow, data)
.await?
{
None => Ok(NetworkResult::value(descriptor)),
Some(_) => Ok(NetworkResult::no_connection_other(
SendDataToExistingFlowResult::Sent(unique_flow) => Ok(NetworkResult::value(unique_flow)),
SendDataToExistingFlowResult::NotSent(_) => Ok(NetworkResult::no_connection_other(
"unable to send over hole punch",
)),
}
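
A note on the shape of the result: send_data() now reports how a payload actually went out via SendDataMethod (contact_method, opt_relayed_contact_method, unique_flow). The sketch below shows how a caller might summarize that result; describe_send is a hypothetical helper, not part of this change, and it assumes only the fields and NodeContactMethod variants visible in the hunks above.

// Hypothetical helper (sketch only): summarize how send_data() delivered a payload.
fn describe_send(sdm: &SendDataMethod) -> String {
    let how = match &sdm.contact_method {
        NodeContactMethod::Existing => "an existing flow",
        NodeContactMethod::Direct(_) => "a direct dial info connection",
        NodeContactMethod::SignalReverse(_, _) => "a signalled reverse connection",
        NodeContactMethod::SignalHolePunch(_, _) => "a signalled hole punch",
        _ => "another contact method",
    };
    let relayed = if sdm.opt_relayed_contact_method.is_some() {
        " via a relay"
    } else {
        ""
    };
    format!("delivered over {}{}: {:?}", how, relayed, sdm.unique_flow)
}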

View File

@ -42,6 +42,7 @@ impl NetworkManager {
.self_stats
.transfer_stats_accounting
.add_up(bytes);
#[allow(clippy::unwrap_or_default)]
inner
.stats
.per_address_stats
@ -58,6 +59,7 @@ impl NetworkManager {
.self_stats
.transfer_stats_accounting
.add_down(bytes);
#[allow(clippy::unwrap_or_default)]
inner
.stats
.per_address_stats
@ -67,7 +69,7 @@ impl NetworkManager {
.add_down(bytes);
}
// Get stats
#[allow(dead_code)]
pub fn get_stats(&self) -> NetworkManagerStats {
let inner = self.inner.lock();
inner.stats.clone()

View File

@ -31,7 +31,7 @@ impl NetworkManager {
pub fn report_local_network_socket_address(
&self,
_socket_address: SocketAddress,
_connection_descriptor: ConnectionDescriptor,
_flow: Flow,
_reporting_peer: NodeRef,
) {
// XXX: Nothing here yet.
@ -43,11 +43,11 @@ impl NetworkManager {
pub fn report_public_internet_socket_address(
&self,
socket_address: SocketAddress, // the socket address as seen by the remote peer
connection_descriptor: ConnectionDescriptor, // the connection descriptor used
flow: Flow, // the flow used
reporting_peer: NodeRef, // the peer's noderef reporting the socket address
) {
#[cfg(feature = "network-result-extra")]
debug!("report_global_socket_address\nsocket_address: {:#?}\nconnection_descriptor: {:#?}\nreporting_peer: {:#?}", socket_address, connection_descriptor, reporting_peer);
debug!("report_global_socket_address\nsocket_address: {:#?}\nflow: {:#?}\nreporting_peer: {:#?}", socket_address, flow, reporting_peer);
// Ignore these reports if we are currently detecting public dial info
let net = self.net();
@ -77,10 +77,7 @@ impl NetworkManager {
});
// Get the ip(block) this report is coming from
let reporting_ipblock = ip_to_ipblock(
ip6_prefix_size,
connection_descriptor.remote_address().ip_addr(),
);
let reporting_ipblock = ip_to_ipblock(ip6_prefix_size, flow.remote_address().ip_addr());
// Reject public address reports from nodes that we know are behind symmetric nat or
// nodes that must be using a relay for everything
@ -105,10 +102,8 @@ impl NetworkManager {
let mut inner = self.inner.lock();
let inner = &mut *inner;
let addr_proto_type_key = PublicAddressCheckCacheKey(
connection_descriptor.protocol_type(),
connection_descriptor.address_type(),
);
let addr_proto_type_key =
PublicAddressCheckCacheKey(flow.protocol_type(), flow.address_type());
if inner
.public_address_inconsistencies_table
.get(&addr_proto_type_key)
@ -136,7 +131,7 @@ impl NetworkManager {
NetworkClass::InboundCapable
) {
// Get the dial info filter for this connection so we can check if we have any public dialinfo that may have changed
let dial_info_filter = connection_descriptor.make_dial_info_filter();
let dial_info_filter = flow.make_dial_info_filter();
// Get current external ip/port from registered global dialinfo
let current_addresses: BTreeSet<SocketAddress> = routing_table
@ -192,7 +187,7 @@ impl NetworkManager {
let pait = inner
.public_address_inconsistencies_table
.entry(addr_proto_type_key)
.or_insert_with(HashMap::new);
.or_default();
for i in &inconsistencies {
pait.insert(*i, exp_ts);
}
@ -204,7 +199,7 @@ impl NetworkManager {
let pait = inner
.public_address_inconsistencies_table
.entry(addr_proto_type_key)
.or_insert_with(HashMap::new);
.or_default();
let exp_ts = get_aligned_timestamp()
+ PUBLIC_ADDRESS_INCONSISTENCY_PUNISHMENT_TIMEOUT_US;
for i in inconsistencies {
@ -267,7 +262,7 @@ impl NetworkManager {
net.set_needs_public_dial_info_check(bad_public_address_detection_punishment);
} else {
warn!("Public address may have changed. Restarting the server may be required.");
warn!("report_global_socket_address\nsocket_address: {:#?}\nconnection_descriptor: {:#?}\nreporting_peer: {:#?}", socket_address, connection_descriptor, reporting_peer);
warn!("report_global_socket_address\nsocket_address: {:#?}\nflow: {:#?}\nreporting_peer: {:#?}", socket_address, flow, reporting_peer);
warn!(
"public_address_check_cache: {:#?}",
inner.public_address_check_cache
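
For reference, the punishment table touched above is keyed by (ProtocolType, AddressType) and maps each inconsistent reported address to an expiration timestamp, now filled in through Entry::or_default(). A standalone sketch of that bookkeeping pattern, using hypothetical stand-in types rather than the veilid-core ones:

use std::collections::HashMap;
use std::net::SocketAddr;

// Hypothetical stand-ins; the real code keys on (ProtocolType, AddressType) and uses Timestamp.
type CheckKey = (u8, u8);
type ExpirationTs = u64;

#[derive(Default)]
struct InconsistencyTable {
    table: HashMap<CheckKey, HashMap<SocketAddr, ExpirationTs>>,
}

impl InconsistencyTable {
    // Record inconsistent addresses, each punished until `exp_ts`.
    fn punish(&mut self, key: CheckKey, addrs: &[SocketAddr], exp_ts: ExpirationTs) {
        let per_key = self.table.entry(key).or_default();
        for a in addrs {
            per_key.insert(*a, exp_ts);
        }
    }

    // Drop addresses whose punishment window has expired.
    fn purge_expired(&mut self, now: ExpirationTs) {
        for per_key in self.table.values_mut() {
            per_key.retain(|_, exp| *exp > now);
        }
        self.table.retain(|_, per_key| !per_key.is_empty());
    }
}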

View File

@ -9,12 +9,12 @@ pub async fn test_add_get_remove() {
let address_filter = AddressFilter::new(config.clone(), mock_routing_table());
let table = ConnectionTable::new(config, address_filter);
let a1 = ConnectionDescriptor::new_no_local(PeerAddress::new(
let a1 = Flow::new_no_local(PeerAddress::new(
SocketAddress::new(Address::IPV4(Ipv4Addr::new(192, 168, 0, 1)), 8080),
ProtocolType::TCP,
));
let a2 = a1;
let a3 = ConnectionDescriptor::new(
let a3 = Flow::new(
PeerAddress::new(
SocketAddress::new(Address::IPV6(Ipv6Addr::new(191, 0, 0, 0, 0, 0, 0, 1)), 8090),
ProtocolType::TCP,
@ -26,7 +26,7 @@ pub async fn test_add_get_remove() {
0,
))),
);
let a4 = ConnectionDescriptor::new(
let a4 = Flow::new(
PeerAddress::new(
SocketAddress::new(Address::IPV6(Ipv6Addr::new(192, 0, 0, 0, 0, 0, 0, 1)), 8090),
ProtocolType::TCP,
@ -38,7 +38,7 @@ pub async fn test_add_get_remove() {
0,
))),
);
let a5 = ConnectionDescriptor::new(
let a5 = Flow::new(
PeerAddress::new(
SocketAddress::new(Address::IPV6(Ipv6Addr::new(192, 0, 0, 0, 0, 0, 0, 1)), 8090),
ProtocolType::WSS,
@ -59,12 +59,12 @@ pub async fn test_add_get_remove() {
let c4 = NetworkConnection::dummy(4.into(), a4);
let c5 = NetworkConnection::dummy(5.into(), a5);
assert_eq!(a1, c2.connection_descriptor());
assert_ne!(a3, c4.connection_descriptor());
assert_ne!(a4, c5.connection_descriptor());
assert_eq!(a1, c2.flow());
assert_ne!(a3, c4.flow());
assert_ne!(a4, c5.flow());
assert_eq!(table.connection_count(), 0);
assert_eq!(table.get_connection_by_descriptor(a1), None);
assert_eq!(table.peek_connection_by_flow(a1), None);
table.add_connection(c1).unwrap();
assert!(table.add_connection(c1b).is_err());
@ -72,26 +72,26 @@ pub async fn test_add_get_remove() {
assert!(table.remove_connection_by_id(4.into()).is_none());
assert!(table.remove_connection_by_id(5.into()).is_none());
assert_eq!(table.connection_count(), 1);
assert_eq!(table.get_connection_by_descriptor(a1), Some(c1h.clone()));
assert_eq!(table.get_connection_by_descriptor(a1), Some(c1h.clone()));
assert_eq!(table.peek_connection_by_flow(a1), Some(c1h.clone()));
assert_eq!(table.peek_connection_by_flow(a1), Some(c1h.clone()));
assert_eq!(table.connection_count(), 1);
assert_err!(table.add_connection(c2));
assert_eq!(table.connection_count(), 1);
assert_eq!(table.get_connection_by_descriptor(a1), Some(c1h.clone()));
assert_eq!(table.get_connection_by_descriptor(a1), Some(c1h.clone()));
assert_eq!(table.peek_connection_by_flow(a1), Some(c1h.clone()));
assert_eq!(table.peek_connection_by_flow(a1), Some(c1h.clone()));
assert_eq!(table.connection_count(), 1);
assert_eq!(
table
.remove_connection_by_id(1.into())
.map(|c| c.connection_descriptor())
.map(|c| c.flow())
.unwrap(),
a1
);
assert_eq!(table.connection_count(), 0);
assert!(table.remove_connection_by_id(2.into()).is_none());
assert_eq!(table.connection_count(), 0);
assert_eq!(table.get_connection_by_descriptor(a2), None);
assert_eq!(table.get_connection_by_descriptor(a1), None);
assert_eq!(table.peek_connection_by_flow(a2), None);
assert_eq!(table.peek_connection_by_flow(a1), None);
assert_eq!(table.connection_count(), 0);
let c1 = NetworkConnection::dummy(6.into(), a1);
table.add_connection(c1).unwrap();
@ -103,21 +103,21 @@ pub async fn test_add_get_remove() {
assert_eq!(
table
.remove_connection_by_id(6.into())
.map(|c| c.connection_descriptor())
.map(|c| c.flow())
.unwrap(),
a2
);
assert_eq!(
table
.remove_connection_by_id(3.into())
.map(|c| c.connection_descriptor())
.map(|c| c.flow())
.unwrap(),
a3
);
assert_eq!(
table
.remove_connection_by_id(4.into())
.map(|c| c.connection_descriptor())
.map(|c| c.flow())
.unwrap(),
a4
);

View File

@ -1,7 +1,6 @@
use super::*;
// Ordering here matters, IPV6 is preferred to IPV4 in dial info sorts
// See issue #236 for eventual resolution of this unfortunate implementation
#[derive(Copy, Clone, Debug, PartialEq, PartialOrd, Ord, Eq, Hash, Serialize, Deserialize)]
pub enum Address {
IPV6(Ipv6Addr),
@ -21,6 +20,7 @@ impl Address {
SocketAddr::V6(v6) => Address::IPV6(*v6.ip()),
}
}
#[cfg_attr(target_arch = "wasm32", allow(dead_code))]
pub fn from_ip_addr(addr: IpAddr) -> Address {
match addr {
IpAddr::V4(v4) => Address::IPV4(v4),
@ -33,18 +33,6 @@ impl Address {
Address::IPV6(_) => AddressType::IPV6,
}
}
pub fn address_string(&self) -> String {
match self {
Address::IPV4(v4) => v4.to_string(),
Address::IPV6(v6) => v6.to_string(),
}
}
pub fn address_string_with_port(&self, port: u16) -> String {
match self {
Address::IPV4(v4) => format!("{}:{}", v4, port),
Address::IPV6(v6) => format!("[{}]:{}", v6, port),
}
}
pub fn is_unspecified(&self) -> bool {
match self {
Address::IPV4(v4) => ipv4addr_is_unspecified(v4),

View File

@ -234,6 +234,7 @@ impl DialInfo {
Self::WSS(di) => di.socket_address.address(),
}
}
#[allow(dead_code)]
pub fn set_address(&mut self, address: Address) {
match self {
Self::UDP(di) => di.socket_address.set_address(address),
@ -258,6 +259,7 @@ impl DialInfo {
Self::WSS(di) => di.socket_address.ip_addr(),
}
}
#[cfg_attr(target_arch = "wasm32", allow(dead_code))]
pub fn port(&self) -> u16 {
match self {
Self::UDP(di) => di.socket_address.port(),
@ -266,6 +268,7 @@ impl DialInfo {
Self::WSS(di) => di.socket_address.port(),
}
}
#[cfg_attr(target_arch = "wasm32", allow(dead_code))]
pub fn set_port(&mut self, port: u16) {
match self {
Self::UDP(di) => di.socket_address.set_port(port),
@ -274,6 +277,7 @@ impl DialInfo {
Self::WSS(di) => di.socket_address.set_port(port),
}
}
#[cfg_attr(target_arch = "wasm32", allow(dead_code))]
pub fn to_socket_addr(&self) -> SocketAddr {
match self {
Self::UDP(di) => di.socket_address.socket_addr(),
@ -453,6 +457,7 @@ impl DialInfo {
}
}
}
#[allow(dead_code)]
pub async fn to_url(&self) -> String {
match self {
DialInfo::UDP(di) => intf::ptr_lookup(di.socket_address.ip_addr())

View File

@ -97,6 +97,15 @@ impl From<AddressType> for DialInfoFilter {
}
}
impl From<Flow> for DialInfoFilter {
fn from(other: Flow) -> Self {
Self {
protocol_type_set: ProtocolTypeSet::from(other.protocol_type()),
address_type_set: AddressTypeSet::from(other.address_type()),
}
}
}
pub trait MatchesDialInfoFilter {
fn matches_filter(&self, filter: &DialInfoFilter) -> bool;
}
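
With the From<Flow> impl added above, a DialInfoFilter restricted to exactly a flow's protocol type and address type can be derived from the flow itself, and the flow always matches that filter. A minimal sketch, where filter_for is a hypothetical helper:

// Hypothetical helper (sketch only): derive a DialInfoFilter from a Flow.
fn filter_for(flow: Flow) -> DialInfoFilter {
    let filter = DialInfoFilter::from(flow);
    // Flow implements MatchesDialInfoFilter, so this always holds.
    debug_assert!(flow.matches_filter(&filter));
    filter
}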

View File

@ -3,17 +3,21 @@ use super::*;
/// Represents the 5-tuple of an established connection
/// Not used to specify connections to create, that is reserved for DialInfo
///
/// ConnectionDescriptors should never be from unspecified local addresses for connection oriented protocols
/// Abstracts both connections to 'connection oriented' protocols (TCP/WS/WSS), but also datagram protocols (UDP)
///
/// Flows should never be from UNSPECIFIED local addresses for connection oriented protocols
/// If the medium does not allow local addresses, None should have been used or 'new_no_local'
/// If we are specifying only a port, then the socket's 'local_address()' should have been used, since an
/// established connection is always from a real address to another real address.
///
#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)]
pub struct ConnectionDescriptor {
pub struct Flow {
remote: PeerAddress,
local: Option<SocketAddress>,
}
impl ConnectionDescriptor {
impl Flow {
pub fn new(remote: PeerAddress, local: SocketAddress) -> Self {
assert!(!remote.protocol_type().is_ordered() || !local.address().is_unspecified());
@ -50,7 +54,7 @@ impl ConnectionDescriptor {
}
}
impl MatchesDialInfoFilter for ConnectionDescriptor {
impl MatchesDialInfoFilter for Flow {
fn matches_filter(&self, filter: &DialInfoFilter) -> bool {
if !filter.protocol_type_set.contains(self.protocol_type()) {
return false;
@ -61,3 +65,14 @@ impl MatchesDialInfoFilter for ConnectionDescriptor {
true
}
}
/// UniqueFlow is a record of a specific flow that may or may not currently exist
/// The NetworkConnectionId associated with each flow may represent a low level network connection
/// and will be unique with high probability per low-level connection
#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)]
pub struct UniqueFlow {
pub flow: Flow,
pub connection_id: Option<NetworkConnectionId>,
}
pub type NetworkConnectionId = AlignedU64;
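
To illustrate the distinction drawn in the doc comments above, the sketch below builds a datagram-style Flow with no local address and wraps it in a UniqueFlow pinned to a specific low-level connection id, mirroring the constructors exercised in the connection table test earlier in this diff; example_unique_flow itself is hypothetical.

// Sketch only: construct a Flow and a UniqueFlow using the constructors shown above.
fn example_unique_flow() -> UniqueFlow {
    // Datagram-style flow: no local address recorded (connection-oriented protocols
    // should always carry a real local address, per the doc comment above).
    let flow = Flow::new_no_local(PeerAddress::new(
        SocketAddress::new(Address::IPV4(Ipv4Addr::new(192, 168, 0, 1)), 5150),
        ProtocolType::UDP,
    ));
    UniqueFlow {
        flow,
        // Some(id) ties the flow to one low-level connection; None means the id is unknown.
        connection_id: Some(1u64.into()),
    }
}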

View File

@ -4,18 +4,8 @@ use super::*;
// Keep member order appropriate for sorting < preference
// Must match DialInfo order
#[allow(clippy::derived_hash_with_manual_eq)]
#[derive(Debug, PartialOrd, Ord, Hash, EnumSetType, Serialize, Deserialize)]
#[enumset(repr = "u8")]
#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)]
pub enum LowLevelProtocolType {
UDP = 0,
TCP = 1,
}
impl LowLevelProtocolType {
pub fn is_connection_oriented(&self) -> bool {
matches!(self, LowLevelProtocolType::TCP)
}
}
// pub type LowLevelProtocolTypeSet = EnumSet<LowLevelProtocolType>;

View File

@ -1,9 +1,9 @@
mod address;
mod address_type;
mod connection_descriptor;
mod dial_info;
mod dial_info_class;
mod dial_info_filter;
mod flow;
mod low_level_protocol_type;
mod network_class;
mod peer_address;
@ -15,10 +15,10 @@ use super::*;
pub use address::*;
pub use address_type::*;
pub use connection_descriptor::*;
pub use dial_info::*;
pub use dial_info_class::*;
pub use dial_info_filter::*;
pub use flow::*;
pub use low_level_protocol_type::*;
pub use network_class::*;
pub use peer_address::*;

View File

@ -30,6 +30,7 @@ impl SocketAddress {
pub fn port(&self) -> u16 {
self.port
}
#[cfg_attr(target_arch = "wasm32", allow(dead_code))]
pub fn set_port(&mut self, port: u16) {
self.port = port
}

View File

@ -63,7 +63,7 @@ struct NetworkUnlockedInner {
}
#[derive(Clone)]
pub struct Network {
pub(in crate::network_manager) struct Network {
config: VeilidConfig,
inner: Arc<Mutex<NetworkInner>>,
unlocked_inner: Arc<NetworkUnlockedInner>,
@ -246,13 +246,13 @@ impl Network {
}
#[cfg_attr(feature="verbose-tracing", instrument(level="trace", err, skip(self, data), fields(data.len = data.len())))]
pub async fn send_data_to_existing_connection(
pub async fn send_data_to_existing_flow(
&self,
descriptor: ConnectionDescriptor,
flow: Flow,
data: Vec<u8>,
) -> EyreResult<Option<Vec<u8>>> {
) -> EyreResult<SendDataToExistingFlowResult> {
let data_len = data.len();
match descriptor.protocol_type() {
match flow.protocol_type() {
ProtocolType::UDP => {
bail!("no support for UDP protocol")
}
@ -265,29 +265,29 @@ impl Network {
// Handle connection-oriented protocols
// Try to send to the exact existing connection if one exists
if let Some(conn) = self.connection_manager().get_connection(descriptor) {
if let Some(conn) = self.connection_manager().get_connection(flow) {
// connection exists, send over it
match conn.send_async(data).await {
ConnectionHandleSendResult::Sent => {
// Network accounting
self.network_manager().stats_packet_sent(
descriptor.remote().socket_addr().ip(),
flow.remote().socket_addr().ip(),
ByteCount::new(data_len as u64),
);
// Data was consumed
return Ok(None);
return Ok(SendDataToExistingFlowResult::Sent(conn.unique_flow()));
}
ConnectionHandleSendResult::NotSent(data) => {
// Couldn't send
// Pass the data back out so we don't own it any more
return Ok(Some(data));
return Ok(SendDataToExistingFlowResult::NotSent(data));
}
}
}
// Connection didn't exist
// Pass the data back out so we don't own it any more
Ok(Some(data))
Ok(SendDataToExistingFlowResult::NotSent(data))
}
#[cfg_attr(feature="verbose-tracing", instrument(level="trace", err, skip(self, data), fields(data.len = data.len())))]
@ -295,7 +295,7 @@ impl Network {
&self,
dial_info: DialInfo,
data: Vec<u8>,
) -> EyreResult<NetworkResult<ConnectionDescriptor>> {
) -> EyreResult<NetworkResult<UniqueFlow>> {
self.record_dial_info_failure(dial_info.clone(), async move {
let data_len = data.len();
if dial_info.protocol_type() == ProtocolType::UDP {
@ -318,13 +318,13 @@ impl Network {
"failed to send",
)));
}
let connection_descriptor = conn.connection_descriptor();
let unique_flow = conn.unique_flow();
// Network accounting
self.network_manager()
.stats_packet_sent(dial_info.ip_addr(), ByteCount::new(data_len as u64));
Ok(NetworkResult::value(connection_descriptor))
Ok(NetworkResult::value(unique_flow))
})
.await
}
@ -430,18 +430,6 @@ impl Network {
trace!("network stopped");
}
pub fn is_stable_interface_address(&self, _addr: IpAddr) -> bool {
false
}
pub fn get_stable_interface_addresses(&self) -> Vec<IpAddr> {
Vec::new()
}
pub fn get_local_port(&self, _protocol_type: ProtocolType) -> Option<u16> {
None
}
pub fn get_preferred_local_address(&self, _dial_info: &DialInfo) -> Option<SocketAddr> {
None
}
@ -459,10 +447,6 @@ impl Network {
false
}
pub fn get_protocol_config(&self) -> ProtocolConfig {
self.inner.lock().protocol_config.clone()
}
//////////////////////////////////////////
pub async fn tick(&self) -> EyreResult<()> {
Ok(())

View File

@ -5,9 +5,9 @@ use super::*;
use std::io;
#[derive(Debug)]
pub enum ProtocolNetworkConnection {
pub(in crate::network_manager) enum ProtocolNetworkConnection {
#[allow(dead_code)]
Dummy(DummyNetworkConnection),
//Dummy(DummyNetworkConnection),
Ws(ws::WebsocketNetworkConnection),
//WebRTC(wrtc::WebRTCNetworkConnection),
}
@ -35,29 +35,28 @@ impl ProtocolNetworkConnection {
}
}
pub fn descriptor(&self) -> ConnectionDescriptor {
pub fn flow(&self) -> Flow {
match self {
Self::Dummy(d) => d.descriptor(),
Self::Ws(w) => w.descriptor(),
// Self::Dummy(d) => d.flow(),
Self::Ws(w) => w.flow(),
}
}
pub async fn close(&self) -> io::Result<NetworkResult<()>> {
match self {
// Self::Dummy(d) => d.close(),
Self::Ws(w) => w.close().await,
}
}
// pub async fn close(&self) -> io::Result<NetworkResult<()>> {
// match self {
// Self::Dummy(d) => d.close(),
// Self::Ws(w) => w.close().await,
// }
// }
pub async fn send(&self, message: Vec<u8>) -> io::Result<NetworkResult<()>> {
match self {
Self::Dummy(d) => d.send(message),
// Self::Dummy(d) => d.send(message),
Self::Ws(w) => w.send(message).await,
}
}
pub async fn recv(&self) -> io::Result<NetworkResult<Vec<u8>>> {
match self {
Self::Dummy(d) => d.recv(),
// Self::Dummy(d) => d.recv(),
Self::Ws(w) => w.recv().await,
}
}

View File

@ -5,7 +5,7 @@ use std::io;
use ws_stream_wasm::*;
struct WebsocketNetworkConnectionInner {
_ws_meta: WsMeta,
ws_meta: WsMeta,
ws_stream: CloneStream<WsStream>,
}
@ -34,7 +34,7 @@ fn to_io(err: WsErr) -> io::Error {
#[derive(Clone)]
pub struct WebsocketNetworkConnection {
descriptor: ConnectionDescriptor,
flow: Flow,
inner: Arc<WebsocketNetworkConnectionInner>,
}
@ -45,24 +45,29 @@ impl fmt::Debug for WebsocketNetworkConnection {
}
impl WebsocketNetworkConnection {
pub fn new(descriptor: ConnectionDescriptor, ws_meta: WsMeta, ws_stream: WsStream) -> Self {
pub fn new(flow: Flow, ws_meta: WsMeta, ws_stream: WsStream) -> Self {
Self {
descriptor,
flow,
inner: Arc::new(WebsocketNetworkConnectionInner {
_ws_meta: ws_meta,
ws_meta,
ws_stream: CloneStream::new(ws_stream),
}),
}
}
pub fn descriptor(&self) -> ConnectionDescriptor {
self.descriptor
pub fn flow(&self) -> Flow {
self.flow
}
// #[instrument(level = "trace", err, skip(self))]
// pub async fn close(&self) -> io::Result<()> {
// self.inner.ws_meta.close().await.map_err(to_io).map(drop)
// }
#[cfg_attr(
feature = "verbose-tracing",
instrument(level = "trace", err, skip(self))
)]
pub async fn close(&self) -> io::Result<NetworkResult<()>> {
let x = self.inner.ws_meta.close().await.map_err(to_io);
log_net!(debug "close result: {:?}", x);
Ok(NetworkResult::value(()))
}
#[cfg_attr(feature="verbose-tracing", instrument(level = "trace", err, skip(self, message), fields(network_result, message.len = message.len())))]
pub async fn send(&self, message: Vec<u8>) -> io::Result<NetworkResult<()>> {
@ -113,7 +118,7 @@ impl WebsocketNetworkConnection {
///////////////////////////////////////////////////////////
///
pub struct WebsocketProtocolHandler {}
pub(in crate::network_manager) struct WebsocketProtocolHandler {}
impl WebsocketProtocolHandler {
#[instrument(level = "trace", ret, err)]
@ -142,9 +147,9 @@ impl WebsocketProtocolHandler {
.into_network_result())
.into_network_result()?);
// Make our connection descriptor
// Make our flow
let wnc = WebsocketNetworkConnection::new(
ConnectionDescriptor::new_no_local(dial_info.peer_address()),
Flow::new_no_local(dial_info.peer_address()),
wsmeta,
wsio,
);

View File

@ -29,18 +29,18 @@ const NEVER_REACHED_PING_COUNT: u32 = 3;
// Do not change order here, it will mess up other sorts
#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord)]
pub enum BucketEntryState {
pub(crate) enum BucketEntryState {
Dead,
Unreliable,
Reliable,
}
#[derive(Debug, Clone, Eq, PartialEq, PartialOrd, Ord, Hash)]
pub struct LastConnectionKey(ProtocolType, AddressType);
pub(crate) struct LastFlowKey(ProtocolType, AddressType);
/// Bucket entry information specific to the PublicInternet RoutingDomain
#[derive(Debug, Serialize, Deserialize)]
pub struct BucketEntryPublicInternet {
pub(crate) struct BucketEntryPublicInternet {
/// The PublicInternet node info
signed_node_info: Option<Box<SignedNodeInfo>>,
/// The last node info timestamp of ours that this entry has seen
@ -51,7 +51,7 @@ pub struct BucketEntryPublicInternet {
/// Bucket entry information specific to the LocalNetwork RoutingDomain
#[derive(Debug, Serialize, Deserialize)]
pub struct BucketEntryLocalNetwork {
pub(crate) struct BucketEntryLocalNetwork {
/// The LocalNetwork node info
signed_node_info: Option<Box<SignedNodeInfo>>,
/// The last node info timestamp of ours that this entry has seen
@ -62,7 +62,7 @@ pub struct BucketEntryLocalNetwork {
/// The data associated with each bucket entry
#[derive(Debug, Serialize, Deserialize)]
pub struct BucketEntryInner {
pub(crate) struct BucketEntryInner {
/// The node ids matching this bucket entry, with the cryptography versions supported by this node as the 'kind' field
validated_node_ids: TypedKeyGroup,
/// The node ids claimed by the remote node that use cryptography versions we do not support
@ -75,9 +75,9 @@ pub struct BucketEntryInner {
/// has the same timestamp, because if we change our own IP address or network class, nodes that were previously
/// unreachable may now be reachable with the same SignedNodeInfo/DialInfo
updated_since_last_network_change: bool,
/// The last connection descriptors used to contact this node, per protocol type
/// The last flows used to contact this node, per protocol type
#[serde(skip)]
last_connections: BTreeMap<LastConnectionKey, (ConnectionDescriptor, Timestamp)>,
last_flows: BTreeMap<LastFlowKey, (Flow, Timestamp)>,
/// The node info for this entry on the publicinternet routing domain
public_internet: BucketEntryPublicInternet,
/// The node info for this entry on the localnetwork routing domain
@ -175,6 +175,7 @@ impl BucketEntryInner {
}
// Less is faster
#[allow(dead_code)]
pub fn cmp_fastest(e1: &Self, e2: &Self) -> std::cmp::Ordering {
// Lower latency to the front
if let Some(e1_latency) = &e1.peer_stats.latency {
@ -234,10 +235,12 @@ impl BucketEntryInner {
}
}
#[allow(dead_code)]
pub fn sort_fastest_reliable_fn(cur_ts: Timestamp) -> impl FnMut(&Self, &Self) -> std::cmp::Ordering {
move |e1, e2| Self::cmp_fastest_reliable(cur_ts, e1, e2)
}
#[cfg_attr(target_arch = "wasm32", allow(dead_code))]
pub fn clear_signed_node_info(&mut self, routing_domain: RoutingDomain) {
// Get the correct signed_node_info for the chosen routing domain
let opt_current_sni = match routing_domain {
@ -301,7 +304,7 @@ impl BucketEntryInner {
// The latest connection would have been the one we got the new node info
// over so that connection is still valid.
if node_info_changed {
self.clear_last_connections_except_latest();
self.clear_last_flows_except_latest();
}
}
@ -330,7 +333,7 @@ impl BucketEntryInner {
}
// Check connections
let last_connections = self.last_connections(
let last_connections = self.last_flows(
rti,
true,
NodeRefFilter::from(routing_domain),
@ -384,7 +387,7 @@ impl BucketEntryInner {
}
// Check connections
let mut best_routing_domain: Option<RoutingDomain> = None;
let last_connections = self.last_connections(
let last_connections = self.last_flows(
rti,
true,
NodeRefFilter::from(routing_domain_set),
@ -405,77 +408,77 @@ impl BucketEntryInner {
best_routing_domain
}
fn descriptor_to_key(&self, last_connection: ConnectionDescriptor) -> LastConnectionKey {
LastConnectionKey(
last_connection.protocol_type(),
last_connection.address_type(),
fn flow_to_key(&self, last_flow: Flow) -> LastFlowKey {
LastFlowKey(
last_flow.protocol_type(),
last_flow.address_type(),
)
}
// Stores a connection descriptor in this entry's table of last connections
pub fn set_last_connection(&mut self, last_connection: ConnectionDescriptor, timestamp: Timestamp) {
// Stores a flow in this entry's table of last flows
pub fn set_last_flow(&mut self, last_flow: Flow, timestamp: Timestamp) {
if self.is_punished {
// Don't record connection if this entry is currently punished
return;
}
let key = self.descriptor_to_key(last_connection);
self.last_connections
.insert(key, (last_connection, timestamp));
let key = self.flow_to_key(last_flow);
self.last_flows
.insert(key, (last_flow, timestamp));
}
// Removes a connection descriptor in this entry's table of last connections
pub fn clear_last_connection(&mut self, last_connection: ConnectionDescriptor) {
let key = self.descriptor_to_key(last_connection);
self.last_connections
// Removes a flow in this entry's table of last flows
pub fn remove_last_flow(&mut self, last_flow: Flow) {
let key = self.flow_to_key(last_flow);
self.last_flows
.remove(&key);
}
// Clears the table of last connections to ensure we create new ones and drop any existing ones
pub fn clear_last_connections(&mut self) {
self.last_connections.clear();
// Clears the table of last flows to ensure we create new ones and drop any existing ones
pub fn clear_last_flows(&mut self) {
self.last_flows.clear();
}
// Clears the table of last connections except the most recent one
pub fn clear_last_connections_except_latest(&mut self) {
if self.last_connections.is_empty() {
// Clears the table of last flows except the most recent one
pub fn clear_last_flows_except_latest(&mut self) {
if self.last_flows.is_empty() {
// No last_connections
return;
}
let mut dead_keys = Vec::with_capacity(self.last_connections.len()-1);
let mut most_recent_connection = None;
let mut most_recent_connection_time = 0u64;
for (k, v) in &self.last_connections {
let mut dead_keys = Vec::with_capacity(self.last_flows.len()-1);
let mut most_recent_flow = None;
let mut most_recent_flow_time = 0u64;
for (k, v) in &self.last_flows {
let lct = v.1.as_u64();
if lct > most_recent_connection_time {
most_recent_connection = Some(k);
most_recent_connection_time = lct;
if lct > most_recent_flow_time {
most_recent_flow = Some(k);
most_recent_flow_time = lct;
}
}
let Some(most_recent_connection) = most_recent_connection else {
let Some(most_recent_flow) = most_recent_flow else {
return;
};
for k in self.last_connections.keys() {
if k != most_recent_connection {
for k in self.last_flows.keys() {
if k != most_recent_flow {
dead_keys.push(k.clone());
}
}
for dk in dead_keys {
self.last_connections.remove(&dk);
self.last_flows.remove(&dk);
}
}
// Gets all the 'last connections' that match a particular filter, and their accompanying timestamps of last use
pub(super) fn last_connections(
// Gets all the 'last flows' that match a particular filter, and their accompanying timestamps of last use
pub(super) fn last_flows(
&self,
rti: &RoutingTableInner,
only_live: bool,
filter: NodeRefFilter,
) -> Vec<(ConnectionDescriptor, Timestamp)> {
) -> Vec<(Flow, Timestamp)> {
let connection_manager =
rti.unlocked_inner.network_manager.connection_manager();
let mut out: Vec<(ConnectionDescriptor, Timestamp)> = self
.last_connections
let mut out: Vec<(Flow, Timestamp)> = self
.last_flows
.iter()
.filter_map(|(k, v)| {
let include = {
@ -537,6 +540,7 @@ impl BucketEntryInner {
self.envelope_support = envelope_support;
}
#[allow(dead_code)]
pub fn envelope_support(&self) -> Vec<u8> {
self.envelope_support.clone()
}
@ -560,7 +564,7 @@ impl BucketEntryInner {
pub fn set_punished(&mut self, punished: bool) {
self.is_punished = punished;
if punished {
self.clear_last_connections();
self.clear_last_flows();
}
}
@ -617,12 +621,8 @@ impl BucketEntryInner {
}
}
pub fn set_updated_since_last_network_change(&mut self, updated: bool) {
self.updated_since_last_network_change = updated;
}
pub fn has_updated_since_last_network_change(&self) -> bool {
self.updated_since_last_network_change
pub fn reset_updated_since_last_network_change(&mut self) {
self.updated_since_last_network_change = false;
}
///// stats methods
@ -828,7 +828,7 @@ impl BucketEntryInner {
}
#[derive(Debug)]
pub struct BucketEntry {
pub(crate) struct BucketEntry {
pub(super) ref_count: AtomicU32,
inner: RwLock<BucketEntryInner>,
}
@ -845,7 +845,7 @@ impl BucketEntry {
unsupported_node_ids: TypedKeyGroup::new(),
envelope_support: Vec::new(),
updated_since_last_network_change: false,
last_connections: BTreeMap::new(),
last_flows: BTreeMap::new(),
local_network: BucketEntryLocalNetwork {
last_seen_our_node_info_ts: Timestamp::new(0u64),
signed_node_info: None,

View File

@ -20,20 +20,20 @@ use super::*;
use crate::crypto::*;
use crate::network_manager::*;
use crate::rpc_processor::*;
use bucket::*;
use hashlink::LruCache;
pub use bucket_entry::*;
pub use debug::*;
pub use find_peers::*;
pub use node_ref::*;
pub use node_ref_filter::*;
pub use privacy::*;
pub use route_spec_store::*;
pub use routing_domain_editor::*;
pub use routing_domains::*;
pub use routing_table_inner::*;
pub use stats_accounting::*;
pub(crate) use bucket_entry::*;
pub(crate) use node_ref::*;
pub(crate) use node_ref_filter::*;
pub(crate) use privacy::*;
pub(crate) use route_spec_store::*;
pub(crate) use routing_domain_editor::*;
pub(crate) use routing_domains::*;
pub(crate) use routing_table_inner::*;
pub(crate) use stats_accounting::*;
pub use types::*;
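Most of this hunk narrows item visibility rather than changing behavior. For readers less familiar with Rust's scoped visibility, here is a tiny standalone sketch of the forms used throughout this merge; the module and item names are illustrative only, not the real Veilid items:

mod routing_table {
    // Visible anywhere inside this crate, but not to downstream crates.
    pub(crate) struct RoutingTable;

    // Part of the crate's public API surface.
    pub struct RoutingTableHealth;

    pub(crate) mod bucket_entry {
        // Visible only inside the routing_table module subtree.
        pub(in crate::routing_table) struct BucketEntry;
    }

    // Fine: we are inside routing_table, so BucketEntry is reachable here.
    pub(crate) fn entry_count() -> usize {
        let _entry = bucket_entry::BucketEntry;
        1
    }
}

fn main() {
    let _table = routing_table::RoutingTable;
    let _health = routing_table::RoutingTableHealth;
    assert_eq!(routing_table::entry_count(), 1);
    // routing_table::bucket_entry::BucketEntry would not compile here,
    // because it is only visible inside the routing_table subtree.
}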
//////////////////////////////////////////////////////////////////////////
@ -57,20 +57,21 @@ const CACHE_VALIDITY_KEY: &[u8] = b"cache_validity_key";
// Critical sections
const LOCK_TAG_TICK: &str = "TICK";
pub type LowLevelProtocolPorts = BTreeSet<(LowLevelProtocolType, AddressType, u16)>;
pub type ProtocolToPortMapping = BTreeMap<(ProtocolType, AddressType), (LowLevelProtocolType, u16)>;
type LowLevelProtocolPorts = BTreeSet<(LowLevelProtocolType, AddressType, u16)>;
type ProtocolToPortMapping = BTreeMap<(ProtocolType, AddressType), (LowLevelProtocolType, u16)>;
#[derive(Clone, Debug)]
pub struct LowLevelPortInfo {
pub low_level_protocol_ports: LowLevelProtocolPorts,
pub protocol_to_port: ProtocolToPortMapping,
}
pub type RoutingTableEntryFilter<'t> =
pub(crate) type RoutingTableEntryFilter<'t> =
Box<dyn FnMut(&RoutingTableInner, Option<Arc<BucketEntry>>) -> bool + Send + 't>;
pub type SerializedBuckets = Vec<Vec<u8>>;
pub type SerializedBucketMap = BTreeMap<CryptoKind, SerializedBuckets>;
type SerializedBuckets = Vec<Vec<u8>>;
type SerializedBucketMap = BTreeMap<CryptoKind, SerializedBuckets>;
#[derive(Clone, Debug, Default, Eq, PartialEq)]
pub struct RoutingTableHealth {
pub(crate) struct RoutingTableHealth {
/// Number of reliable (long-term responsive) entries in the routing table
pub reliable_entry_count: usize,
/// Number of unreliable (occasionally unresponsive) entries in the routing table
@ -87,7 +88,12 @@ pub struct RoutingTableHealth {
pub type BucketIndex = (CryptoKind, usize);
pub struct RoutingTableUnlockedInner {
#[derive(Debug, Clone, Copy)]
pub(crate) struct RecentPeersEntry {
pub last_connection: Flow,
}
pub(crate) struct RoutingTableUnlockedInner {
// Accessors
config: VeilidConfig,
network_manager: NetworkManager,
@ -192,7 +198,7 @@ impl RoutingTableUnlockedInner {
}
#[derive(Clone)]
pub struct RoutingTable {
pub(crate) struct RoutingTable {
inner: Arc<RwLock<RoutingTableInner>>,
unlocked_inner: Arc<RoutingTableUnlockedInner>,
}
@ -455,6 +461,7 @@ impl RoutingTable {
}
/// Set up the local network routing domain with our local routing table configuration
#[cfg_attr(target_arch = "wasm32", allow(dead_code))]
pub fn configure_local_network_routing_domain(&self, local_networks: Vec<(IpAddr, IpAddr)>) {
log_net!(debug "configure_local_network_routing_domain: {:#?}", local_networks);
self.inner
@ -481,24 +488,10 @@ impl RoutingTable {
self.inner.read().relay_node_last_keepalive(domain)
}
pub fn has_dial_info(&self, domain: RoutingDomain) -> bool {
self.inner.read().has_dial_info(domain)
}
pub fn dial_info_details(&self, domain: RoutingDomain) -> Vec<DialInfoDetail> {
self.inner.read().dial_info_details(domain)
}
pub fn first_filtered_dial_info_detail(
&self,
routing_domain_set: RoutingDomainSet,
filter: &DialInfoFilter,
) -> Option<DialInfoDetail> {
self.inner
.read()
.first_filtered_dial_info_detail(routing_domain_set, filter)
}
pub fn all_filtered_dial_info_details(
&self,
routing_domain_set: RoutingDomainSet,
@ -509,22 +502,13 @@ impl RoutingTable {
.all_filtered_dial_info_details(routing_domain_set, filter)
}
#[cfg_attr(target_arch = "wasm32", allow(dead_code))]
pub fn ensure_dial_info_is_valid(&self, domain: RoutingDomain, dial_info: &DialInfo) -> bool {
self.inner
.read()
.ensure_dial_info_is_valid(domain, dial_info)
}
pub fn node_info_is_valid_in_routing_domain(
&self,
routing_domain: RoutingDomain,
node_info: &NodeInfo,
) -> bool {
self.inner
.read()
.node_info_is_valid_in_routing_domain(routing_domain, node_info)
}
pub fn signed_node_info_is_valid_in_routing_domain(
&self,
routing_domain: RoutingDomain,
@ -581,20 +565,6 @@ impl RoutingTable {
self.inner.read().get_network_class(routing_domain)
}
/// Return the domain's filter for what we can receive in the form of a dial info filter
pub fn get_inbound_dial_info_filter(&self, routing_domain: RoutingDomain) -> DialInfoFilter {
self.inner
.read()
.get_inbound_dial_info_filter(routing_domain)
}
/// Return the domain's filter for what we can receive in the form of a node ref filter
pub fn get_inbound_node_ref_filter(&self, routing_domain: RoutingDomain) -> NodeRefFilter {
self.inner
.read()
.get_inbound_node_ref_filter(routing_domain)
}
/// Return the domain's filter for what we can send out in the form of a dial info filter
pub fn get_outbound_dial_info_filter(&self, routing_domain: RoutingDomain) -> DialInfoFilter {
self.inner
@ -619,27 +589,7 @@ impl RoutingTable {
self.inner.write().purge_last_connections();
}
pub fn get_entry_count(
&self,
routing_domain_set: RoutingDomainSet,
min_state: BucketEntryState,
crypto_kinds: &[CryptoKind],
) -> usize {
self.inner
.read()
.get_entry_count(routing_domain_set, min_state, crypto_kinds)
}
pub fn get_entry_count_per_crypto_kind(
&self,
routing_domain_set: RoutingDomainSet,
min_state: BucketEntryState,
) -> BTreeMap<CryptoKind, usize> {
self.inner
.read()
.get_entry_count_per_crypto_kind(routing_domain_set, min_state)
}
/// See which nodes need to be pinged
pub fn get_nodes_needing_ping(
&self,
routing_domain: RoutingDomain,
@ -650,11 +600,6 @@ impl RoutingTable {
.get_nodes_needing_ping(self.clone(), routing_domain, cur_ts)
}
pub fn get_all_nodes(&self, cur_ts: Timestamp) -> Vec<NodeRef> {
let inner = self.inner.read();
inner.get_all_nodes(self.clone(), cur_ts)
}
fn queue_bucket_kicks(&self, node_ids: TypedKeyGroup) {
for node_id in node_ids.iter() {
// Skip node ids we didn't add to buckets
@ -717,13 +662,13 @@ impl RoutingTable {
pub fn register_node_with_existing_connection(
&self,
node_id: TypedKey,
descriptor: ConnectionDescriptor,
flow: Flow,
timestamp: Timestamp,
) -> EyreResult<NodeRef> {
self.inner.write().register_node_with_existing_connection(
self.clone(),
node_id,
descriptor,
flow,
timestamp,
)
}
@ -753,7 +698,7 @@ impl RoutingTable {
for e in &recent_peers {
let mut dead = true;
if let Ok(Some(nr)) = self.lookup_node_ref(*e) {
if let Some(last_connection) = nr.last_connection() {
if let Some(last_connection) = nr.last_flow() {
out.push((*e, RecentPeersEntry { last_connection }));
dead = false;
}
@ -774,12 +719,6 @@ impl RoutingTable {
out
}
pub fn touch_recent_peer(&self, node_id: TypedKey, last_connection: ConnectionDescriptor) {
self.inner
.write()
.touch_recent_peer(node_id, last_connection)
}
//////////////////////////////////////////////////////////////////////
// Find Nodes
@ -788,7 +727,7 @@ impl RoutingTable {
/// Only one protocol per low level protocol/port combination is required
/// For example, if WS/WSS and TCP protocols are on the same low-level TCP port, only TCP keepalives will be required
/// and we do not need to do WS/WSS keepalive as well. If they are on different ports, then we will need WS/WSS keepalives too.
pub fn get_low_level_port_info(&self) -> LowLevelPortInfo {
fn get_low_level_port_info(&self) -> LowLevelPortInfo {
let mut low_level_protocol_ports =
BTreeSet::<(LowLevelProtocolType, AddressType, u16)>::new();
let mut protocol_to_port =
@ -818,6 +757,7 @@ impl RoutingTable {
}
/// Makes a filter that finds nodes with a matching inbound dialinfo
#[cfg_attr(target_arch = "wasm32", allow(dead_code))]
pub fn make_inbound_dial_info_entry_filter<'a>(
routing_domain: RoutingDomain,
dial_info_filter: DialInfoFilter,
@ -978,27 +918,6 @@ impl RoutingTable {
out
}
pub fn find_peers_with_sort_and_filter<C, T, O>(
&self,
node_count: usize,
cur_ts: Timestamp,
filters: VecDeque<RoutingTableEntryFilter>,
compare: C,
transform: T,
) -> Vec<O>
where
C: for<'a, 'b> FnMut(
&'a RoutingTableInner,
&'b Option<Arc<BucketEntry>>,
&'b Option<Arc<BucketEntry>>,
) -> core::cmp::Ordering,
T: for<'r> FnMut(&'r RoutingTableInner, Option<Arc<BucketEntry>>) -> O + Send,
{
self.inner
.read()
.find_peers_with_sort_and_filter(node_count, cur_ts, filters, compare, transform)
}
pub fn find_preferred_fastest_nodes<'a, T, O>(
&self,
node_count: usize,

View File

@ -4,7 +4,7 @@ use alloc::fmt;
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
pub struct NodeRefBaseCommon {
pub(crate) struct NodeRefBaseCommon {
routing_table: RoutingTable,
entry: Arc<BucketEntry>,
filter: Option<NodeRefFilter>,
@ -15,7 +15,7 @@ pub struct NodeRefBaseCommon {
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
pub trait NodeRefBase: Sized {
pub(crate) trait NodeRefBase: Sized {
// Common field access
fn common(&self) -> &NodeRefBaseCommon;
fn common_mut(&mut self) -> &mut NodeRefBaseCommon;
@ -112,12 +112,6 @@ pub trait NodeRefBase: Sized {
fn best_node_id(&self) -> TypedKey {
self.operate(|_rti, e| e.best_node_id())
}
fn has_updated_since_last_network_change(&self) -> bool {
self.operate(|_rti, e| e.has_updated_since_last_network_change())
}
fn set_updated_since_last_network_change(&self) {
self.operate_mut(|_rti, e| e.set_updated_since_last_network_change(true));
}
fn update_node_status(&self, routing_domain: RoutingDomain, node_status: NodeStatus) {
self.operate_mut(|_rti, e| {
e.update_node_status(routing_domain, node_status);
@ -279,13 +273,13 @@ pub trait NodeRefBase: Sized {
/// Get the most recent 'last connection' to this node
/// Filtered first and then sorted by ordering preference and then by most recent
fn last_connection(&self) -> Option<ConnectionDescriptor> {
fn last_flow(&self) -> Option<Flow> {
self.operate(|rti, e| {
// apply sequencing to filter and get sort
let sequencing = self.common().sequencing;
let filter = self.common().filter.unwrap_or_default();
let (ordered, filter) = filter.with_sequencing(sequencing);
let mut last_connections = e.last_connections(rti, true, filter);
let mut last_connections = e.last_flows(rti, true, filter);
if ordered {
last_connections.sort_by(|a, b| {
@ -298,19 +292,19 @@ pub trait NodeRefBase: Sized {
}
fn clear_last_connections(&self) {
self.operate_mut(|_rti, e| e.clear_last_connections())
self.operate_mut(|_rti, e| e.clear_last_flows())
}
fn set_last_connection(&self, connection_descriptor: ConnectionDescriptor, ts: Timestamp) {
fn set_last_flow(&self, flow: Flow, ts: Timestamp) {
self.operate_mut(|rti, e| {
e.set_last_connection(connection_descriptor, ts);
rti.touch_recent_peer(e.best_node_id(), connection_descriptor);
e.set_last_flow(flow, ts);
rti.touch_recent_peer(e.best_node_id(), flow);
})
}
fn clear_last_connection(&self, connection_descriptor: ConnectionDescriptor) {
fn clear_last_connection(&self, flow: Flow) {
self.operate_mut(|_rti, e| {
e.clear_last_connection(connection_descriptor);
e.remove_last_flow(flow);
})
}
@ -327,6 +321,14 @@ pub trait NodeRefBase: Sized {
})
}
fn report_protected_connection_dropped(&self) {
self.stats_failed_to_send(get_aligned_timestamp(), false);
}
fn report_failed_route_test(&self) {
self.stats_failed_to_send(get_aligned_timestamp(), false);
}
fn stats_question_sent(&self, ts: Timestamp, bytes: Timestamp, expects_answer: bool) {
self.operate_mut(|rti, e| {
rti.transfer_stats_accounting().add_up(bytes);
@ -369,7 +371,7 @@ pub trait NodeRefBase: Sized {
/// Reference to a routing table entry
/// Keeps entry in the routing table until all references are gone
pub struct NodeRef {
pub(crate) struct NodeRef {
common: NodeRefBaseCommon,
}
@ -496,7 +498,7 @@ impl Drop for NodeRef {
/// For internal use inside the RoutingTable module where you have
/// already locked a RoutingTableInner
/// Keeps entry in the routing table until all references are gone
pub struct NodeRefLocked<'a> {
pub(crate) struct NodeRefLocked<'a> {
inner: Mutex<&'a RoutingTableInner>,
nr: NodeRef,
}
@ -559,7 +561,7 @@ impl<'a> fmt::Debug for NodeRefLocked<'a> {
/// For internal use inside the RoutingTable module where you have
/// already locked a RoutingTableInner
/// Keeps entry in the routing table until all references are gone
pub struct NodeRefLockedMut<'a> {
pub(crate) struct NodeRefLockedMut<'a> {
inner: Mutex<&'a mut RoutingTableInner>,
nr: NodeRef,
}
@ -572,9 +574,9 @@ impl<'a> NodeRefLockedMut<'a> {
}
}
pub fn unlocked(&self) -> NodeRef {
self.nr.clone()
}
// pub fn unlocked(&self) -> NodeRef {
// self.nr.clone()
// }
}
impl<'a> NodeRefBase for NodeRefLockedMut<'a> {

View File

@ -35,6 +35,7 @@ impl NodeRefFilter {
self.dial_info_filter = self.dial_info_filter.with_protocol_type(protocol_type);
self
}
#[allow(dead_code)]
pub fn with_protocol_type_set(mut self, protocol_set: ProtocolTypeSet) -> Self {
self.dial_info_filter = self.dial_info_filter.with_protocol_type_set(protocol_set);
self
@ -43,6 +44,7 @@ impl NodeRefFilter {
self.dial_info_filter = self.dial_info_filter.with_address_type(address_type);
self
}
#[allow(dead_code)]
pub fn with_address_type_set(mut self, address_set: AddressTypeSet) -> Self {
self.dial_info_filter = self.dial_info_filter.with_address_type_set(address_set);
self
@ -54,6 +56,7 @@ impl NodeRefFilter {
.filtered(&other_filter.dial_info_filter);
self
}
#[allow(dead_code)]
pub fn is_dead(&self) -> bool {
self.dial_info_filter.is_dead() || self.routing_domain_set.is_empty()
}
@ -108,3 +111,12 @@ impl From<AddressType> for NodeRefFilter {
}
}
}
impl From<Flow> for NodeRefFilter {
fn from(other: Flow) -> Self {
Self {
routing_domain_set: RoutingDomainSet::all(),
dial_info_filter: DialInfoFilter::from(other),
}
}
}

View File

@ -5,7 +5,7 @@ use super::*;
/// An encrypted private/safety route hop
#[derive(Clone, Debug)]
pub struct RouteHopData {
pub(crate) struct RouteHopData {
/// The nonce used in the encryption ENC(Xn,DH(PKn,SKapr))
pub nonce: Nonce,
/// The encrypted blob
@ -14,7 +14,7 @@ pub struct RouteHopData {
/// How to find a route node
#[derive(Clone, Debug)]
pub enum RouteNode {
pub(crate) enum RouteNode {
/// Route node is optimized, no contact method information as this node id has been seen before
NodeId(PublicKey),
/// Route node with full contact method information to ensure the peer is reachable
@ -79,7 +79,7 @@ impl RouteNode {
/// An unencrypted private/safety route hop
#[derive(Clone, Debug)]
pub struct RouteHop {
pub(crate) struct RouteHop {
/// The location of the hop
pub node: RouteNode,
/// The encrypted blob to pass to the next hop as its data (None for stubs)
@ -93,7 +93,7 @@ impl RouteHop {
/// The kind of hops a private route can have
#[derive(Clone, Debug)]
pub enum PrivateRouteHops {
pub(crate) enum PrivateRouteHops {
/// The first hop of a private route, unencrypted, route_hops == total hop count
FirstHop(Box<RouteHop>),
/// Private route internal node. Has > 0 private route hops left but < total hop count
@ -113,7 +113,7 @@ impl PrivateRouteHops {
}
/// A private route for receiver privacy
#[derive(Clone, Debug)]
pub struct PrivateRoute {
pub(crate) struct PrivateRoute {
/// The public key used for the entire route
pub public_key: TypedKey,
pub hop_count: u8,
@ -121,14 +121,6 @@ pub struct PrivateRoute {
}
impl PrivateRoute {
/// Empty private route is the form used when receiving the last hop
pub fn new_empty(public_key: TypedKey) -> Self {
Self {
public_key,
hop_count: 0,
hops: PrivateRouteHops::Empty,
}
}
/// Stub route is the form used when no privacy is required, but you need to specify the destination for a safety route
pub fn new_stub(public_key: TypedKey, node: RouteNode) -> Self {
Self {
@ -225,7 +217,7 @@ impl fmt::Display for PrivateRoute {
}
#[derive(Clone, Debug)]
pub enum SafetyRouteHops {
pub(crate) enum SafetyRouteHops {
/// Has >= 1 safety route hops
Data(RouteHopData),
/// Has 0 safety route hops
@ -233,7 +225,7 @@ pub enum SafetyRouteHops {
}
#[derive(Clone, Debug)]
pub struct SafetyRoute {
pub(crate) struct SafetyRoute {
pub public_key: TypedKey,
pub hop_count: u8,
pub hops: SafetyRouteHops,

View File

@ -1,4 +1,5 @@
use super::*;
use crate::veilid_api::*;
mod permutation;
mod remote_private_route_info;
@ -7,15 +8,14 @@ mod route_spec_store_cache;
mod route_spec_store_content;
mod route_stats;
pub use remote_private_route_info::*;
pub use route_set_spec_detail::*;
pub use route_spec_store_cache::*;
pub use route_spec_store_content::*;
pub use route_stats::*;
use crate::veilid_api::*;
use permutation::*;
use remote_private_route_info::*;
use route_set_spec_detail::*;
use route_spec_store_cache::*;
use route_spec_store_content::*;
pub(crate) use route_spec_store_cache::CompiledRoute;
pub(crate) use route_stats::*;
/// The size of the remote private route cache
const REMOTE_PRIVATE_ROUTE_CACHE_SIZE: usize = 1024;
@ -27,14 +27,14 @@ const ROUTE_MIN_IDLE_TIME_MS: u32 = 30_000;
const COMPILED_ROUTE_CACHE_SIZE: usize = 256;
#[derive(Debug)]
pub struct RouteSpecStoreInner {
struct RouteSpecStoreInner {
/// Serialize RouteSpecStore content
content: RouteSpecStoreContent,
/// RouteSpecStore cache
cache: RouteSpecStoreCache,
}
pub struct RouteSpecStoreUnlockedInner {
struct RouteSpecStoreUnlockedInner {
/// Handle to routing table
routing_table: RoutingTable,
/// Maximum number of hops in a route
@ -54,7 +54,7 @@ impl fmt::Debug for RouteSpecStoreUnlockedInner {
/// The routing table's storage for private/safety routes
#[derive(Clone, Debug)]
pub struct RouteSpecStore {
pub(crate) struct RouteSpecStore {
inner: Arc<Mutex<RouteSpecStoreInner>>,
unlocked_inner: Arc<RouteSpecStoreUnlockedInner>,
}
@ -166,7 +166,8 @@ impl RouteSpecStore {
/// Returns Err(VeilidAPIError::TryAgain) if no route could be allocated at this time
/// Returns other errors on failure
/// Returns Ok(route id string) on success
#[instrument(level = "trace", skip(self), ret, err)]
#[instrument(level = "trace", skip(self), ret, err(level=Level::TRACE))]
#[allow(clippy::too_many_arguments)]
pub fn allocate_route(
&self,
crypto_kinds: &[CryptoKind],
@ -175,6 +176,7 @@ impl RouteSpecStore {
hop_count: usize,
directions: DirectionSet,
avoid_nodes: &[TypedKey],
automatic: bool,
) -> VeilidAPIResult<RouteId> {
let inner = &mut *self.inner.lock();
let routing_table = self.unlocked_inner.routing_table.clone();
@ -189,10 +191,11 @@ impl RouteSpecStore {
hop_count,
directions,
avoid_nodes,
automatic,
)
}
#[instrument(level = "trace", skip(self, inner, rti), ret, err)]
#[instrument(level = "trace", skip(self, inner, rti), ret, err(level=Level::TRACE))]
#[allow(clippy::too_many_arguments)]
fn allocate_route_inner(
&self,
@ -204,6 +207,7 @@ impl RouteSpecStore {
hop_count: usize,
directions: DirectionSet,
avoid_nodes: &[TypedKey],
automatic: bool,
) -> VeilidAPIResult<RouteId> {
use core::cmp::Ordering;
@ -576,6 +580,7 @@ impl RouteSpecStore {
directions,
stability,
can_do_sequenced,
automatic,
);
// make id
@ -661,9 +666,9 @@ impl RouteSpecStore {
)]
async fn test_allocated_route(&self, private_route_id: RouteId) -> VeilidAPIResult<bool> {
// Make loopback route to test with
let dest = {
// Get the best private route for this id
let (key, hop_count) = {
let (dest, hops) = {
// Get the best allocated route for this id
let (key, hops) = {
let inner = &mut *self.inner.lock();
let Some(rssd) = inner.content.get_detail(&private_route_id) else {
apibail_invalid_argument!(
@ -675,9 +680,10 @@ impl RouteSpecStore {
let Some(key) = rssd.get_best_route_set_key() else {
apibail_internal!("no best key to test allocated route");
};
// Match the private route's hop length for safety route length
let hop_count = rssd.hop_count();
(key, hop_count)
// Get the hops so we can match the route's hop length for safety
// route length as well as marking nodes as unreliable if this fails
let hops = rssd.hops_node_refs();
(key, hops)
};
// Get the private route to send to
@ -686,6 +692,8 @@ impl RouteSpecStore {
let stability = Stability::Reliable;
// Routes should test with the most likely to succeed sequencing they are capable of
let sequencing = Sequencing::PreferOrdered;
// Hop count for safety spec should match the private route spec
let hop_count = hops.len();
let safety_spec = SafetySpec {
preferred_route: Some(private_route_id),
@ -695,10 +703,13 @@ impl RouteSpecStore {
};
let safety_selection = SafetySelection::Safe(safety_spec);
Destination::PrivateRoute {
private_route,
safety_selection,
}
(
Destination::PrivateRoute {
private_route,
safety_selection,
},
hops,
)
};
// Test with double-round trip ping to self
@ -706,7 +717,12 @@ impl RouteSpecStore {
let _res = match rpc_processor.rpc_call_status(dest).await? {
NetworkResult::Value(v) => v,
_ => {
// Did not error, but did not come back, just return false
// Did not error, but did not come back, mark the nodes as failed to send, and then return false
// This will prevent those nodes from immediately being included in the next allocated route,
// avoiding the same route being constructed to replace this one when it is removed.
for hop in hops {
hop.report_failed_route_test();
}
return Ok(false);
}
};
@ -1244,6 +1260,7 @@ impl RouteSpecStore {
safety_spec.hop_count,
direction,
avoid_nodes,
true,
)?
};

View File

@ -2,7 +2,7 @@ use super::*;
/// What remote private routes have seen
#[derive(Debug, Clone, Default)]
pub struct RemotePrivateRouteInfo {
pub(crate) struct RemotePrivateRouteInfo {
/// The private routes themselves
private_routes: Vec<PrivateRoute>,
/// Did this remote private route see our node info due to no safety route in use

View File

@ -1,7 +1,7 @@
use super::*;
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct RouteSpecDetail {
pub(crate) struct RouteSpecDetail {
/// Crypto kind
pub crypto_kind: CryptoKind,
/// Secret key
@ -11,7 +11,7 @@ pub struct RouteSpecDetail {
}
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct RouteSetSpecDetail {
pub(crate) struct RouteSetSpecDetail {
/// Route set per crypto kind
route_set: BTreeMap<PublicKey, RouteSpecDetail>,
/// Route noderefs
@ -29,6 +29,8 @@ pub struct RouteSetSpecDetail {
can_do_sequenced: bool,
/// Stats
stats: RouteStats,
/// Automatically allocated route vs manually allocated route
automatic: bool,
}
impl RouteSetSpecDetail {
@ -39,6 +41,7 @@ impl RouteSetSpecDetail {
directions: DirectionSet,
stability: Stability,
can_do_sequenced: bool,
automatic: bool,
) -> Self {
Self {
route_set,
@ -48,14 +51,12 @@ impl RouteSetSpecDetail {
stability,
can_do_sequenced,
stats: RouteStats::new(cur_ts),
automatic,
}
}
pub fn get_route_by_key(&self, key: &PublicKey) -> Option<&RouteSpecDetail> {
self.route_set.get(key)
}
pub fn get_route_by_key_mut(&mut self, key: &PublicKey) -> Option<&mut RouteSpecDetail> {
self.route_set.get_mut(key)
}
pub fn get_route_set_keys(&self) -> TypedKeyGroup {
let mut tks = TypedKeyGroup::new();
for (k, v) in &self.route_set {
@ -74,11 +75,6 @@ impl RouteSetSpecDetail {
) -> alloc::collections::btree_map::Iter<PublicKey, RouteSpecDetail> {
self.route_set.iter()
}
pub fn iter_route_set_mut(
&mut self,
) -> alloc::collections::btree_map::IterMut<PublicKey, RouteSpecDetail> {
self.route_set.iter_mut()
}
pub fn get_stats(&self) -> &RouteStats {
&self.stats
}
@ -94,6 +90,9 @@ impl RouteSetSpecDetail {
pub fn hop_count(&self) -> usize {
self.hop_node_refs.len()
}
pub fn hops_node_refs(&self) -> Vec<NodeRef> {
self.hop_node_refs.clone()
}
pub fn hop_node_ref(&self, idx: usize) -> Option<NodeRef> {
self.hop_node_refs.get(idx).cloned()
}
@ -120,6 +119,9 @@ impl RouteSetSpecDetail {
}
false
}
pub fn is_automatic(&self) -> bool {
self.automatic
}
/// Generate a key for the cache that can be used to uniquely identify this route's contents
pub fn make_cache_key(&self, rti: &RoutingTableInner) -> Vec<u8> {

View File

@ -9,7 +9,7 @@ struct CompiledRouteCacheKey {
/// Compiled route (safety route + private route)
#[derive(Clone, Debug)]
pub struct CompiledRoute {
pub(crate) struct CompiledRoute {
/// The safety route attached to the private route
pub safety_route: SafetyRoute,
/// The secret used to encrypt the message payload
@ -20,7 +20,7 @@ pub struct CompiledRoute {
/// Ephemeral data used to help the RouteSpecStore operate efficiently
#[derive(Debug)]
pub struct RouteSpecStoreCache {
pub(super) struct RouteSpecStoreCache {
/// How many times nodes have been used
used_nodes: HashMap<PublicKey, usize>,
/// How many times nodes have been used at the terminal point of a route
@ -110,8 +110,10 @@ impl RouteSpecStoreCache {
self.invalidate_compiled_route_cache(pk);
}
// Mark it as dead for the update
self.dead_routes.push(id);
// Mark it as dead for the update if it wasn't automatically created
if !rssd.is_automatic() {
self.dead_routes.push(id);
}
true
}

View File

@ -2,7 +2,7 @@ use super::*;
/// The core representation of the RouteSpecStore that can be serialized
#[derive(Debug, Clone, Default, Serialize, Deserialize)]
pub struct RouteSpecStoreContent {
pub(super) struct RouteSpecStoreContent {
/// All of the route sets we have allocated so far indexed by key
id_by_key: HashMap<PublicKey, RouteId>,
/// All of the route sets we have allocated so far

View File

@ -1,7 +1,7 @@
use super::*;
#[derive(Clone, Debug, Default, Serialize, Deserialize)]
pub struct RouteStats {
pub(crate) struct RouteStats {
/// Consecutive failed to send count
#[serde(skip)]
pub failed_to_send: u32,
@ -94,6 +94,7 @@ impl RouteStats {
}
/// Get the transfer stats
#[allow(dead_code)]
pub fn transfer_stats(&self) -> &TransferStatsDownUp {
&self.transfer_stats_down_up
}

View File

@ -13,6 +13,7 @@ enum RoutingDomainChange {
SetRelayNodeKeepalive {
ts: Option<Timestamp>,
},
#[cfg_attr(target_arch = "wasm32", allow(dead_code))]
AddDialInfoDetail {
dial_info_detail: DialInfoDetail,
},
@ -27,7 +28,7 @@ enum RoutingDomainChange {
},
}
pub struct RoutingDomainEditor {
pub(crate) struct RoutingDomainEditor {
routing_table: RoutingTable,
routing_domain: RoutingDomain,
changes: Vec<RoutingDomainChange>,
@ -214,7 +215,7 @@ impl RoutingDomainEditor {
if this_changed {
info!(
"[{:?}] setup network: {:?} {:?} {:?} {:?}",
"[{:?}] setup network: outbound {:?} inbound {:?} address types {:?} capabilities {:?}",
self.routing_domain,
outbound_protocols,
inbound_protocols,

View File

@ -2,7 +2,7 @@ use super::*;
/// Mechanism required to contact another node
#[derive(Clone, Debug)]
pub enum ContactMethod {
pub(crate) enum ContactMethod {
/// Node is not reachable by any means
Unreachable,
/// Connection should have already existed
@ -20,7 +20,7 @@ pub enum ContactMethod {
}
#[derive(Debug)]
pub struct RoutingDomainDetailCommon {
pub(crate) struct RoutingDomainDetailCommon {
routing_domain: RoutingDomain,
network_class: Option<NetworkClass>,
outbound_protocols: ProtocolTypeSet,
@ -216,6 +216,7 @@ impl RoutingDomainDetailCommon {
f(cpi.as_ref().unwrap())
}
#[allow(dead_code)]
pub fn inbound_dial_info_filter(&self) -> DialInfoFilter {
DialInfoFilter::all()
.with_protocol_type_set(self.inbound_protocols)
@ -233,7 +234,7 @@ impl RoutingDomainDetailCommon {
}
/// General trait for all routing domains
pub trait RoutingDomainDetail {
pub(crate) trait RoutingDomainDetail {
// Common accessors
fn common(&self) -> &RoutingDomainDetailCommon;
fn common_mut(&mut self) -> &mut RoutingDomainDetailCommon;
@ -535,6 +536,7 @@ impl Default for LocalNetworkRoutingDomainDetail {
}
impl LocalNetworkRoutingDomainDetail {
#[cfg_attr(target_arch = "wasm32", allow(dead_code))]
pub fn set_local_networks(&mut self, mut local_networks: Vec<(IpAddr, IpAddr)>) -> bool {
local_networks.sort();
if local_networks == self.local_networks {

View File

@ -6,13 +6,8 @@ pub const RECENT_PEERS_TABLE_SIZE: usize = 64;
pub type EntryCounts = BTreeMap<(RoutingDomain, CryptoKind), usize>;
//////////////////////////////////////////////////////////////////////////
#[derive(Debug, Clone, Copy)]
pub struct RecentPeersEntry {
pub last_connection: ConnectionDescriptor,
}
/// RoutingTable rwlock-internal data
pub struct RoutingTableInner {
pub(crate) struct RoutingTableInner {
/// Extra pointer to unlocked members to simplify access
pub(super) unlocked_inner: Arc<RoutingTableUnlockedInner>,
/// Routing table buckets that hold references to entries, per crypto kind
@ -107,6 +102,7 @@ impl RoutingTableInner {
self.with_routing_domain(domain, |rd| rd.common().relay_node_last_keepalive())
}
#[allow(dead_code)]
pub fn has_dial_info(&self, domain: RoutingDomain) -> bool {
self.with_routing_domain(domain, |rd| !rd.common().dial_info_details().is_empty())
}
@ -115,6 +111,7 @@ impl RoutingTableInner {
self.with_routing_domain(domain, |rd| rd.common().dial_info_details().clone())
}
#[cfg_attr(target_arch = "wasm32", allow(dead_code))]
pub fn first_filtered_dial_info_detail(
&self,
routing_domain_set: RoutingDomainSet,
@ -238,7 +235,7 @@ impl RoutingTableInner {
let cur_ts = get_aligned_timestamp();
self.with_entries_mut(cur_ts, BucketEntryState::Dead, |rti, v| {
v.with_mut(rti, |_rti, e| {
e.set_updated_since_last_network_change(false)
e.reset_updated_since_last_network_change();
});
Option::<()>::None
});
@ -270,6 +267,7 @@ impl RoutingTableInner {
}
/// Return the domain's filter for what we can receive in the form of a dial info filter
#[allow(dead_code)]
pub fn get_inbound_dial_info_filter(&self, routing_domain: RoutingDomain) -> DialInfoFilter {
self.with_routing_domain(routing_domain, |rdd| {
rdd.common().inbound_dial_info_filter()
@ -277,6 +275,7 @@ impl RoutingTableInner {
}
/// Return the domain's filter for what we can receive in the form of a node ref filter
#[allow(dead_code)]
pub fn get_inbound_node_ref_filter(&self, routing_domain: RoutingDomain) -> NodeRefFilter {
let dif = self.get_inbound_dial_info_filter(routing_domain);
NodeRefFilter::new()
@ -325,6 +324,7 @@ impl RoutingTableInner {
}
}
#[cfg_attr(target_arch = "wasm32", allow(dead_code))]
pub fn configure_local_network_routing_domain(
&mut self,
local_networks: Vec<(IpAddr, IpAddr)>,
@ -341,7 +341,7 @@ impl RoutingTableInner {
self.with_entries_mut(cur_ts, BucketEntryState::Dead, |rti, e| {
e.with_mut(rti, |_rti, e| {
e.clear_signed_node_info(RoutingDomain::LocalNetwork);
e.set_updated_since_last_network_change(false);
e.reset_updated_since_last_network_change();
});
Option::<()>::None
});
@ -378,7 +378,7 @@ impl RoutingTableInner {
for bucket in &self.buckets[&ck] {
for entry in bucket.entries() {
entry.1.with_mut_inner(|e| {
e.clear_last_connections();
e.clear_last_flows();
});
}
}
@ -467,32 +467,6 @@ impl RoutingTableInner {
count
}
/// Count entries per crypto kind that match some criteria
pub fn get_entry_count_per_crypto_kind(
&self,
routing_domain_set: RoutingDomainSet,
min_state: BucketEntryState,
) -> BTreeMap<CryptoKind, usize> {
let mut counts = BTreeMap::new();
let cur_ts = get_aligned_timestamp();
self.with_entries(cur_ts, min_state, |rti, e| {
if let Some(crypto_kinds) = e.with_inner(|e| {
if e.best_routing_domain(rti, routing_domain_set).is_some() {
Some(e.crypto_kinds())
} else {
None
}
}) {
// Got crypto kinds, add to map
for ck in crypto_kinds {
counts.entry(ck).and_modify(|x| *x += 1).or_insert(1);
}
}
Option::<()>::None
});
counts
}
/// Iterate entries with a filter
pub fn with_entries<T, F: FnMut(&RoutingTableInner, Arc<BucketEntry>) -> Option<T>>(
&self,
@ -532,7 +506,7 @@ impl RoutingTableInner {
None
}
pub fn get_nodes_needing_ping(
pub(super) fn get_nodes_needing_ping(
&self,
outer_self: RoutingTable,
routing_domain: RoutingDomain,
@ -580,6 +554,7 @@ impl RoutingTableInner {
node_refs
}
#[allow(dead_code)]
pub fn get_all_nodes(&self, outer_self: RoutingTable, cur_ts: Timestamp) -> Vec<NodeRef> {
let mut node_refs = Vec::<NodeRef>::with_capacity(self.bucket_entry_count());
self.with_entries(cur_ts, BucketEntryState::Unreliable, |_rti, entry| {
@ -878,7 +853,7 @@ impl RoutingTableInner {
&mut self,
outer_self: RoutingTable,
node_id: TypedKey,
descriptor: ConnectionDescriptor,
flow: Flow,
timestamp: Timestamp,
) -> EyreResult<NodeRef> {
let nr = self.create_node_ref(outer_self, &TypedKeyGroup::from(node_id), |_rti, e| {
@ -886,8 +861,7 @@ impl RoutingTableInner {
e.touch_last_seen(timestamp);
})?;
// set the most recent node address for connection finding and udp replies
nr.locked_mut(self)
.set_last_connection(descriptor, timestamp);
nr.locked_mut(self).set_last_flow(flow, timestamp);
Ok(nr)
}
@ -937,7 +911,7 @@ impl RoutingTableInner {
}
}
pub fn touch_recent_peer(&mut self, node_id: TypedKey, last_connection: ConnectionDescriptor) {
pub fn touch_recent_peer(&mut self, node_id: TypedKey, last_connection: Flow) {
self.recent_peers
.insert(node_id, RecentPeersEntry { last_connection });
}

View File

@ -252,56 +252,106 @@ impl RoutingTable {
Ok(merged_bootstrap_records)
}
// 'direct' bootstrap task routine for systems incapable of resolving TXT records, such as browser WASM
#[instrument(level = "trace", skip(self), err)]
pub(crate) async fn direct_bootstrap_task_routine(
self,
stop_token: StopToken,
bootstrap_dialinfos: Vec<DialInfo>,
) -> EyreResult<()> {
let mut unord = FuturesUnordered::new();
let network_manager = self.network_manager();
//#[instrument(level = "trace", skip(self), err)]
pub(crate) fn bootstrap_with_peer(self, crypto_kinds: Vec<CryptoKind>, pi: PeerInfo, unord: &FuturesUnordered<SendPinBoxFuture<()>>) {
for bootstrap_di in bootstrap_dialinfos {
log_rtab!(debug "direct bootstrap with: {}", bootstrap_di);
let peer_info = network_manager.boot_request(bootstrap_di).await?;
log_rtab!(
"--- bootstrapping {} with {:?}",
pi.node_ids(),
pi.signed_node_info().node_info().dial_info_detail_list()
);
log_rtab!(debug " direct bootstrap peerinfo: {:?}", peer_info);
// Got peer info, let's add it to the routing table
for pi in peer_info {
// Register the node
let nr = match self.register_node_with_peer_info(
RoutingDomain::PublicInternet,
pi,
false,
) {
Ok(nr) => nr,
Err(e) => {
log_rtab!(error "failed to register direct bootstrap peer info: {}", e);
continue;
}
};
// Add this to our futures to process in parallel
for crypto_kind in VALID_CRYPTO_KINDS {
let routing_table = self.clone();
let nr = nr.clone();
unord.push(
// lets ask bootstrap to find ourselves now
async move { routing_table.reverse_find_node(crypto_kind, nr, true).await }
.instrument(Span::current()),
);
}
let nr =
match self.register_node_with_peer_info(RoutingDomain::PublicInternet, pi, true) {
Ok(nr) => nr,
Err(e) => {
log_rtab!(error "failed to register bootstrap peer info: {}", e);
return;
}
};
// Add this to our futures to process in parallel
for crypto_kind in crypto_kinds {
// Bootstrap this crypto kind
let nr = nr.clone();
let routing_table = self.clone();
unord.push(Box::pin(
async move {
// Get what contact method would be used for contacting the bootstrap
let bsdi = match routing_table
.network_manager()
.get_node_contact_method(nr.clone())
{
Ok(NodeContactMethod::Direct(v)) => v,
Ok(v) => {
log_rtab!(warn "invalid contact method for bootstrap: {:?}", v);
return;
}
Err(e) => {
log_rtab!(warn "unable to bootstrap: {}", e);
return;
}
};
// Need VALID signed peer info, so ask bootstrap to find_node of itself
// which will ensure it has the bootstrap's signed peer info as part of the response
let _ = routing_table.find_target(crypto_kind, nr.clone()).await;
// Ensure we got the signed peer info
if !nr.signed_node_info_has_valid_signature(RoutingDomain::PublicInternet) {
log_rtab!(warn "bootstrap server is not responding");
log_rtab!(debug "bootstrap server is not responding for dialinfo: {}", bsdi);
// Try a different dialinfo next time
routing_table.network_manager().address_filter().set_dial_info_failed(bsdi);
} else {
// otherwise this bootstrap is valid, lets ask it to find ourselves now
routing_table.reverse_find_node(crypto_kind, nr, true).await
}
}
.instrument(Span::current()),
));
}
}
#[instrument(level = "trace", skip(self), err)]
pub(crate) async fn bootstrap_with_peer_list(self, peers: Vec<PeerInfo>, stop_token: StopToken) -> EyreResult<()> {
log_rtab!(debug " bootstrapped peers: {:?}", &peers);
// Get crypto kinds to bootstrap
let crypto_kinds = self.get_bootstrap_crypto_kinds();
log_rtab!(debug " bootstrapped crypto kinds: {:?}", &crypto_kinds);
// Run all bootstrap operations concurrently
let mut unord = FuturesUnordered::<SendPinBoxFuture<()>>::new();
for peer in peers {
self.clone().bootstrap_with_peer(crypto_kinds.clone(), peer, &unord);
}
// Wait for all bootstrap operations to complete before we complete the singlefuture
while let Ok(Some(_)) = unord.next().timeout_at(stop_token.clone()).await {}
Ok(())
}
// Get counts by crypto kind and figure out which crypto kinds need bootstrapping
fn get_bootstrap_crypto_kinds(&self) -> Vec<CryptoKind> {
let entry_count = self.inner.read().cached_entry_counts();
let mut crypto_kinds = Vec::new();
for crypto_kind in VALID_CRYPTO_KINDS {
// Do we need to bootstrap this crypto kind?
let eckey = (RoutingDomain::PublicInternet, crypto_kind);
let cnt = entry_count.get(&eckey).copied().unwrap_or_default();
if cnt == 0 {
crypto_kinds.push(crypto_kind);
}
}
crypto_kinds
}
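get_bootstrap_crypto_kinds above is essentially a zero-count filter over the cached entry counts. A standalone sketch of the same idea with stand-in types; the real code keys the counts by (RoutingDomain, CryptoKind), and the kinds shown here are placeholders:

use std::collections::BTreeMap;

// Stand-in for the real four-character crypto kind codes.
type CryptoKind = [u8; 4];
const VALID_CRYPTO_KINDS: [CryptoKind; 2] = [*b"VLD0", *b"NONE"];

// Return the crypto kinds that have no routing table entries yet,
// i.e. the ones that still need bootstrapping.
fn kinds_needing_bootstrap(entry_counts: &BTreeMap<CryptoKind, usize>) -> Vec<CryptoKind> {
    VALID_CRYPTO_KINDS
        .iter()
        .copied()
        .filter(|ck| entry_counts.get(ck).copied().unwrap_or_default() == 0)
        .collect()
}

fn main() {
    let counts = BTreeMap::from([(*b"VLD0", 12usize)]);
    // Only the kind with zero entries is selected for bootstrap.
    assert_eq!(kinds_needing_bootstrap(&counts), [*b"NONE"]);
}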
#[instrument(level = "trace", skip(self), err)]
pub(crate) async fn bootstrap_task_routine(self, stop_token: StopToken) -> EyreResult<()> {
let bootstrap = self
@ -315,9 +365,6 @@ impl RoutingTable {
log_rtab!(debug "--- bootstrap_task");
// Get counts by crypto kind
let entry_count = self.inner.read().cached_entry_counts();
// See if we are specifying a direct dialinfo for bootstrap, if so use the direct mechanism
let mut bootstrap_dialinfos = Vec::<DialInfo>::new();
for b in &bootstrap {
@ -327,102 +374,48 @@ impl RoutingTable {
}
}
}
if !bootstrap_dialinfos.is_empty() {
return self
.direct_bootstrap_task_routine(stop_token, bootstrap_dialinfos)
.await;
}
// Get a peer list from bootstrap to process
let peers = if !bootstrap_dialinfos.is_empty() {
// Direct bootstrap
let network_manager = self.network_manager();
// If not direct, resolve bootstrap servers and recurse their TXT entries
let bsrecs = self.resolve_bootstrap(bootstrap).await?;
// Run all bootstrap operations concurrently
let mut unord = FuturesUnordered::new();
for bsrec in bsrecs {
log_rtab!(
"--- bootstrapping {} with {:?}",
&bsrec.node_ids,
&bsrec.dial_info_details
);
// Get crypto support from list of node ids
let crypto_support = bsrec.node_ids.kinds();
// Make unsigned SignedNodeInfo
let sni =
SignedNodeInfo::Direct(SignedDirectNodeInfo::with_no_signature(NodeInfo::new(
NetworkClass::InboundCapable, // Bootstraps are always inbound capable
ProtocolTypeSet::only(ProtocolType::UDP), // Bootstraps do not participate in relaying and will not make outbound requests, but will have UDP enabled
AddressTypeSet::all(), // Bootstraps are always IPV4 and IPV6 capable
bsrec.envelope_support, // Envelope support is as specified in the bootstrap list
crypto_support, // Crypto support is derived from list of node ids
vec![], // Bootstrap needs no capabilities
bsrec.dial_info_details, // Dial info is as specified in the bootstrap list
)));
let pi = PeerInfo::new(bsrec.node_ids, sni);
let nr =
match self.register_node_with_peer_info(RoutingDomain::PublicInternet, pi, true) {
Ok(nr) => nr,
Err(e) => {
log_rtab!(error "failed to register bootstrap peer info: {}", e);
continue;
let mut peer_map = HashMap::<TypedKeyGroup, PeerInfo>::new();
for bootstrap_di in bootstrap_dialinfos {
log_rtab!(debug "direct bootstrap with: {}", bootstrap_di);
let peers = network_manager.boot_request(bootstrap_di).await?;
for peer in peers {
if !peer_map.contains_key(peer.node_ids()) {
peer_map.insert(peer.node_ids().clone(), peer);
}
};
// Add this to our futures to process in parallel
for crypto_kind in VALID_CRYPTO_KINDS {
// Do we need to bootstrap this crypto kind?
let eckey = (RoutingDomain::PublicInternet, crypto_kind);
let cnt = entry_count.get(&eckey).copied().unwrap_or_default();
if cnt != 0 {
continue;
}
// Bootstrap this crypto kind
let nr = nr.clone();
let routing_table = self.clone();
unord.push(
async move {
// Get what contact method would be used for contacting the bootstrap
let bsdi = match routing_table
.network_manager()
.get_node_contact_method(nr.clone())
{
Ok(NodeContactMethod::Direct(v)) => v,
Ok(v) => {
log_rtab!(warn "invalid contact method for bootstrap: {:?}", v);
return;
}
Err(e) => {
log_rtab!(warn "unable to bootstrap: {}", e);
return;
}
};
// Need VALID signed peer info, so ask bootstrap to find_node of itself
// which will ensure it has the bootstrap's signed peer info as part of the response
let _ = routing_table.find_target(crypto_kind, nr.clone()).await;
// Ensure we got the signed peer info
if !nr.signed_node_info_has_valid_signature(RoutingDomain::PublicInternet) {
log_rtab!(warn "bootstrap server is not responding");
log_rtab!(debug "bootstrap server is not responding for dialinfo: {}", bsdi);
// Try a different dialinfo next time
routing_table.network_manager().address_filter().set_dial_info_failed(bsdi);
} else {
// otherwise this bootstrap is valid, lets ask it to find ourselves now
routing_table.reverse_find_node(crypto_kind, nr, true).await
}
}
.instrument(Span::current()),
);
}
}
peer_map.into_values().collect()
} else {
// If not direct, resolve bootstrap servers and recurse their TXT entries
let bsrecs = self.resolve_bootstrap(bootstrap).await?;
let peers : Vec<PeerInfo> = bsrecs.into_iter().map(|bsrec| {
// Get crypto support from list of node ids
let crypto_support = bsrec.node_ids.kinds();
// Wait for all bootstrap operations to complete before we complete the singlefuture
while let Ok(Some(_)) = unord.next().timeout_at(stop_token.clone()).await {}
Ok(())
// Make unsigned SignedNodeInfo
let sni =
SignedNodeInfo::Direct(SignedDirectNodeInfo::with_no_signature(NodeInfo::new(
NetworkClass::InboundCapable, // Bootstraps are always inbound capable
ProtocolTypeSet::only(ProtocolType::UDP), // Bootstraps do not participate in relaying and will not make outbound requests, but will have UDP enabled
AddressTypeSet::all(), // Bootstraps are always IPV4 and IPV6 capable
bsrec.envelope_support, // Envelope support is as specified in the bootstrap list
crypto_support, // Crypto support is derived from list of node ids
vec![], // Bootstrap needs no capabilities
bsrec.dial_info_details, // Dial info is as specified in the bootstrap list
)));
PeerInfo::new(bsrec.node_ids, sni)
}).collect();
peers
};
self.clone().bootstrap_with_peer_list(peers, stop_token).await
}
}
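The direct-bootstrap branch above de-duplicates peers by their node-id group before handing them to bootstrap_with_peer_list. The same first-one-wins behavior can also be written with the map entry API; a small sketch with stand-in types (NodeIds and PeerInfo here are simplified placeholders, not the real TypedKeyGroup and PeerInfo):

use std::collections::HashMap;

// Stand-ins for the real node-id group and peer info types.
type NodeIds = String;
#[derive(Debug, Clone, PartialEq)]
struct PeerInfo {
    node_ids: NodeIds,
    dial_info: String,
}

// Keep only the first PeerInfo seen for each node-id group.
fn dedup_peers(peers: Vec<PeerInfo>) -> Vec<PeerInfo> {
    let mut by_ids: HashMap<NodeIds, PeerInfo> = HashMap::new();
    for peer in peers {
        by_ids.entry(peer.node_ids.clone()).or_insert(peer);
    }
    by_ids.into_values().collect()
}

fn main() {
    let peers = vec![
        PeerInfo { node_ids: "a".into(), dial_info: "udp/1".into() },
        PeerInfo { node_ids: "a".into(), dial_info: "udp/2".into() },
        PeerInfo { node_ids: "b".into(), dial_info: "udp/3".into() },
    ];
    // Two distinct node-id groups remain; the duplicate "a" entry is dropped.
    assert_eq!(dedup_peers(peers).len(), 2);
}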

View File

@ -4,6 +4,9 @@ use super::*;
/// remains valid, as well as to make sure we remain in any relay node's routing table
const KEEPALIVE_PING_INTERVAL_SECS: u32 = 10;
/// Ping queue processing depth
const MAX_PARALLEL_PINGS: usize = 16;
use futures_util::stream::{FuturesUnordered, StreamExt};
use futures_util::FutureExt;
use stop_token::future::FutureExt as StopFutureExt;
@ -14,12 +17,12 @@ type PingValidatorFuture =
impl RoutingTable {
// Ping each node in the routing table if they need to be pinged
// to determine their reliability
#[instrument(level = "trace", skip(self), err)]
#[instrument(level = "trace", skip(self, futurequeue), err)]
async fn relay_keepalive_public_internet(
&self,
cur_ts: Timestamp,
relay_nr: NodeRef,
unord: &mut FuturesUnordered<PingValidatorFuture>,
futurequeue: &mut VecDeque<PingValidatorFuture>,
) -> EyreResult<()> {
let rpc = self.rpc_processor();
// Get our publicinternet dial info
@ -107,7 +110,7 @@ impl RoutingTable {
#[cfg(not(feature = "network-result-extra"))]
log_rtab!("--> Keepalive ping to {:?}", relay_nr_filtered);
unord.push(
futurequeue.push_back(
async move {
rpc.rpc_call_status(Destination::direct(relay_nr_filtered))
.await
@ -120,11 +123,11 @@ impl RoutingTable {
}
// Ping each node in the routing table if they need to be pinged
// to determine their reliability
#[instrument(level = "trace", skip(self), err)]
#[instrument(level = "trace", skip(self, futurequeue), err)]
async fn ping_validator_public_internet(
&self,
cur_ts: Timestamp,
unord: &mut FuturesUnordered<PingValidatorFuture>,
futurequeue: &mut VecDeque<PingValidatorFuture>,
) -> EyreResult<()> {
let rpc = self.rpc_processor();
@ -136,7 +139,7 @@ impl RoutingTable {
// If this is our relay, let's check for NAT keepalives
if let Some(relay_nr) = opt_relay_nr {
self.relay_keepalive_public_internet(cur_ts, relay_nr, unord)
self.relay_keepalive_public_internet(cur_ts, relay_nr, futurequeue)
.await?;
}
@ -144,7 +147,7 @@ impl RoutingTable {
for nr in node_refs {
let rpc = rpc.clone();
log_rtab!("--> Validator ping to {:?}", nr);
unord.push(
futurequeue.push_back(
async move { rpc.rpc_call_status(Destination::direct(nr)).await }
.instrument(Span::current())
.boxed(),
@ -156,11 +159,11 @@ impl RoutingTable {
// Ping each node in the LocalNetwork routing domain if they
// need to be pinged to determine their reliability
#[instrument(level = "trace", skip(self), err)]
#[instrument(level = "trace", skip(self, futurequeue), err)]
async fn ping_validator_local_network(
&self,
cur_ts: Timestamp,
unord: &mut FuturesUnordered<PingValidatorFuture>,
futurequeue: &mut VecDeque<PingValidatorFuture>,
) -> EyreResult<()> {
let rpc = self.rpc_processor();
@ -172,7 +175,7 @@ impl RoutingTable {
let rpc = rpc.clone();
// Just do a single ping with the best protocol for all the nodes
unord.push(
futurequeue.push_back(
async move { rpc.rpc_call_status(Destination::direct(nr)).await }
.instrument(Span::current())
.boxed(),
@ -191,18 +194,44 @@ impl RoutingTable {
_last_ts: Timestamp,
cur_ts: Timestamp,
) -> EyreResult<()> {
let mut unord = FuturesUnordered::new();
let mut futurequeue: VecDeque<PingValidatorFuture> = VecDeque::new();
// PublicInternet
self.ping_validator_public_internet(cur_ts, &mut unord)
self.ping_validator_public_internet(cur_ts, &mut futurequeue)
.await?;
// LocalNetwork
self.ping_validator_local_network(cur_ts, &mut unord)
self.ping_validator_local_network(cur_ts, &mut futurequeue)
.await?;
// Wait for ping futures to complete in parallel
while let Ok(Some(_)) = unord.next().timeout_at(stop_token.clone()).await {}
let mut unord = FuturesUnordered::new();
while !unord.is_empty() || !futurequeue.is_empty() {
#[cfg(feature = "verbose-tracing")]
log_rtab!(debug "Ping validation queue: {} remaining, {} in progress", futurequeue.len(), unord.len());
// Process one unordered future if we have some
match unord.next().timeout_at(stop_token.clone()).await {
Ok(Some(_)) => {
// Some ping completed
}
Ok(None) => {
// We're empty
}
Err(_) => {
// Timeout means we drop the rest because we were asked to stop
break;
}
}
// Fill unord up to max parallelism
while unord.len() < MAX_PARALLEL_PINGS {
let Some(fq) = futurequeue.pop_front() else {
break;
};
unord.push(fq);
}
}
Ok(())
}
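The new ping loop bounds the number of in-flight pings instead of pushing everything into one FuturesUnordered at once. Stripped of the stop-token and RPC details, the core refill pattern looks roughly like the sketch below, which assumes the futures crate; run_bounded and MAX_PARALLEL are illustrative names, not part of this codebase:

use std::collections::VecDeque;
use futures::stream::{FuturesUnordered, StreamExt};

const MAX_PARALLEL: usize = 16;

// Drain a queue of futures while keeping at most MAX_PARALLEL in flight.
async fn run_bounded<F>(mut queue: VecDeque<F>)
where
    F: std::future::Future<Output = ()>,
{
    let mut in_flight = FuturesUnordered::new();
    while !in_flight.is_empty() || !queue.is_empty() {
        // Refill up to the parallelism limit.
        while in_flight.len() < MAX_PARALLEL {
            let Some(fut) = queue.pop_front() else {
                break;
            };
            in_flight.push(fut);
        }
        // Wait for one in-flight future to complete before refilling again.
        in_flight.next().await;
    }
}

fn main() {
    // Forty trivial "pings"; at most sixteen are polled concurrently.
    let queue: VecDeque<_> = (0..40).map(|i| async move { let _ = i; }).collect();
    futures::executor::block_on(run_bounded(queue));
    println!("all pings processed");
}

Refilling only after a completion keeps memory and socket pressure bounded while still letting the stop token (in the real routine) abandon the remaining queue promptly.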

View File

@ -219,6 +219,7 @@ impl RoutingTable {
default_route_hop_count,
DirectionSet::all(),
&[],
true,
) {
Err(VeilidAPIError::TryAgain { message }) => {
log_rtab!(debug "Route allocation unavailable: {}", message);

View File

@ -19,7 +19,6 @@ pub(crate) fn mock_routing_table() -> routing_table::RoutingTable {
let network_manager = network_manager::NetworkManager::new(
veilid_config.clone(),
storage_manager,
protected_store.clone(),
table_store.clone(),
#[cfg(feature = "unstable-blockstore")]
block_store.clone(),

View File

@ -1,7 +1,7 @@
use super::*;
use core::convert::TryInto;
pub fn encode_address(
pub(crate) fn encode_address(
address: &Address,
builder: &mut veilid_capnp::address::Builder,
) -> Result<(), RPCError> {
@ -37,7 +37,7 @@ pub fn encode_address(
Ok(())
}
pub fn decode_address(reader: &veilid_capnp::address::Reader) -> Result<Address, RPCError> {
pub(crate) fn decode_address(reader: &veilid_capnp::address::Reader) -> Result<Address, RPCError> {
match reader.reborrow().which() {
Ok(veilid_capnp::address::Which::Ipv4(Ok(v4))) => {
let v4b = v4.get_addr().to_be_bytes();

View File

@ -1,6 +1,6 @@
use super::*;
pub fn encode_address_type_set(
pub(crate) fn encode_address_type_set(
address_type_set: &AddressTypeSet,
builder: &mut veilid_capnp::address_type_set::Builder,
) -> Result<(), RPCError> {
@ -10,7 +10,7 @@ pub fn encode_address_type_set(
Ok(())
}
pub fn decode_address_type_set(
pub(crate) fn decode_address_type_set(
reader: &veilid_capnp::address_type_set::Reader,
) -> Result<AddressTypeSet, RPCError> {
let mut out = AddressTypeSet::new();

View File

@ -1,7 +1,9 @@
use super::*;
use core::convert::TryInto;
pub fn decode_dial_info(reader: &veilid_capnp::dial_info::Reader) -> Result<DialInfo, RPCError> {
pub(crate) fn decode_dial_info(
reader: &veilid_capnp::dial_info::Reader,
) -> Result<DialInfo, RPCError> {
match reader
.reborrow()
.which()
@ -60,7 +62,7 @@ pub fn decode_dial_info(reader: &veilid_capnp::dial_info::Reader) -> Result<Dial
}
}
pub fn encode_dial_info(
pub(crate) fn encode_dial_info(
dial_info: &DialInfo,
builder: &mut veilid_capnp::dial_info::Builder,
) -> Result<(), RPCError> {

View File

@ -1,6 +1,8 @@
use super::*;
pub fn encode_dial_info_class(dial_info_class: DialInfoClass) -> veilid_capnp::DialInfoClass {
pub(crate) fn encode_dial_info_class(
dial_info_class: DialInfoClass,
) -> veilid_capnp::DialInfoClass {
match dial_info_class {
DialInfoClass::Direct => veilid_capnp::DialInfoClass::Direct,
DialInfoClass::Mapped => veilid_capnp::DialInfoClass::Mapped,
@ -11,7 +13,9 @@ pub fn encode_dial_info_class(dial_info_class: DialInfoClass) -> veilid_capnp::D
}
}
pub fn decode_dial_info_class(dial_info_class: veilid_capnp::DialInfoClass) -> DialInfoClass {
pub(crate) fn decode_dial_info_class(
dial_info_class: veilid_capnp::DialInfoClass,
) -> DialInfoClass {
match dial_info_class {
veilid_capnp::DialInfoClass::Direct => DialInfoClass::Direct,
veilid_capnp::DialInfoClass::Mapped => DialInfoClass::Mapped,

View File

@ -1,6 +1,6 @@
use super::*;
pub fn encode_dial_info_detail(
pub(crate) fn encode_dial_info_detail(
dial_info_detail: &DialInfoDetail,
builder: &mut veilid_capnp::dial_info_detail::Builder,
) -> Result<(), RPCError> {
@ -11,7 +11,7 @@ pub fn encode_dial_info_detail(
Ok(())
}
pub fn decode_dial_info_detail(
pub(crate) fn decode_dial_info_detail(
reader: &veilid_capnp::dial_info_detail::Reader,
) -> Result<DialInfoDetail, RPCError> {
let dial_info = decode_dial_info(

View File

@ -1,7 +1,7 @@
use super::*;
use core::convert::TryInto;
pub fn decode_key256(public_key: &veilid_capnp::key256::Reader) -> PublicKey {
pub(crate) fn decode_key256(public_key: &veilid_capnp::key256::Reader) -> PublicKey {
let u0 = public_key.get_u0().to_be_bytes();
let u1 = public_key.get_u1().to_be_bytes();
let u2 = public_key.get_u2().to_be_bytes();
@ -16,7 +16,7 @@ pub fn decode_key256(public_key: &veilid_capnp::key256::Reader) -> PublicKey {
PublicKey::new(x)
}
pub fn encode_key256(key: &PublicKey, builder: &mut veilid_capnp::key256::Builder) {
pub(crate) fn encode_key256(key: &PublicKey, builder: &mut veilid_capnp::key256::Builder) {
builder.set_u0(u64::from_be_bytes(
key.bytes[0..8]
.try_into()

View File

@ -27,21 +27,22 @@ mod tunnel;
mod typed_key;
mod typed_signature;
pub use address::*;
pub use address_type_set::*;
pub use dial_info::*;
pub use dial_info_class::*;
pub use dial_info_detail::*;
pub use key256::*;
pub use network_class::*;
pub use node_info::*;
pub use node_status::*;
pub use nonce::*;
pub use operations::*;
pub use peer_info::*;
pub use private_safety_route::*;
pub use protocol_type_set::*;
pub use sender_info::*;
pub(in crate::rpc_processor) use operations::*;
pub(crate) use address::*;
pub(crate) use address_type_set::*;
pub(crate) use dial_info::*;
pub(crate) use dial_info_class::*;
pub(crate) use dial_info_detail::*;
pub(crate) use key256::*;
pub(crate) use network_class::*;
pub(crate) use node_info::*;
pub(crate) use node_status::*;
pub(crate) use nonce::*;
pub(crate) use peer_info::*;
pub(crate) use private_safety_route::*;
pub(crate) use protocol_type_set::*;
pub(crate) use sender_info::*;
pub use sequencing::*;
pub use signal_info::*;
pub use signature512::*;
@ -59,14 +60,14 @@ pub use typed_signature::*;
use super::*;
#[derive(Debug, Clone)]
pub enum QuestionContext {
pub(in crate::rpc_processor) enum QuestionContext {
GetValue(ValidateGetValueContext),
SetValue(ValidateSetValueContext),
}
#[derive(Clone)]
pub struct RPCValidateContext {
pub(in crate::rpc_processor) struct RPCValidateContext {
pub crypto: Crypto,
pub rpc_processor: RPCProcessor,
// pub rpc_processor: RPCProcessor,
pub question_context: Option<QuestionContext>,
}

View File

@ -1,7 +1,7 @@
use super::*;
#[derive(Debug, Clone)]
pub struct RPCAnswer {
pub(in crate::rpc_processor) struct RPCAnswer {
detail: RPCAnswerDetail,
}
@ -30,7 +30,7 @@ impl RPCAnswer {
}
#[derive(Debug, Clone)]
pub enum RPCAnswerDetail {
pub(in crate::rpc_processor) enum RPCAnswerDetail {
StatusA(Box<RPCOperationStatusA>),
FindNodeA(Box<RPCOperationFindNodeA>),
AppCallA(Box<RPCOperationAppCallA>),

View File

@ -29,34 +29,34 @@ mod operation_complete_tunnel;
#[cfg(feature = "unstable-tunnels")]
mod operation_start_tunnel;
pub use answer::*;
pub use operation::*;
pub use operation_app_call::*;
pub use operation_app_message::*;
pub use operation_find_node::*;
pub use operation_get_value::*;
pub use operation_return_receipt::*;
pub use operation_route::*;
pub use operation_set_value::*;
pub use operation_signal::*;
pub use operation_status::*;
pub use operation_validate_dial_info::*;
pub use operation_value_changed::*;
pub use operation_watch_value::*;
pub use question::*;
pub use respond_to::*;
pub use statement::*;
pub(in crate::rpc_processor) use answer::*;
pub(in crate::rpc_processor) use operation::*;
pub(in crate::rpc_processor) use operation_app_call::*;
pub(in crate::rpc_processor) use operation_app_message::*;
pub(in crate::rpc_processor) use operation_find_node::*;
pub(in crate::rpc_processor) use operation_get_value::*;
pub(in crate::rpc_processor) use operation_return_receipt::*;
pub(in crate::rpc_processor) use operation_route::*;
pub(in crate::rpc_processor) use operation_set_value::*;
pub(in crate::rpc_processor) use operation_signal::*;
pub(in crate::rpc_processor) use operation_status::*;
pub(in crate::rpc_processor) use operation_validate_dial_info::*;
pub(in crate::rpc_processor) use operation_value_changed::*;
pub(in crate::rpc_processor) use operation_watch_value::*;
pub(in crate::rpc_processor) use question::*;
pub(in crate::rpc_processor) use respond_to::*;
pub(in crate::rpc_processor) use statement::*;
#[cfg(feature = "unstable-blockstore")]
pub use operation_find_block::*;
pub(in crate::rpc_processor) use operation_find_block::*;
#[cfg(feature = "unstable-blockstore")]
pub use operation_supply_block::*;
pub(in crate::rpc_processor) use operation_supply_block::*;
#[cfg(feature = "unstable-tunnels")]
pub use operation_cancel_tunnel::*;
pub(in crate::rpc_processor) use operation_cancel_tunnel::*;
#[cfg(feature = "unstable-tunnels")]
pub use operation_complete_tunnel::*;
pub(in crate::rpc_processor) use operation_complete_tunnel::*;
#[cfg(feature = "unstable-tunnels")]
pub use operation_start_tunnel::*;
pub(in crate::rpc_processor) use operation_start_tunnel::*;
use super::*;

View File

@ -1,7 +1,7 @@
use super::*;
#[derive(Debug, Clone)]
pub enum RPCOperationKind {
pub(in crate::rpc_processor) enum RPCOperationKind {
Question(Box<RPCQuestion>),
Statement(Box<RPCStatement>),
Answer(Box<RPCAnswer>),
@ -60,7 +60,7 @@ impl RPCOperationKind {
}
#[derive(Debug, Clone)]
pub struct RPCOperation {
pub(in crate::rpc_processor) struct RPCOperation {
op_id: OperationId,
opt_sender_peer_info: Option<PeerInfo>,
target_node_info_ts: Timestamp,

View File

@ -4,7 +4,7 @@ const MAX_APP_CALL_Q_MESSAGE_LEN: usize = 32768;
const MAX_APP_CALL_A_MESSAGE_LEN: usize = 32768;
#[derive(Debug, Clone)]
pub struct RPCOperationAppCallQ {
pub(in crate::rpc_processor) struct RPCOperationAppCallQ {
message: Vec<u8>,
}
@ -46,7 +46,7 @@ impl RPCOperationAppCallQ {
}
#[derive(Debug, Clone)]
pub struct RPCOperationAppCallA {
pub(in crate::rpc_processor) struct RPCOperationAppCallA {
message: Vec<u8>,
}

View File

@ -3,7 +3,7 @@ use super::*;
const MAX_APP_MESSAGE_MESSAGE_LEN: usize = 32768;
#[derive(Debug, Clone)]
pub struct RPCOperationAppMessage {
pub(in crate::rpc_processor) struct RPCOperationAppMessage {
message: Vec<u8>,
}

View File

@ -2,7 +2,7 @@ use super::*;
#[cfg(feature = "unstable-tunnels")]
#[derive(Debug, Clone)]
pub struct RPCOperationCancelTunnelQ {
pub(in crate::rpc_processor) struct RPCOperationCancelTunnelQ {
id: TunnelId,
}
@ -40,7 +40,7 @@ impl RPCOperationCancelTunnelQ {
#[cfg(feature = "unstable-tunnels")]
#[derive(Debug, Clone)]
pub enum RPCOperationCancelTunnelA {
pub(in crate::rpc_processor) enum RPCOperationCancelTunnelA {
Tunnel(TunnelId),
Error(TunnelError),
}

View File

@ -2,7 +2,7 @@ use super::*;
#[cfg(feature = "unstable-tunnels")]
#[derive(Debug, Clone)]
pub struct RPCOperationCompleteTunnelQ {
pub(in crate::rpc_processor) struct RPCOperationCompleteTunnelQ {
id: TunnelId,
local_mode: TunnelMode,
depth: u8,
@ -77,7 +77,7 @@ impl RPCOperationCompleteTunnelQ {
#[cfg(feature = "unstable-tunnels")]
#[derive(Debug, Clone)]
pub enum RPCOperationCompleteTunnelA {
pub(in crate::rpc_processor) enum RPCOperationCompleteTunnelA {
Tunnel(FullTunnel),
Error(TunnelError),
}

View File

@ -5,7 +5,7 @@ const MAX_FIND_BLOCK_A_SUPPLIERS_LEN: usize = 10;
const MAX_FIND_BLOCK_A_PEERS_LEN: usize = 10;
#[derive(Debug, Clone)]
pub struct RPCOperationFindBlockQ {
pub(in crate::rpc_processor) struct RPCOperationFindBlockQ {
block_id: TypedKey,
}
@ -45,7 +45,7 @@ impl RPCOperationFindBlockQ {
}
#[derive(Debug, Clone)]
pub struct RPCOperationFindBlockA {
pub(in crate::rpc_processor) struct RPCOperationFindBlockA {
data: Vec<u8>,
suppliers: Vec<PeerInfo>,
peers: Vec<PeerInfo>,

View File

@ -3,7 +3,7 @@ use super::*;
const MAX_FIND_NODE_A_PEERS_LEN: usize = 20;
#[derive(Debug, Clone)]
pub struct RPCOperationFindNodeQ {
pub(in crate::rpc_processor) struct RPCOperationFindNodeQ {
node_id: TypedKey,
capabilities: Vec<Capability>,
}
@ -74,7 +74,7 @@ impl RPCOperationFindNodeQ {
}
#[derive(Debug, Clone)]
pub struct RPCOperationFindNodeA {
pub(in crate::rpc_processor) struct RPCOperationFindNodeA {
peers: Vec<PeerInfo>,
}

View File

@ -4,7 +4,7 @@ use crate::storage_manager::{SignedValueData, SignedValueDescriptor};
const MAX_GET_VALUE_A_PEERS_LEN: usize = 20;
#[derive(Clone)]
pub struct ValidateGetValueContext {
pub(in crate::rpc_processor) struct ValidateGetValueContext {
pub last_descriptor: Option<SignedValueDescriptor>,
pub subkey: ValueSubkey,
pub vcrypto: CryptoSystemVersion,
@ -21,7 +21,7 @@ impl fmt::Debug for ValidateGetValueContext {
}
#[derive(Debug, Clone)]
pub struct RPCOperationGetValueQ {
pub(in crate::rpc_processor) struct RPCOperationGetValueQ {
key: TypedKey,
subkey: ValueSubkey,
want_descriptor: bool,
@ -76,7 +76,7 @@ impl RPCOperationGetValueQ {
}
#[derive(Debug, Clone)]
pub struct RPCOperationGetValueA {
pub(in crate::rpc_processor) struct RPCOperationGetValueA {
value: Option<SignedValueData>,
peers: Vec<PeerInfo>,
descriptor: Option<SignedValueDescriptor>,

View File

@ -1,7 +1,7 @@
use super::*;
#[derive(Debug, Clone)]
pub struct RPCOperationReturnReceipt {
pub(in crate::rpc_processor) struct RPCOperationReturnReceipt {
receipt: Vec<u8>,
}

View File

@ -1,7 +1,7 @@
use super::*;
#[derive(Clone)]
pub struct RoutedOperation {
pub(in crate::rpc_processor) struct RoutedOperation {
sequencing: Sequencing,
signatures: Vec<Signature>,
nonce: Nonce,
@ -106,7 +106,7 @@ impl RoutedOperation {
}
#[derive(Debug, Clone)]
pub struct RPCOperationRoute {
pub(in crate::rpc_processor) struct RPCOperationRoute {
safety_route: SafetyRoute,
operation: RoutedOperation,
}

View File

@ -4,7 +4,7 @@ use crate::storage_manager::{SignedValueData, SignedValueDescriptor};
const MAX_SET_VALUE_A_PEERS_LEN: usize = 20;
#[derive(Clone)]
pub struct ValidateSetValueContext {
pub(in crate::rpc_processor) struct ValidateSetValueContext {
pub descriptor: SignedValueDescriptor,
pub subkey: ValueSubkey,
pub vcrypto: CryptoSystemVersion,
@ -21,7 +21,7 @@ impl fmt::Debug for ValidateSetValueContext {
}
#[derive(Debug, Clone)]
pub struct RPCOperationSetValueQ {
pub(in crate::rpc_processor) struct RPCOperationSetValueQ {
key: TypedKey,
subkey: ValueSubkey,
value: SignedValueData,
@ -110,7 +110,7 @@ impl RPCOperationSetValueQ {
}
#[derive(Debug, Clone)]
pub struct RPCOperationSetValueA {
pub(in crate::rpc_processor) struct RPCOperationSetValueA {
set: bool,
value: Option<SignedValueData>,
peers: Vec<PeerInfo>,

View File

@ -1,7 +1,7 @@
use super::*;
#[derive(Debug, Clone)]
pub struct RPCOperationSignal {
pub(in crate::rpc_processor) struct RPCOperationSignal {
signal_info: SignalInfo,
}

View File

@ -2,7 +2,7 @@ use super::*;
#[cfg(feature = "unstable-tunnels")]
#[derive(Debug, Clone)]
pub struct RPCOperationStartTunnelQ {
pub(in crate::rpc_processor) struct RPCOperationStartTunnelQ {
id: TunnelId,
local_mode: TunnelMode,
depth: u8,
@ -67,7 +67,7 @@ impl RPCOperationStartTunnelQ {
#[cfg(feature = "unstable-tunnels")]
#[derive(Debug, Clone)]
pub enum RPCOperationStartTunnelA {
pub(in crate::rpc_processor) enum RPCOperationStartTunnelA {
Partial(PartialTunnel),
Error(TunnelError),
}

View File

@ -1,7 +1,7 @@
use super::*;
#[derive(Debug, Clone)]
pub struct RPCOperationStatusQ {
pub(in crate::rpc_processor) struct RPCOperationStatusQ {
node_status: Option<NodeStatus>,
}
@ -43,7 +43,7 @@ impl RPCOperationStatusQ {
}
#[derive(Debug, Clone)]
pub struct RPCOperationStatusA {
pub(in crate::rpc_processor) struct RPCOperationStatusA {
node_status: Option<NodeStatus>,
sender_info: Option<SenderInfo>,
}

View File

@ -3,7 +3,7 @@ use super::*;
const MAX_SUPPLY_BLOCK_A_PEERS_LEN: usize = 20;
#[derive(Debug, Clone)]
pub struct RPCOperationSupplyBlockQ {
pub(in crate::rpc_processor) struct RPCOperationSupplyBlockQ {
block_id: TypedKey,
}
@ -43,7 +43,7 @@ impl RPCOperationSupplyBlockQ {
}
#[derive(Debug, Clone)]
pub struct RPCOperationSupplyBlockA {
pub(in crate::rpc_processor) struct RPCOperationSupplyBlockA {
expiration: u64,
peers: Vec<PeerInfo>,
}

View File

@ -1,7 +1,7 @@
use super::*;
#[derive(Debug, Clone)]
pub struct RPCOperationValidateDialInfo {
pub(in crate::rpc_processor) struct RPCOperationValidateDialInfo {
dial_info: DialInfo,
receipt: Vec<u8>,
redirect: bool,

View File

@ -2,7 +2,7 @@ use super::*;
use crate::storage_manager::SignedValueData;
#[derive(Debug, Clone)]
pub struct RPCOperationValueChanged {
pub(in crate::rpc_processor) struct RPCOperationValueChanged {
key: TypedKey,
subkeys: ValueSubkeyRangeSet,
count: u32,

View File

@ -4,7 +4,7 @@ const MAX_WATCH_VALUE_Q_SUBKEYS_LEN: usize = 512;
const MAX_WATCH_VALUE_A_PEERS_LEN: usize = 20;
#[derive(Debug, Clone)]
pub struct RPCOperationWatchValueQ {
pub(in crate::rpc_processor) struct RPCOperationWatchValueQ {
key: TypedKey,
subkeys: ValueSubkeyRangeSet,
expiration: u64,
@ -199,7 +199,7 @@ impl RPCOperationWatchValueQ {
}
#[derive(Debug, Clone)]
pub struct RPCOperationWatchValueA {
pub(in crate::rpc_processor) struct RPCOperationWatchValueA {
expiration: u64,
peers: Vec<PeerInfo>,
}

View File

@ -1,7 +1,7 @@
use super::*;
#[derive(Debug, Clone)]
pub struct RPCQuestion {
pub(in crate::rpc_processor) struct RPCQuestion {
respond_to: RespondTo,
detail: RPCQuestionDetail,
}
@ -42,7 +42,7 @@ impl RPCQuestion {
}
#[derive(Debug, Clone)]
pub enum RPCQuestionDetail {
pub(in crate::rpc_processor) enum RPCQuestionDetail {
StatusQ(Box<RPCOperationStatusQ>),
FindNodeQ(Box<RPCOperationFindNodeQ>),
AppCallQ(Box<RPCOperationAppCallQ>),

View File

@ -1,7 +1,7 @@
use super::*;
#[derive(Debug, Clone)]
pub enum RespondTo {
pub(in crate::rpc_processor) enum RespondTo {
Sender,
PrivateRoute(PrivateRoute),
}

View File

@ -1,7 +1,7 @@
use super::*;
#[derive(Debug, Clone)]
pub struct RPCStatement {
pub(in crate::rpc_processor) struct RPCStatement {
detail: RPCStatementDetail,
}
@ -33,7 +33,7 @@ impl RPCStatement {
}
#[derive(Debug, Clone)]
pub enum RPCStatementDetail {
pub(in crate::rpc_processor) enum RPCStatementDetail {
ValidateDialInfo(Box<RPCOperationValidateDialInfo>),
Route(Box<RPCOperationRoute>),
ValueChanged(Box<RPCOperationValueChanged>),

View File

@ -2,7 +2,7 @@ use super::*;
////////////////////////////////////////////////////////////////////////////////////////////////////
pub fn encode_route_hop_data(
pub(crate) fn encode_route_hop_data(
route_hop_data: &RouteHopData,
builder: &mut veilid_capnp::route_hop_data::Builder,
) -> Result<(), RPCError> {
@ -24,7 +24,7 @@ pub fn encode_route_hop_data(
Ok(())
}
pub fn decode_route_hop_data(
pub(crate) fn decode_route_hop_data(
reader: &veilid_capnp::route_hop_data::Reader,
) -> Result<RouteHopData, RPCError> {
let nonce = decode_nonce(
@ -45,7 +45,7 @@ pub fn decode_route_hop_data(
////////////////////////////////////////////////////////////////////////////////////////////////////
pub fn encode_route_hop(
pub(crate) fn encode_route_hop(
route_hop: &RouteHop,
builder: &mut veilid_capnp::route_hop::Builder,
) -> Result<(), RPCError> {
@ -67,7 +67,9 @@ pub fn encode_route_hop(
Ok(())
}
pub fn decode_route_hop(reader: &veilid_capnp::route_hop::Reader) -> Result<RouteHop, RPCError> {
pub(crate) fn decode_route_hop(
reader: &veilid_capnp::route_hop::Reader,
) -> Result<RouteHop, RPCError> {
let n_reader = reader.reborrow().get_node();
let node = match n_reader.which().map_err(RPCError::protocol)? {
veilid_capnp::route_hop::node::Which::NodeId(ni) => {
@ -97,7 +99,7 @@ pub fn decode_route_hop(reader: &veilid_capnp::route_hop::Reader) -> Result<Rout
////////////////////////////////////////////////////////////////////////////////////////////////////
pub fn encode_private_route(
pub(crate) fn encode_private_route(
private_route: &PrivateRoute,
builder: &mut veilid_capnp::private_route::Builder,
) -> Result<(), RPCError> {
@ -123,7 +125,7 @@ pub fn encode_private_route(
Ok(())
}
pub fn decode_private_route(
pub(crate) fn decode_private_route(
reader: &veilid_capnp::private_route::Reader,
) -> Result<PrivateRoute, RPCError> {
let public_key = decode_typed_key(&reader.get_public_key().map_err(
@ -152,7 +154,7 @@ pub fn decode_private_route(
////////////////////////////////////////////////////////////////////////////////////////////////////
pub fn encode_safety_route(
pub(crate) fn encode_safety_route(
safety_route: &SafetyRoute,
builder: &mut veilid_capnp::safety_route::Builder,
) -> Result<(), RPCError> {
@ -176,7 +178,7 @@ pub fn encode_safety_route(
Ok(())
}
pub fn decode_safety_route(
pub(crate) fn decode_safety_route(
reader: &veilid_capnp::safety_route::Reader,
) -> Result<SafetyRoute, RPCError> {
let public_key = decode_typed_key(

View File

@ -2,7 +2,7 @@ use super::*;
/// Where to send an RPC message
#[derive(Debug, Clone)]
pub enum Destination {
pub(crate) enum Destination {
/// Send to node directly
Direct {
/// The node to send to

View File

@ -8,14 +8,14 @@ where
result: Option<Result<R, RPCError>>,
}
pub type FanoutCallReturnType = RPCNetworkResult<Vec<PeerInfo>>;
pub type FanoutNodeInfoFilter = Arc<dyn Fn(&[TypedKey], &NodeInfo) -> bool + Send + Sync>;
pub(crate) type FanoutCallReturnType = RPCNetworkResult<Vec<PeerInfo>>;
pub(crate) type FanoutNodeInfoFilter = Arc<dyn Fn(&[TypedKey], &NodeInfo) -> bool + Send + Sync>;
pub fn empty_fanout_node_info_filter() -> FanoutNodeInfoFilter {
pub(crate) fn empty_fanout_node_info_filter() -> FanoutNodeInfoFilter {
Arc::new(|_, _| true)
}
pub fn capability_fanout_node_info_filter(caps: Vec<Capability>) -> FanoutNodeInfoFilter {
pub(crate) fn capability_fanout_node_info_filter(caps: Vec<Capability>) -> FanoutNodeInfoFilter {
Arc::new(move |_, ni| ni.has_capabilities(&caps))
}
@ -34,7 +34,7 @@ pub fn capability_fanout_node_info_filter(caps: Vec<Capability>) -> FanoutNodeIn
/// If the algorithm times out, a Timeout result is returned; however, operations will still have been performed, and a
/// timeout is not necessarily indicative of an algorithmic 'failure', just that no definitive stopping condition was found
/// in the given time.
pub struct FanoutCall<R, F, C, D>
pub(crate) struct FanoutCall<R, F, C, D>
where
R: Unpin,
F: Future<Output = FanoutCallReturnType>,

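For the fanout types above: a node-info filter is just a shared predicate, and the capability variant works by moving the capability list into the closure. A self-contained sketch of the same shape, using stand-in FakeKey and FakeNodeInfo types rather than the real TypedKey and NodeInfo:

use std::sync::Arc;

type FakeKey = [u8; 32];

struct FakeNodeInfo {
    capabilities: Vec<u32>,
}

impl FakeNodeInfo {
    fn has_capabilities(&self, caps: &[u32]) -> bool {
        caps.iter().all(|c| self.capabilities.contains(c))
    }
}

// Same shape as FanoutNodeInfoFilter: an Arc'd predicate over (node ids, node info).
type NodeInfoFilter = Arc<dyn Fn(&[FakeKey], &FakeNodeInfo) -> bool + Send + Sync>;

fn empty_filter() -> NodeInfoFilter {
    Arc::new(|_, _| true)
}

fn capability_filter(caps: Vec<u32>) -> NodeInfoFilter {
    // The capability list is moved into the closure, so the filter is 'static.
    Arc::new(move |_, ni| ni.has_capabilities(&caps))
}

fn main() {
    let ni = FakeNodeInfo { capabilities: vec![1, 2, 3] };
    let any = empty_filter();
    let needs_2_and_3 = capability_filter(vec![2, 3]);
    assert!((*any)(&[], &ni));
    assert!((*needs_2_and_3)(&[], &ni));
    assert!(!(*capability_filter(vec![9]))(&[], &ni));
}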
View File

@ -1,6 +1,6 @@
use super::*;
pub struct FanoutQueue {
pub(in crate::rpc_processor) struct FanoutQueue {
crypto_kind: CryptoKind,
current_nodes: VecDeque<NodeRef>,
returned_nodes: HashSet<TypedKey>,

View File

@ -29,21 +29,20 @@ mod rpc_complete_tunnel;
#[cfg(feature = "unstable-tunnels")]
mod rpc_start_tunnel;
pub use coders::*;
pub use destination::*;
pub use fanout_call::*;
pub use fanout_queue::*;
pub use operation_waiter::*;
pub use rpc_error::*;
pub use rpc_status::*;
pub(crate) use coders::*;
pub(crate) use destination::*;
pub(crate) use operation_waiter::*;
pub(crate) use rpc_error::*;
pub(crate) use rpc_status::*;
pub(crate) use fanout_call::*;
use super::*;
use crypto::*;
use futures_util::StreamExt;
use network_manager::*;
use receipt_manager::*;
use routing_table::*;
use fanout_queue::*;
use stop_token::future::FutureExt;
use storage_manager::*;
@ -55,8 +54,8 @@ struct RPCMessageHeaderDetailDirect {
envelope: Envelope,
/// The noderef of the peer that sent the message (not the original sender). Ensures node doesn't get evicted from routing table until we're done with it
peer_noderef: NodeRef,
/// The connection from the peer sent the message (not the original sender)
connection_descriptor: ConnectionDescriptor,
/// The flow from the peer that sent the message (not the original sender)
flow: Flow,
/// The routing domain the message was sent through
routing_domain: RoutingDomain,
}
@ -186,10 +185,11 @@ struct WaitableReply {
timeout_us: TimestampDuration,
node_ref: NodeRef,
send_ts: Timestamp,
send_data_kind: SendDataKind,
send_data_method: SendDataMethod,
safety_route: Option<PublicKey>,
remote_private_route: Option<PublicKey>,
reply_private_route: Option<PublicKey>,
_opt_connection_ref_scope: Option<ConnectionRefScope>,
}
/////////////////////////////////////////////////////////////////////
@ -269,17 +269,18 @@ enum RPCKind {
/////////////////////////////////////////////////////////////////////
pub struct RPCProcessorInner {
struct RPCProcessorInner {
send_channel: Option<flume::Sender<(Option<Id>, RPCMessageEncoded)>>,
stop_source: Option<StopSource>,
worker_join_handles: Vec<MustJoinHandle<()>>,
}
pub struct RPCProcessorUnlockedInner {
struct RPCProcessorUnlockedInner {
timeout_us: TimestampDuration,
queue_size: u32,
concurrency: u32,
max_route_hop_count: usize,
#[cfg_attr(target_arch = "wasm32", allow(dead_code))]
validate_dial_info_receipt_time_ms: u32,
update_callback: UpdateCallback,
waiting_rpc_table: OperationWaiter<RPCMessage, Option<QuestionContext>>,
@ -287,7 +288,7 @@ pub struct RPCProcessorUnlockedInner {
}
#[derive(Clone)]
pub struct RPCProcessor {
pub(crate) struct RPCProcessor {
crypto: Crypto,
config: VeilidConfig,
network_manager: NetworkManager,
@ -975,11 +976,16 @@ impl RPCProcessor {
safety_route: Option<PublicKey>,
remote_private_route: Option<PublicKey>,
) {
let wants_answer = matches!(rpc_kind, RPCKind::Question);
// Record for node if this was not sent via a route
if safety_route.is_none() && remote_private_route.is_none() {
node_ref.stats_question_sent(send_ts, bytes, wants_answer);
let wants_answer = matches!(rpc_kind, RPCKind::Question);
let is_answer = matches!(rpc_kind, RPCKind::Answer);
if is_answer {
node_ref.stats_answer_sent(bytes);
} else {
node_ref.stats_question_sent(send_ts, bytes, wants_answer);
}
return;
}
@ -1142,7 +1148,7 @@ impl RPCProcessor {
dest: Destination,
question: RPCQuestion,
context: Option<QuestionContext>,
) ->RPCNetworkResult<WaitableReply> {
) -> RPCNetworkResult<WaitableReply> {
// Get sender peer info if we should send that
let spi = self.get_sender_peer_info(&dest);
@ -1199,7 +1205,7 @@ impl RPCProcessor {
);
RPCError::network(e)
})?;
let send_data_kind = network_result_value_or_log!( res => [ format!(": node_ref={}, destination_node_ref={}, message.len={}", node_ref, destination_node_ref, message_len) ] {
let send_data_method = network_result_value_or_log!( res => [ format!(": node_ref={}, destination_node_ref={}, message.len={}", node_ref, destination_node_ref, message_len) ] {
// If we couldn't send we're still cleaning up
self.record_send_failure(RPCKind::Question, send_ts, node_ref.clone(), safety_route, remote_private_route);
network_result_raise!(res);
@ -1216,16 +1222,24 @@ impl RPCProcessor {
remote_private_route,
);
// Ref the connection so it doesn't go away until we're done with the waitable reply
let opt_connection_ref_scope = send_data_method.unique_flow.connection_id.and_then(|id| self
.network_manager()
.connection_manager()
.try_connection_ref_scope(id));
// Pass back waitable reply completion
Ok(NetworkResult::value(WaitableReply {
handle,
timeout_us,
node_ref,
send_ts,
send_data_kind,
send_data_method,
safety_route,
remote_private_route,
reply_private_route,
_opt_connection_ref_scope: opt_connection_ref_scope,
}))
}
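
The new _opt_connection_ref_scope field holds whatever try_connection_ref_scope returns, so the underlying connection stays referenced until the WaitableReply is finished. A rough sketch of that RAII idea under simplified assumptions; ConnectionTable and ConnectionRefGuard here are stand-ins, not the real connection manager API:

use std::collections::HashMap;
use std::sync::{Arc, Mutex};

#[derive(Default)]
struct ConnectionTable {
    // connection id -> reference count; a real table would hold the sockets too.
    refs: Mutex<HashMap<u64, usize>>,
}

struct ConnectionRefGuard {
    table: Arc<ConnectionTable>,
    id: u64,
}

// Returns a guard that pins the connection until the guard is dropped.
fn try_connection_ref_scope(table: &Arc<ConnectionTable>, id: u64) -> Option<ConnectionRefGuard> {
    let mut refs = table.refs.lock().unwrap();
    // Only connections that still exist can be pinned.
    let count = refs.get_mut(&id)?;
    *count += 1;
    Some(ConnectionRefGuard { table: table.clone(), id })
}

impl Drop for ConnectionRefGuard {
    fn drop(&mut self) {
        let mut refs = self.table.refs.lock().unwrap();
        if let Some(count) = refs.get_mut(&self.id) {
            // Once the count falls back, inactivity cleanup may reclaim the connection.
            *count -= 1;
        }
    }
}

fn main() {
    let table = Arc::new(ConnectionTable::default());
    table.refs.lock().unwrap().insert(42, 1);
    let guard = try_connection_ref_scope(&table, 42);
    assert!(guard.is_some());
    drop(guard); // releases the extra reference taken above
}

Dropping the guard is what releases the reference, which matches storing it in the reply struct and letting it go out of scope together with the reply.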
@ -1284,7 +1298,7 @@ impl RPCProcessor {
);
RPCError::network(e)
})?;
let _send_data_kind = network_result_value_or_log!( res => [ format!(": node_ref={}, destination_node_ref={}, message.len={}", node_ref, destination_node_ref, message_len) ] {
let _send_data_method = network_result_value_or_log!( res => [ format!(": node_ref={}, destination_node_ref={}, message.len={}", node_ref, destination_node_ref, message_len) ] {
// If we couldn't send we're still cleaning up
self.record_send_failure(RPCKind::Statement, send_ts, node_ref.clone(), safety_route, remote_private_route);
network_result_raise!(res);
@ -1423,7 +1437,7 @@ impl RPCProcessor {
// Validate the RPC operation
let validate_context = RPCValidateContext {
crypto: self.crypto.clone(),
rpc_processor: self.clone(),
// rpc_processor: self.clone(),
question_context,
};
operation.validate(&validate_context)?;
@ -1646,7 +1660,7 @@ impl RPCProcessor {
&self,
envelope: Envelope,
peer_noderef: NodeRef,
connection_descriptor: ConnectionDescriptor,
flow: Flow,
routing_domain: RoutingDomain,
body: Vec<u8>,
) -> EyreResult<()> {
@ -1654,7 +1668,7 @@ impl RPCProcessor {
detail: RPCMessageHeaderDetail::Direct(RPCMessageHeaderDetailDirect {
envelope,
peer_noderef,
connection_descriptor,
flow,
routing_domain,
}),
timestamp: get_aligned_timestamp(),

View File

@ -272,7 +272,8 @@ impl RPCProcessor {
feature = "verbose-tracing",
instrument(level = "trace", skip_all, err)
)]
pub(crate) async fn process_private_route_first_hop(
async fn process_private_route_first_hop(
&self,
mut routed_operation: RoutedOperation,
sr_pubkey: TypedKey,
@ -336,7 +337,7 @@ impl RPCProcessor {
}
/// Decrypt route hop data and sign routed operation
pub(crate) fn decrypt_private_route_hop_data(
fn decrypt_private_route_hop_data(
&self,
route_hop_data: &RouteHopData,
pr_pubkey: &TypedKey,

View File

@ -49,8 +49,8 @@ impl RPCProcessor {
// Can't allow anything other than direct packets here, as handling reverse connections
// or anything similar via signals over private routes would deanonymize the route
let connection_descriptor = match &msg.header.detail {
RPCMessageHeaderDetail::Direct(d) => d.connection_descriptor,
let flow = match &msg.header.detail {
RPCMessageHeaderDetail::Direct(d) => d.flow,
RPCMessageHeaderDetail::SafetyRouted(_) | RPCMessageHeaderDetail::PrivateRouted(_) => {
return Ok(NetworkResult::invalid_message("signal must be direct"));
}
@ -70,7 +70,7 @@ impl RPCProcessor {
let network_manager = self.network_manager();
let signal_info = signal.destructure();
network_manager
.handle_signal(connection_descriptor, signal_info)
.handle_signal(flow, signal_info)
.await
.map_err(RPCError::network)
}
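
A reduced sketch of the guard above: a signal is only serviced when the message arrived on a direct flow, since acting on one that arrived over a safety or private route would link the route to a concrete flow. HeaderDetail, SimpleFlow, and HandleResult are simplified stand-ins for the real types:

#[derive(Debug, Clone, Copy)]
struct SimpleFlow {
    remote_port: u16,
}

enum HeaderDetail {
    Direct(SimpleFlow),
    SafetyRouted,
    PrivateRouted,
}

enum HandleResult {
    InvalidMessage(&'static str),
    Signalled(SimpleFlow),
}

fn process_signal(detail: &HeaderDetail) -> HandleResult {
    // Anything other than a direct packet is rejected up front.
    let flow = match detail {
        HeaderDetail::Direct(flow) => *flow,
        HeaderDetail::SafetyRouted | HeaderDetail::PrivateRouted => {
            return HandleResult::InvalidMessage("signal must be direct");
        }
    };
    HandleResult::Signalled(flow)
}

fn main() {
    assert!(matches!(
        process_signal(&HeaderDetail::PrivateRouted),
        HandleResult::InvalidMessage(_)
    ));
    assert!(matches!(
        process_signal(&HeaderDetail::Direct(SimpleFlow { remote_port: 5150 })),
        HandleResult::Signalled(_)
    ));
}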

View File

@ -111,7 +111,7 @@ impl RPCProcessor {
network_result_try!(self.question(dest.clone(), question, None).await?);
// Note what kind of ping this was and to what peer scope
let send_data_kind = waitable_reply.send_data_kind;
let send_data_method = waitable_reply.send_data_method.clone();
// Wait for reply
let (msg, latency) = match self.wait_for_reply(waitable_reply, debug_string).await? {
@ -149,33 +149,30 @@ impl RPCProcessor {
} => {
if matches!(safety_selection, SafetySelection::Unsafe(_)) {
if let Some(sender_info) = sender_info {
match send_data_kind {
SendDataKind::Direct(connection_descriptor) => {
// Directly requested status that actually gets sent directly and not over a relay will tell us what our IP address appears as
// If this changes, we'd want to know about that to reset the networking stack
match routing_domain {
RoutingDomain::PublicInternet => self
.network_manager()
.report_public_internet_socket_address(
sender_info.socket_address,
connection_descriptor,
target,
),
RoutingDomain::LocalNetwork => {
self.network_manager().report_local_network_socket_address(
sender_info.socket_address,
connection_descriptor,
target,
)
}
if send_data_method.opt_relayed_contact_method.is_none()
&& matches!(
send_data_method.contact_method,
NodeContactMethod::Direct(_)
)
{
// A directly requested status that actually gets sent directly, and not over a relay, tells us what our IP address appears as.
// If this changes, we'd want to know about it so we can reset the networking stack.
match routing_domain {
RoutingDomain::PublicInternet => self
.network_manager()
.report_public_internet_socket_address(
sender_info.socket_address,
send_data_method.unique_flow.flow,
target,
),
RoutingDomain::LocalNetwork => {
self.network_manager().report_local_network_socket_address(
sender_info.socket_address,
send_data_method.unique_flow.flow,
target,
)
}
}
SendDataKind::Indirect => {
// Do nothing in this case, as the socket address returned here would be for any node other than ours
}
SendDataKind::Existing(_) => {
// Do nothing in this case, as an existing connection could not have a different public address or it would have been reset
}
};
opt_sender_info = Some(sender_info.clone());
}
@ -211,7 +208,7 @@ impl RPCProcessor {
let (node_status, sender_info) = match &msg.header.detail {
RPCMessageHeaderDetail::Direct(detail) => {
let connection_descriptor = detail.connection_descriptor;
let flow = detail.flow;
let routing_domain = detail.routing_domain;
// Ensure the node status from the question is the kind for the routing domain we received the request in
@ -225,7 +222,7 @@ impl RPCProcessor {
// Get the peer address in the returned sender info
let sender_info = SenderInfo {
socket_address: *connection_descriptor.remote_address(),
socket_address: *flow.remote_address(),
};
// Make status answer

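The rewritten branch above only feeds the peer-reported socket address into address-change detection when the status actually went out directly and unrelayed; a relayed or indirect exchange would describe some other node's apparent address. A reduced sketch of that decision with simplified stand-in types:

#[derive(Debug, Clone, Copy, PartialEq, Eq)]
struct SocketAddress {
    port: u16,
}

#[derive(Debug, Clone, Copy)]
enum ContactMethod {
    Direct,
    Relay,
}

struct SendInfo {
    contact_method: ContactMethod,
    relayed: bool,
    observed_address: SocketAddress,
}

// Returns the address worth feeding to address-change detection, if any.
fn reportable_address(info: &SendInfo) -> Option<SocketAddress> {
    if !info.relayed && matches!(info.contact_method, ContactMethod::Direct) {
        // Only a direct, unrelayed exchange reflects how our own address
        // appears to the peer; anything else reflects a different node.
        Some(info.observed_address)
    } else {
        None
    }
}

fn main() {
    let direct = SendInfo {
        contact_method: ContactMethod::Direct,
        relayed: false,
        observed_address: SocketAddress { port: 5150 },
    };
    let relayed = SendInfo {
        contact_method: ContactMethod::Relay,
        relayed: true,
        observed_address: SocketAddress { port: 5150 },
    };
    assert_eq!(reportable_address(&direct), Some(SocketAddress { port: 5150 }));
    assert_eq!(reportable_address(&relayed), None);
}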
View File

@ -6,6 +6,7 @@ impl RPCProcessor {
feature = "verbose-tracing",
instrument(level = "trace", skip(self), ret, err)
)]
#[cfg_attr(target_arch = "wasm32", allow(dead_code))]
pub async fn rpc_call_validate_dial_info(
self,
peer: NodeRef,

View File

@ -44,7 +44,7 @@ struct StorageManagerUnlockedInner {
}
#[derive(Clone)]
pub struct StorageManager {
pub(crate) struct StorageManager {
unlocked_inner: Arc<StorageManagerUnlockedInner>,
inner: Arc<AsyncMutex<StorageManagerInner>>,
}

View File

@ -181,10 +181,10 @@ pub fn config_callback(key: String) -> ConfigCallbackReturn {
}
"network.connection_initial_timeout_ms" => Ok(Box::new(2_000u32)),
"network.connection_inactivity_timeout_ms" => Ok(Box::new(60_000u32)),
"network.max_connections_per_ip4" => Ok(Box::new(8u32)),
"network.max_connections_per_ip6_prefix" => Ok(Box::new(8u32)),
"network.max_connections_per_ip4" => Ok(Box::new(32u32)),
"network.max_connections_per_ip6_prefix" => Ok(Box::new(32u32)),
"network.max_connections_per_ip6_prefix_size" => Ok(Box::new(56u32)),
"network.max_connection_frequency_per_min" => Ok(Box::new(8u32)),
"network.max_connection_frequency_per_min" => Ok(Box::new(128u32)),
"network.client_whitelist_timeout_ms" => Ok(Box::new(300_000u32)),
"network.reverse_connection_receipt_time_ms" => Ok(Box::new(5_000u32)),
"network.hole_punch_receipt_time_ms" => Ok(Box::new(5_000u32)),
@ -192,13 +192,18 @@ pub fn config_callback(key: String) -> ConfigCallbackReturn {
"network.routing_table.node_id" => Ok(Box::new(TypedKeyGroup::new())),
"network.routing_table.node_id_secret" => Ok(Box::new(TypedSecretGroup::new())),
// "network.routing_table.bootstrap" => Ok(Box::new(Vec::<String>::new())),
#[cfg(not(target_arch = "wasm32"))]
"network.routing_table.bootstrap" => Ok(Box::new(vec!["bootstrap.veilid.net".to_string()])),
#[cfg(target_arch = "wasm32")]
"network.routing_table.bootstrap" => Ok(Box::new(vec![
"ws://bootstrap.veilid.net:5150/ws".to_string(),
])),
"network.routing_table.limit_over_attached" => Ok(Box::new(64u32)),
"network.routing_table.limit_fully_attached" => Ok(Box::new(32u32)),
"network.routing_table.limit_attached_strong" => Ok(Box::new(16u32)),
"network.routing_table.limit_attached_good" => Ok(Box::new(8u32)),
"network.routing_table.limit_attached_weak" => Ok(Box::new(4u32)),
"network.rpc.concurrency" => Ok(Box::new(2u32)),
"network.rpc.concurrency" => Ok(Box::new(0u32)),
"network.rpc.queue_size" => Ok(Box::new(1024u32)),
"network.rpc.max_timestamp_behind_ms" => Ok(Box::new(Some(10_000u32))),
"network.rpc.max_timestamp_ahead_ms" => Ok(Box::new(Some(10_000u32))),
@ -217,7 +222,7 @@ pub fn config_callback(key: String) -> ConfigCallbackReturn {
"network.dht.set_value_fanout" => Ok(Box::new(4u32)),
"network.dht.min_peer_count" => Ok(Box::new(20u32)),
"network.dht.min_peer_refresh_time_ms" => Ok(Box::new(60_000u32)),
"network.dht.validate_dial_info_receipt_time_ms" => Ok(Box::new(5_000u32)),
"network.dht.validate_dial_info_receipt_time_ms" => Ok(Box::new(2_000u32)),
"network.dht.local_subkey_cache_size" => Ok(Box::new(128u32)),
"network.dht.local_max_subkey_cache_memory_mb" => Ok(Box::new(256u32)),
"network.dht.remote_subkey_cache_size" => Ok(Box::new(1024u32)),
@ -226,7 +231,7 @@ pub fn config_callback(key: String) -> ConfigCallbackReturn {
"network.dht.remote_max_storage_space_mb" => Ok(Box::new(64u32)),
"network.upnp" => Ok(Box::new(false)),
"network.detect_address_changes" => Ok(Box::new(true)),
"network.restricted_nat_retries" => Ok(Box::new(3u32)),
"network.restricted_nat_retries" => Ok(Box::new(0u32)),
"network.tls.certificate_path" => Ok(Box::new(get_certfile_path())),
"network.tls.private_key_path" => Ok(Box::new(get_keyfile_path())),
"network.tls.connection_initial_timeout_ms" => Ok(Box::new(2_000u32)),
@ -239,7 +244,7 @@ pub fn config_callback(key: String) -> ConfigCallbackReturn {
"network.application.http.path" => Ok(Box::new(String::from("app"))),
"network.application.http.url" => Ok(Box::new(Option::<String>::None)),
"network.protocol.udp.enabled" => Ok(Box::new(true)),
"network.protocol.udp.socket_pool_size" => Ok(Box::new(16u32)),
"network.protocol.udp.socket_pool_size" => Ok(Box::new(0u32)),
"network.protocol.udp.listen_address" => Ok(Box::new("".to_owned())),
"network.protocol.udp.public_address" => Ok(Box::new(Option::<String>::None)),
"network.protocol.tcp.connect" => Ok(Box::new(true)),
@ -247,15 +252,15 @@ pub fn config_callback(key: String) -> ConfigCallbackReturn {
"network.protocol.tcp.max_connections" => Ok(Box::new(32u32)),
"network.protocol.tcp.listen_address" => Ok(Box::new("".to_owned())),
"network.protocol.tcp.public_address" => Ok(Box::new(Option::<String>::None)),
"network.protocol.ws.connect" => Ok(Box::new(false)),
"network.protocol.ws.listen" => Ok(Box::new(false)),
"network.protocol.ws.max_connections" => Ok(Box::new(16u32)),
"network.protocol.ws.connect" => Ok(Box::new(true)),
"network.protocol.ws.listen" => Ok(Box::new(true)),
"network.protocol.ws.max_connections" => Ok(Box::new(32u32)),
"network.protocol.ws.listen_address" => Ok(Box::new("".to_owned())),
"network.protocol.ws.path" => Ok(Box::new(String::from("ws"))),
"network.protocol.ws.url" => Ok(Box::new(Option::<String>::None)),
"network.protocol.wss.connect" => Ok(Box::new(false)),
"network.protocol.wss.connect" => Ok(Box::new(true)),
"network.protocol.wss.listen" => Ok(Box::new(false)),
"network.protocol.wss.max_connections" => Ok(Box::new(16u32)),
"network.protocol.wss.max_connections" => Ok(Box::new(32u32)),
"network.protocol.wss.listen_address" => Ok(Box::new("".to_owned())),
"network.protocol.wss.path" => Ok(Box::new(String::from("ws"))),
"network.protocol.wss.url" => Ok(Box::new(Option::<String>::None)),
@ -311,25 +316,31 @@ pub async fn test_config() {
);
assert_eq!(inner.network.connection_initial_timeout_ms, 2_000u32);
assert_eq!(inner.network.connection_inactivity_timeout_ms, 60_000u32);
assert_eq!(inner.network.max_connections_per_ip4, 8u32);
assert_eq!(inner.network.max_connections_per_ip6_prefix, 8u32);
assert_eq!(inner.network.max_connections_per_ip4, 32u32);
assert_eq!(inner.network.max_connections_per_ip6_prefix, 32u32);
assert_eq!(inner.network.max_connections_per_ip6_prefix_size, 56u32);
assert_eq!(inner.network.max_connection_frequency_per_min, 8u32);
assert_eq!(inner.network.max_connection_frequency_per_min, 128u32);
assert_eq!(inner.network.client_whitelist_timeout_ms, 300_000u32);
assert_eq!(inner.network.reverse_connection_receipt_time_ms, 5_000u32);
assert_eq!(inner.network.hole_punch_receipt_time_ms, 5_000u32);
assert_eq!(inner.network.network_key_password, Option::<String>::None);
assert_eq!(inner.network.rpc.concurrency, 2u32);
assert_eq!(inner.network.rpc.concurrency, 0u32);
assert_eq!(inner.network.rpc.queue_size, 1024u32);
assert_eq!(inner.network.rpc.timeout_ms, 5_000u32);
assert_eq!(inner.network.rpc.max_route_hop_count, 4u8);
assert_eq!(inner.network.rpc.default_route_hop_count, 1u8);
assert_eq!(inner.network.routing_table.node_id.len(), 0);
assert_eq!(inner.network.routing_table.node_id_secret.len(), 0);
#[cfg(not(target_arch = "wasm32"))]
assert_eq!(
inner.network.routing_table.bootstrap,
vec!["bootstrap.veilid.net"],
);
#[cfg(target_arch = "wasm32")]
assert_eq!(
inner.network.routing_table.bootstrap,
vec!["ws://bootstrap.veilid.net:5150/ws"],
);
assert_eq!(inner.network.routing_table.limit_over_attached, 64u32);
assert_eq!(inner.network.routing_table.limit_fully_attached, 32u32);
assert_eq!(inner.network.routing_table.limit_attached_strong, 16u32);
@ -350,12 +361,12 @@ pub async fn test_config() {
assert_eq!(inner.network.dht.min_peer_refresh_time_ms, 60_000u32);
assert_eq!(
inner.network.dht.validate_dial_info_receipt_time_ms,
5_000u32
2_000u32
);
assert!(!inner.network.upnp);
assert!(inner.network.detect_address_changes);
assert_eq!(inner.network.restricted_nat_retries, 3u32);
assert_eq!(inner.network.restricted_nat_retries, 0u32);
assert_eq!(inner.network.tls.certificate_path, get_certfile_path());
assert_eq!(inner.network.tls.private_key_path, get_keyfile_path());
assert_eq!(inner.network.tls.connection_initial_timeout_ms, 2_000u32);
@ -368,8 +379,9 @@ pub async fn test_config() {
assert_eq!(inner.network.application.http.listen_address, "");
assert_eq!(inner.network.application.http.path, "app");
assert_eq!(inner.network.application.http.url, None);
assert!(inner.network.protocol.udp.enabled);
assert_eq!(inner.network.protocol.udp.socket_pool_size, 16u32);
assert_eq!(inner.network.protocol.udp.socket_pool_size, 0u32);
assert_eq!(inner.network.protocol.udp.listen_address, "");
assert_eq!(inner.network.protocol.udp.public_address, None);
assert!(inner.network.protocol.tcp.connect);
@ -377,15 +389,15 @@ pub async fn test_config() {
assert_eq!(inner.network.protocol.tcp.max_connections, 32u32);
assert_eq!(inner.network.protocol.tcp.listen_address, "");
assert_eq!(inner.network.protocol.tcp.public_address, None);
assert!(!inner.network.protocol.ws.connect);
assert!(!inner.network.protocol.ws.listen);
assert_eq!(inner.network.protocol.ws.max_connections, 16u32);
assert!(inner.network.protocol.ws.connect);
assert!(inner.network.protocol.ws.listen);
assert_eq!(inner.network.protocol.ws.max_connections, 32u32);
assert_eq!(inner.network.protocol.ws.listen_address, "");
assert_eq!(inner.network.protocol.ws.path, "ws");
assert_eq!(inner.network.protocol.ws.url, None);
assert!(!inner.network.protocol.wss.connect);
assert!(inner.network.protocol.wss.connect);
assert!(!inner.network.protocol.wss.listen);
assert_eq!(inner.network.protocol.wss.max_connections, 16u32);
assert_eq!(inner.network.protocol.wss.max_connections, 32u32);
assert_eq!(inner.network.protocol.wss.listen_address, "");
assert_eq!(inner.network.protocol.wss.path, "ws");
assert_eq!(inner.network.protocol.wss.url, None);
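
The bootstrap default is now cfg-gated: native builds keep the bare hostname while wasm32 builds get a WebSocket URL, since a browser node can only dial WebSocket transports. A minimal sketch of the same cfg pattern (the function name is illustrative):

#[cfg(not(target_arch = "wasm32"))]
fn default_bootstrap() -> Vec<String> {
    // Native nodes can resolve and dial the bootstrap host directly.
    vec!["bootstrap.veilid.net".to_string()]
}

#[cfg(target_arch = "wasm32")]
fn default_bootstrap() -> Vec<String> {
    // Browser builds have no raw sockets, so bootstrap goes over WebSocket.
    vec!["ws://bootstrap.veilid.net:5150/ws".to_string()]
}

fn main() {
    println!("bootstrap defaults: {:?}", default_bootstrap());
}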

View File

@ -1,11 +1,6 @@
//! Test suite for Native
#![cfg(not(target_arch = "wasm32"))]
use crate::crypto::tests::*;
use crate::network_manager::tests::*;
use crate::routing_table;
use crate::table_store::tests::*;
use crate::tests::common::*;
use crate::veilid_api;
use crate::tests::*;
use crate::*;
///////////////////////////////////////////////////////////////////////////
@ -101,15 +96,15 @@ cfg_if! {
pub fn setup() {
SETUP_ONCE.call_once(|| {
use tracing_subscriber::{filter, fmt, prelude::*};
let mut filters = filter::Targets::new().with_default(filter::LevelFilter::INFO);
use tracing_subscriber::{EnvFilter, filter::LevelFilter, fmt, prelude::*};
let mut env_filter = EnvFilter::builder().with_default_directive(LevelFilter::INFO.into()).from_env_lossy();
for ig in DEFAULT_LOG_IGNORE_LIST {
filters = filters.with_target(ig, filter::LevelFilter::OFF);
env_filter = env_filter.add_directive(format!("{}=off", ig).parse().unwrap());
}
let fmt_layer = fmt::layer();
tracing_subscriber::registry()
.with(fmt_layer)
.with(filters)
.with(env_filter)
.init();
});
}

View File

@ -270,6 +270,7 @@ impl VeilidAPI {
default_route_hop_count,
Direction::Inbound.into(),
&[],
false,
)?;
if !rss.test_route(route_id).await? {
rss.release_route(route_id);

View File

@ -764,7 +764,7 @@ impl VeilidAPI {
}
let netman = self.network_manager()?;
netman.net().restart_network();
netman.debug_restart_network();
Ok("Network restarted".to_owned())
} else {
@ -1095,6 +1095,7 @@ impl VeilidAPI {
hop_count,
directions,
&[],
false,
) {
Ok(v) => v.to_string(),
Err(e) => {

View File

@ -24,7 +24,6 @@ pub use crypto::*;
#[cfg(feature = "unstable-blockstore")]
pub use intf::BlockStore;
pub use intf::ProtectedStore;
pub use routing_table::{NodeRef, NodeRefBase};
pub use table_store::{TableDB, TableDBTransaction, TableStore};
use crate::*;

View File

@ -1,4 +1,5 @@
use super::*;
use routing_table::NodeRefBase;
///////////////////////////////////////////////////////////////////////////////////////
@ -18,15 +19,6 @@ pub struct RoutingContextUnlockedInner {
safety_selection: SafetySelection,
}
impl Drop for RoutingContextInner {
fn drop(&mut self) {
// self.api
// .borrow_mut()
// .routing_contexts
// //.remove(&self.id);
}
}
/// Routing contexts are the way you specify the communication preferences for Veilid.
///
/// By default routing contexts are 'direct' from node to node, offering no privacy. To enable sender

Some files were not shown because too many files have changed in this diff.