mirror of https://gitlab.com/veilid/veilid.git (synced 2024-10-01 01:26:08 -04:00)

commit 5e2142eac8 (parent c608e0a3b7)

    Fix project formatting
@@ -1384,7 +1384,6 @@ impl UISender for CursiveUISender {
             ),
         );
     }
-
 }
 impl CursiveUISender {
     pub fn push_styled(&self, styled_string: StyledString) -> std::io::Result<()> {
@@ -20,7 +20,7 @@ cfg_if! {
             local.block_on(&rt, f)
         }
     } else {
-        compile_error!("needs executor implementation")
+        compile_error!("needs executor implementation");
     }
 }
 
@@ -2,11 +2,11 @@ use super::*;
 use igd::*;
 use std::net::UdpSocket;
 
-
 const UPNP_GATEWAY_DETECT_TIMEOUT_MS: u32 = 5_000;
 const UPNP_MAPPING_LIFETIME_MS: u32 = 120_000;
 const UPNP_MAPPING_ATTEMPTS: u32 = 3;
-const UPNP_MAPPING_LIFETIME_US:TimestampDuration = TimestampDuration::new(UPNP_MAPPING_LIFETIME_MS as u64 * 1000u64);
+const UPNP_MAPPING_LIFETIME_US: TimestampDuration =
+    TimestampDuration::new(UPNP_MAPPING_LIFETIME_MS as u64 * 1000u64);
 
 #[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
 struct PortMapKey {
@@ -36,7 +36,6 @@ pub struct IGDManager {
     inner: Arc<Mutex<IGDManagerInner>>,
 }
 
-
 fn convert_llpt(llpt: LowLevelProtocolType) -> PortMappingProtocol {
     match llpt {
         LowLevelProtocolType::UDP => PortMappingProtocol::UDP,
@@ -44,7 +43,6 @@ fn convert_llpt(llpt: LowLevelProtocolType) -> PortMappingProtocol {
     }
 }
 
-
 impl IGDManager {
     //
 
@@ -82,7 +80,8 @@ impl IGDManager {
                 IpAddr::V6(Ipv6Addr::new(0x2001, 0x4860, 0x4860, 0, 0, 0, 0, 0x8888)),
                 80,
             ),
-        }).map_err(|e| {
+        })
+        .map_err(|e| {
             log_net!(debug "failed to connect to dummy address: {}", e);
             e
         })
@@ -92,9 +91,7 @@ impl IGDManager {
     }
 
     #[instrument(level = "trace", target = "net", skip_all)]
-    fn find_local_ip(inner: &mut IGDManagerInner,
-        address_type: AddressType,
-    ) -> Option<IpAddr> {
+    fn find_local_ip(inner: &mut IGDManagerInner, address_type: AddressType) -> Option<IpAddr> {
         if let Some(ip) = inner.local_ip_addrs.get(&address_type) {
             return Some(*ip);
         }
@@ -112,10 +109,7 @@ impl IGDManager {
     }
 
     #[instrument(level = "trace", target = "net", skip_all)]
-    fn get_local_ip(
-        inner: &mut IGDManagerInner,
-        address_type: AddressType,
-    ) -> Option<IpAddr> {
+    fn get_local_ip(inner: &mut IGDManagerInner, address_type: AddressType) -> Option<IpAddr> {
         if let Some(ip) = inner.local_ip_addrs.get(&address_type) {
             return Some(*ip);
         }
@@ -123,20 +117,14 @@ impl IGDManager {
     }
 
     #[instrument(level = "trace", target = "net", skip_all)]
-    fn find_gateway(
-        inner: &mut IGDManagerInner,
-        local_ip: IpAddr,
-    ) -> Option<Arc<Gateway>> {
-
+    fn find_gateway(inner: &mut IGDManagerInner, local_ip: IpAddr) -> Option<Arc<Gateway>> {
         if let Some(gw) = inner.gateways.get(&local_ip) {
             return Some(gw.clone());
         }
 
         let gateway = match local_ip {
             IpAddr::V4(v4) => {
-                let mut opts = SearchOptions::new_v4(
-                    UPNP_GATEWAY_DETECT_TIMEOUT_MS as u64,
-                );
+                let mut opts = SearchOptions::new_v4(UPNP_GATEWAY_DETECT_TIMEOUT_MS as u64);
                 opts.bind_addr = SocketAddr::V4(SocketAddrV4::new(v4, 0));
 
                 match igd::search_gateway(opts) {
@@ -162,7 +150,6 @@ impl IGDManager {
                     }
                 }
             }
-
         };
         let gw = Arc::new(gateway);
         inner.gateways.insert(local_ip, gw.clone());
@@ -170,28 +157,33 @@ impl IGDManager {
     }
 
     #[instrument(level = "trace", target = "net", skip_all)]
-    fn get_gateway(
-        inner: &mut IGDManagerInner,
-        local_ip: IpAddr,
-    ) -> Option<Arc<Gateway>> {
+    fn get_gateway(inner: &mut IGDManagerInner, local_ip: IpAddr) -> Option<Arc<Gateway>> {
         if let Some(gw) = inner.gateways.get(&local_ip) {
             return Some(gw.clone());
         }
         None
     }
 
-    fn get_description(&self, llpt: LowLevelProtocolType, local_port:u16) -> String {
-        format!("{} map {} for port {}", self.config.get().program_name, convert_llpt(llpt), local_port )
+    fn get_description(&self, llpt: LowLevelProtocolType, local_port: u16) -> String {
+        format!(
+            "{} map {} for port {}",
+            self.config.get().program_name,
+            convert_llpt(llpt),
+            local_port
+        )
     }
 
     #[instrument(level = "trace", target = "net", skip_all)]
-    pub async fn unmap_port(&self,
+    pub async fn unmap_port(
+        &self,
         llpt: LowLevelProtocolType,
         at: AddressType,
         mapped_port: u16,
     ) -> Option<()> {
         let this = self.clone();
-        blocking_wrapper("igd unmap_port", move || {
+        blocking_wrapper(
+            "igd unmap_port",
+            move || {
                 let mut inner = this.inner.lock();
 
                 // If we already have this port mapped, just return the existing portmap
@@ -203,8 +195,10 @@ impl IGDManager {
                     }
                 }
                 let pmk = found?;
-                let _pmv = inner.port_maps.remove(&pmk).expect("key found but remove failed");
+                let _pmv = inner
+                    .port_maps
+                    .remove(&pmk)
+                    .expect("key found but remove failed");
 
                 // Get local ip address
                 let local_ip = Self::find_local_ip(&mut inner, at)?;
@@ -222,7 +216,9 @@ impl IGDManager {
                     }
                 };
                 Some(())
-        }, None)
+            },
+            None,
+        )
         .await
     }
 
@@ -310,7 +306,13 @@ impl IGDManager {
         .await
     }
 
-    #[instrument(level = "trace", target = "net", name = "IGDManager::tick", skip_all, err)]
+    #[instrument(
+        level = "trace",
+        target = "net",
+        name = "IGDManager::tick",
+        skip_all,
+        err
+    )]
     pub async fn tick(&self) -> EyreResult<bool> {
         // Refresh mappings if we have them
         // If an error is received, then return false to restart the local network
@@ -322,11 +324,12 @@ impl IGDManager {
 
             for (k, v) in &inner.port_maps {
                 let mapping_lifetime = now.saturating_sub(v.timestamp);
-                if mapping_lifetime >= UPNP_MAPPING_LIFETIME_US || v.renewal_attempts >= UPNP_MAPPING_ATTEMPTS {
+                if mapping_lifetime >= UPNP_MAPPING_LIFETIME_US
+                    || v.renewal_attempts >= UPNP_MAPPING_ATTEMPTS
+                {
                     // Past expiration time or tried N times, do a full renew and fail out if we can't
                     full_renews.push((*k, *v));
-                }
-                else if mapping_lifetime >= v.renewal_lifetime {
+                } else if mapping_lifetime >= v.renewal_lifetime {
                     // Attempt a normal renewal
                     renews.push((*k, *v));
                 }
@@ -340,12 +343,13 @@ impl IGDManager {
         }
 
         let this = self.clone();
-        blocking_wrapper("igd tick", move || {
+        blocking_wrapper(
+            "igd tick",
+            move || {
                 let mut inner = this.inner.lock();
 
                 // Process full renewals
                 for (k, v) in full_renews {
 
                     // Get local ip for address type
                     let local_ip = match Self::get_local_ip(&mut inner, k.at) {
                         Some(ip) => ip,
@@ -367,17 +371,27 @@ impl IGDManager {
                     inner.port_maps.remove(&k);
 
                     let desc = this.get_description(k.llpt, k.local_port);
-                    match gw.add_any_port(convert_llpt(k.llpt), SocketAddr::new(local_ip, k.local_port), (UPNP_MAPPING_LIFETIME_MS + 999) / 1000, &desc) {
+                    match gw.add_any_port(
+                        convert_llpt(k.llpt),
+                        SocketAddr::new(local_ip, k.local_port),
+                        (UPNP_MAPPING_LIFETIME_MS + 999) / 1000,
+                        &desc,
+                    ) {
                         Ok(mapped_port) => {
                             log_net!(debug "full-renewed mapped port {:?} -> {:?}", v, k);
-                            inner.port_maps.insert(k, PortMapValue {
+                            inner.port_maps.insert(
+                                k,
+                                PortMapValue {
                                     ext_ip: v.ext_ip,
                                     mapped_port,
                                     timestamp: Timestamp::now(),
-                                renewal_lifetime: TimestampDuration::new((UPNP_MAPPING_LIFETIME_MS / 2) as u64 * 1000u64),
+                                    renewal_lifetime: TimestampDuration::new(
+                                        (UPNP_MAPPING_LIFETIME_MS / 2) as u64 * 1000u64,
+                                    ),
                                     renewal_attempts: 0,
-                            });
-                        },
+                                },
+                            );
+                        }
                         Err(e) => {
                             info!("failed to full-renew mapped port {:?} -> {:?}: {}", v, k, e);
 
@@ -385,11 +399,9 @@ impl IGDManager {
                                 return Ok(false);
                             }
                         };
-
                 }
                 // Process normal renewals
                 for (k, mut v) in renews {
-
                     // Get local ip for address type
                     let local_ip = match Self::get_local_ip(&mut inner, k.at) {
                         Some(ip) => ip,
@@ -407,23 +419,35 @@ impl IGDManager {
                     };
 
                     let desc = this.get_description(k.llpt, k.local_port);
-                    match gw.add_port(convert_llpt(k.llpt), v.mapped_port, SocketAddr::new(local_ip, k.local_port), (UPNP_MAPPING_LIFETIME_MS + 999) / 1000, &desc) {
+                    match gw.add_port(
+                        convert_llpt(k.llpt),
+                        v.mapped_port,
+                        SocketAddr::new(local_ip, k.local_port),
+                        (UPNP_MAPPING_LIFETIME_MS + 999) / 1000,
+                        &desc,
+                    ) {
                         Ok(()) => {
                             log_net!("renewed mapped port {:?} -> {:?}", v, k);
 
-                            inner.port_maps.insert(k, PortMapValue {
+                            inner.port_maps.insert(
+                                k,
+                                PortMapValue {
                                     ext_ip: v.ext_ip,
                                     mapped_port: v.mapped_port,
                                     timestamp: Timestamp::now(),
-                                renewal_lifetime: ((UPNP_MAPPING_LIFETIME_MS / 2) as u64 * 1000u64).into(),
+                                    renewal_lifetime: ((UPNP_MAPPING_LIFETIME_MS / 2) as u64
+                                        * 1000u64)
+                                        .into(),
                                     renewal_attempts: 0,
-                            });
-                        },
+                                },
+                            );
+                        }
                         Err(e) => {
                             log_net!(debug "failed to renew mapped port {:?} -> {:?}: {}", v, k, e);
 
                             // Get closer to the maximum renewal timeline by a factor of two each time
-                            v.renewal_lifetime = (v.renewal_lifetime + UPNP_MAPPING_LIFETIME_US) / 2u64;
+                            v.renewal_lifetime =
+                                (v.renewal_lifetime + UPNP_MAPPING_LIFETIME_US) / 2u64;
                             v.renewal_attempts += 1;
 
                             // Store new value to try again
@@ -434,6 +458,10 @@ impl IGDManager {
 
                 // Normal exit, no restart
                 Ok(true)
-        }, Err(eyre!("failed to process blocking task"))).instrument(tracing::trace_span!("igd tick fut")).await
+            },
+            Err(eyre!("failed to process blocking task")),
+        )
+        .instrument(tracing::trace_span!("igd tick fut"))
+        .await
     }
 }
@@ -9,7 +9,7 @@ cfg_if! {
         pub use tokio::net::{TcpStream, TcpListener, UdpSocket};
         pub use tokio_util::compat::*;
     } else {
-        compile_error!("needs executor implementation")
+        compile_error!("needs executor implementation");
     }
 }
 
@@ -0,0 +1 @@
+
@@ -31,7 +31,7 @@ cfg_if! {
             WebsocketNetworkConnection<async_tls::client::TlsStream<Compat<TcpStream>>>;
         pub type WebsocketNetworkConnectionWS = WebsocketNetworkConnection<Compat<TcpStream>>;
     } else {
-        compile_error!("needs executor implementation")
+        compile_error!("needs executor implementation");
     }
 }
 
@@ -82,7 +82,6 @@ pub struct NetworkConnectionStats {
     last_message_recv_time: Option<Timestamp>,
 }
 
-
 #[derive(Debug)]
 pub(in crate::network_manager) struct NetworkConnection {
     connection_id: NetworkConnectionId,
@@ -104,7 +103,6 @@ impl Drop for NetworkConnection {
     }
 }
 
-
 impl NetworkConnection {
     pub(super) fn dummy(id: NetworkConnectionId, flow: Flow) -> Self {
         // Create handle for sending (dummy is immediately disconnected)
@@ -149,7 +147,9 @@ impl NetworkConnection {
         let local_stop_token = stop_source.token();
 
         // Spawn connection processor and pass in protocol connection
-        let processor = spawn("connection processor", Self::process_connection(
+        let processor = spawn(
+            "connection processor",
+            Self::process_connection(
                 connection_manager,
                 local_stop_token,
                 manager_stop_token,
@@ -158,7 +158,8 @@ impl NetworkConnection {
                 receiver,
                 protocol_connection,
                 stats.clone(),
-        ));
+            ),
+        );
 
         // Return the connection
         Self {
@@ -198,7 +199,7 @@ impl NetworkConnection {
         self.ref_count > 0
     }
 
-    pub fn protected_node_ref(&self) -> Option<NodeRef>{
+    pub fn protected_node_ref(&self) -> Option<NodeRef> {
         self.protected_nr.clone()
     }
 
@@ -221,7 +222,7 @@ impl NetworkConnection {
         }
     }
 
-    #[instrument(level="trace", target="net", skip_all)]
+    #[instrument(level = "trace", target = "net", skip_all)]
     async fn send_internal(
         protocol_connection: &ProtocolNetworkConnection,
         stats: Arc<Mutex<NetworkConnectionStats>>,
@@ -236,7 +237,7 @@ impl NetworkConnection {
         Ok(NetworkResult::Value(()))
     }
 
-    #[instrument(level="trace", target="net", skip_all)]
+    #[instrument(level = "trace", target = "net", skip_all)]
     async fn recv_internal(
         protocol_connection: &ProtocolNetworkConnection,
         stats: Arc<Mutex<NetworkConnectionStats>>,
@@ -309,7 +310,6 @@ impl NetworkConnection {
         let sender_fut = receiver.recv_async().then(|res| async {
             match res {
                 Ok((_span_id, message)) => {
-
                     // Touch the LRU for this connection
                     connection_manager.touch_connection_by_id(connection_id);
 
@@ -377,7 +377,6 @@ impl NetworkConnection {
                             log_net!(debug "failed to process received envelope: {}", e);
                             RecvLoopAction::Finish
                         } else {
-
                             // Touch the LRU for this connection
                             connection_manager.touch_connection_by_id(connection_id);
 
@@ -441,21 +440,34 @@ impl NetworkConnection {
                 if let Err(e) = protocol_connection.close().await {
                     log_net!(debug "Protocol connection close error: {}", e);
                 }
 
         }.in_current_span())
     }
 
     pub fn debug_print(&self, cur_ts: Timestamp) -> String {
-        format!("{} <- {} | {} | est {} sent {} rcvd {} refcount {}{}",
+        format!(
+            "{} <- {} | {} | est {} sent {} rcvd {} refcount {}{}",
             self.flow.remote_address(),
-            self.flow.local().map(|x| x.to_string()).unwrap_or("---".to_owned()),
+            self.flow
+                .local()
+                .map(|x| x.to_string())
+                .unwrap_or("---".to_owned()),
             self.connection_id.as_u64(),
-            debug_duration(cur_ts.as_u64().saturating_sub(self.established_time.as_u64())),
-            self.stats().last_message_sent_time.map(|ts| debug_duration(cur_ts.as_u64().saturating_sub(ts.as_u64())) ).unwrap_or("---".to_owned()),
-            self.stats().last_message_recv_time.map(|ts| debug_duration(cur_ts.as_u64().saturating_sub(ts.as_u64())) ).unwrap_or("---".to_owned()),
+            debug_duration(
+                cur_ts
+                    .as_u64()
+                    .saturating_sub(self.established_time.as_u64())
+            ),
+            self.stats()
+                .last_message_sent_time
+                .map(|ts| debug_duration(cur_ts.as_u64().saturating_sub(ts.as_u64())))
+                .unwrap_or("---".to_owned()),
+            self.stats()
+                .last_message_recv_time
+                .map(|ts| debug_duration(cur_ts.as_u64().saturating_sub(ts.as_u64())))
+                .unwrap_or("---".to_owned()),
             self.ref_count,
             if let Some(pnr) = &self.protected_nr {
-                format!(" PROTECTED:{}",pnr)
+                format!(" PROTECTED:{}", pnr)
             } else {
                 "".to_owned()
             }
@@ -12,7 +12,7 @@ impl NetworkManager {
     ///
    /// Sending to a node requires determining a NetworkClass compatible contact method
     /// between the source and destination node
-    #[instrument(level="trace", target="net", skip_all, err)]
+    #[instrument(level = "trace", target = "net", skip_all, err)]
     pub(crate) async fn send_data(
         &self,
         destination_node_ref: NodeRef,
@@ -20,15 +20,10 @@ impl NetworkManager {
     ) -> EyreResult<NetworkResult<SendDataMethod>> {
         // First try to send data to the last flow we've seen this peer on
         let data = if let Some(flow) = destination_node_ref.last_flow() {
-            match self
-                .net()
-                .send_data_to_existing_flow(flow, data)
-                .await?
-            {
+            match self.net().send_data_to_existing_flow(flow, data).await? {
                 SendDataToExistingFlowResult::Sent(unique_flow) => {
                     // Update timestamp for this last flow since we just sent to it
-                    destination_node_ref
-                        .set_last_flow(unique_flow.flow, Timestamp::now());
+                    destination_node_ref.set_last_flow(unique_flow.flow, Timestamp::now());
 
                     return Ok(NetworkResult::value(SendDataMethod {
                         opt_relayed_contact_method: None,
@@ -50,13 +45,20 @@ impl NetworkManager {
         // No existing connection was found or usable, so we proceed to see how to make a new one
 
         // Get the best way to contact this node
-        let possibly_relayed_contact_method = self.get_node_contact_method(destination_node_ref.clone())?;
+        let possibly_relayed_contact_method =
+            self.get_node_contact_method(destination_node_ref.clone())?;
 
-        self.try_possibly_relayed_contact_method(possibly_relayed_contact_method, destination_node_ref, data).await
+        self.try_possibly_relayed_contact_method(
+            possibly_relayed_contact_method,
+            destination_node_ref,
+            data,
+        )
+        .await
     }
 
-    #[instrument(level="trace", target="net", skip_all)]
-    pub(crate) fn try_possibly_relayed_contact_method(&self,
+    #[instrument(level = "trace", target = "net", skip_all)]
+    pub(crate) fn try_possibly_relayed_contact_method(
+        &self,
         possibly_relayed_contact_method: NodeContactMethod,
         destination_node_ref: NodeRef,
         data: Vec<u8>,
@@ -91,7 +93,6 @@ impl NetworkManager {
                     target_node_ref,
                     relay_nr,
                 );
-
             }
             | NodeContactMethod::InboundRelay(relay_nr) => {
                 // Relay loop or multiple relays
@@ -154,7 +155,7 @@ impl NetworkManager {
     }
 
     /// Send data using NodeContactMethod::Existing
-    #[instrument(level="trace", target="net", skip_all, err)]
+    #[instrument(level = "trace", target = "net", skip_all, err)]
     async fn send_data_ncm_existing(
         &self,
         target_node_ref: NodeRef,
|
|||||||
) -> EyreResult<NetworkResult<SendDataMethod>> {
|
) -> EyreResult<NetworkResult<SendDataMethod>> {
|
||||||
// First try to send data to the last connection we've seen this peer on
|
// First try to send data to the last connection we've seen this peer on
|
||||||
let Some(flow) = target_node_ref.last_flow() else {
|
let Some(flow) = target_node_ref.last_flow() else {
|
||||||
return Ok(NetworkResult::no_connection_other(
|
return Ok(NetworkResult::no_connection_other(format!(
|
||||||
format!("should have found an existing connection: {}", target_node_ref)
|
"should have found an existing connection: {}",
|
||||||
));
|
target_node_ref
|
||||||
|
)));
|
||||||
};
|
};
|
||||||
|
|
||||||
let unique_flow = match self
|
let unique_flow = match self.net().send_data_to_existing_flow(flow, data).await? {
|
||||||
.net()
|
|
||||||
.send_data_to_existing_flow(flow, data)
|
|
||||||
.await?
|
|
||||||
{
|
|
||||||
SendDataToExistingFlowResult::Sent(unique_flow) => unique_flow,
|
SendDataToExistingFlowResult::Sent(unique_flow) => unique_flow,
|
||||||
SendDataToExistingFlowResult::NotSent(_) => {
|
SendDataToExistingFlowResult::NotSent(_) => {
|
||||||
return Ok(NetworkResult::no_connection_other(
|
return Ok(NetworkResult::no_connection_other(
|
||||||
@ -183,43 +181,41 @@ impl NetworkManager {
|
|||||||
// Update timestamp for this last connection since we just sent to it
|
// Update timestamp for this last connection since we just sent to it
|
||||||
target_node_ref.set_last_flow(flow, Timestamp::now());
|
target_node_ref.set_last_flow(flow, Timestamp::now());
|
||||||
|
|
||||||
Ok(NetworkResult::value(SendDataMethod{
|
Ok(NetworkResult::value(SendDataMethod {
|
||||||
contact_method: NodeContactMethod::Existing,
|
contact_method: NodeContactMethod::Existing,
|
||||||
opt_relayed_contact_method: None,
|
opt_relayed_contact_method: None,
|
||||||
unique_flow
|
unique_flow,
|
||||||
}))
|
}))
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Send data using NodeContactMethod::Unreachable
|
/// Send data using NodeContactMethod::Unreachable
|
||||||
#[instrument(level="trace", target="net", skip_all, err)]
|
#[instrument(level = "trace", target = "net", skip_all, err)]
|
||||||
async fn send_data_ncm_unreachable(
|
async fn send_data_ncm_unreachable(
|
||||||
&self,
|
&self,
|
||||||
target_node_ref: NodeRef,
|
target_node_ref: NodeRef,
|
||||||
data: Vec<u8>,
|
data: Vec<u8>,
|
||||||
) -> EyreResult<NetworkResult<SendDataMethod>> {
|
) -> EyreResult<NetworkResult<SendDataMethod>> {
|
||||||
// Try to send data to the last socket we've seen this peer on
|
// Try to send data to the last socket we've seen this peer on
|
||||||
let Some(flow) = target_node_ref.last_flow() else {
|
let Some(flow) = target_node_ref.last_flow() else {
|
||||||
return Ok(NetworkResult::no_connection_other(
|
return Ok(NetworkResult::no_connection_other(format!(
|
||||||
format!("Node is not reachable and has no existing connection: {}", target_node_ref)
|
"Node is not reachable and has no existing connection: {}",
|
||||||
));
|
target_node_ref
|
||||||
};
|
)));
|
||||||
|
};
|
||||||
let unique_flow = match self
|
|
||||||
.net()
|
let unique_flow = match self.net().send_data_to_existing_flow(flow, data).await? {
|
||||||
.send_data_to_existing_flow(flow, data)
|
SendDataToExistingFlowResult::Sent(unique_flow) => unique_flow,
|
||||||
.await?
|
SendDataToExistingFlowResult::NotSent(_) => {
|
||||||
{
|
return Ok(NetworkResult::no_connection_other(format!(
|
||||||
SendDataToExistingFlowResult::Sent(unique_flow) => unique_flow,
|
"failed to send to unreachable node over existing connection: {:?}",
|
||||||
SendDataToExistingFlowResult::NotSent(_) => {
|
flow
|
||||||
return Ok(NetworkResult::no_connection_other(
|
)));
|
||||||
format!("failed to send to unreachable node over existing connection: {:?}", flow)
|
}
|
||||||
));
|
};
|
||||||
}
|
|
||||||
};
|
// Update timestamp for this last connection since we just sent to it
|
||||||
|
target_node_ref.set_last_flow(flow, Timestamp::now());
|
||||||
// Update timestamp for this last connection since we just sent to it
|
|
||||||
target_node_ref.set_last_flow(flow, Timestamp::now());
|
|
||||||
|
|
||||||
Ok(NetworkResult::value(SendDataMethod {
|
Ok(NetworkResult::value(SendDataMethod {
|
||||||
contact_method: NodeContactMethod::Existing,
|
contact_method: NodeContactMethod::Existing,
|
||||||
opt_relayed_contact_method: None,
|
opt_relayed_contact_method: None,
|
||||||
@ -228,7 +224,7 @@ impl NetworkManager {
|
|||||||
}
|
}
|
||||||
|
|
||||||
/// Send data using NodeContactMethod::SignalReverse
|
/// Send data using NodeContactMethod::SignalReverse
|
||||||
#[instrument(level="trace", target="net", skip_all, err)]
|
#[instrument(level = "trace", target = "net", skip_all, err)]
|
||||||
async fn send_data_ncm_signal_reverse(
|
async fn send_data_ncm_signal_reverse(
|
||||||
&self,
|
&self,
|
||||||
relay_nr: NodeRef,
|
relay_nr: NodeRef,
|
||||||
@ -237,20 +233,15 @@ impl NetworkManager {
|
|||||||
) -> EyreResult<NetworkResult<SendDataMethod>> {
|
) -> EyreResult<NetworkResult<SendDataMethod>> {
|
||||||
// First try to send data to the last socket we've seen this peer on
|
// First try to send data to the last socket we've seen this peer on
|
||||||
let data = if let Some(flow) = target_node_ref.last_flow() {
|
let data = if let Some(flow) = target_node_ref.last_flow() {
|
||||||
match self
|
match self.net().send_data_to_existing_flow(flow, data).await? {
|
||||||
.net()
|
|
||||||
.send_data_to_existing_flow(flow, data)
|
|
||||||
.await?
|
|
||||||
{
|
|
||||||
SendDataToExistingFlowResult::Sent(unique_flow) => {
|
SendDataToExistingFlowResult::Sent(unique_flow) => {
|
||||||
// Update timestamp for this last connection since we just sent to it
|
// Update timestamp for this last connection since we just sent to it
|
||||||
target_node_ref
|
target_node_ref.set_last_flow(flow, Timestamp::now());
|
||||||
.set_last_flow(flow, Timestamp::now());
|
|
||||||
|
|
||||||
return Ok(NetworkResult::value(SendDataMethod{
|
return Ok(NetworkResult::value(SendDataMethod {
|
||||||
contact_method: NodeContactMethod::Existing,
|
contact_method: NodeContactMethod::Existing,
|
||||||
opt_relayed_contact_method: None,
|
opt_relayed_contact_method: None,
|
||||||
unique_flow
|
unique_flow,
|
||||||
}));
|
}));
|
||||||
}
|
}
|
||||||
SendDataToExistingFlowResult::NotSent(data) => {
|
SendDataToExistingFlowResult::NotSent(data) => {
|
||||||
@ -276,7 +267,7 @@ impl NetworkManager {
|
|||||||
}
|
}
|
||||||
|
|
||||||
/// Send data using NodeContactMethod::SignalHolePunch
|
/// Send data using NodeContactMethod::SignalHolePunch
|
||||||
#[instrument(level="trace", target="net", skip_all, err)]
|
#[instrument(level = "trace", target = "net", skip_all, err)]
|
||||||
async fn send_data_ncm_signal_hole_punch(
|
async fn send_data_ncm_signal_hole_punch(
|
||||||
&self,
|
&self,
|
||||||
relay_nr: NodeRef,
|
relay_nr: NodeRef,
|
||||||
@ -285,20 +276,15 @@ impl NetworkManager {
|
|||||||
) -> EyreResult<NetworkResult<SendDataMethod>> {
|
) -> EyreResult<NetworkResult<SendDataMethod>> {
|
||||||
// First try to send data to the last socket we've seen this peer on
|
// First try to send data to the last socket we've seen this peer on
|
||||||
let data = if let Some(flow) = target_node_ref.last_flow() {
|
let data = if let Some(flow) = target_node_ref.last_flow() {
|
||||||
match self
|
match self.net().send_data_to_existing_flow(flow, data).await? {
|
||||||
.net()
|
|
||||||
.send_data_to_existing_flow(flow, data)
|
|
||||||
.await?
|
|
||||||
{
|
|
||||||
SendDataToExistingFlowResult::Sent(unique_flow) => {
|
SendDataToExistingFlowResult::Sent(unique_flow) => {
|
||||||
// Update timestamp for this last connection since we just sent to it
|
// Update timestamp for this last connection since we just sent to it
|
||||||
target_node_ref
|
target_node_ref.set_last_flow(flow, Timestamp::now());
|
||||||
.set_last_flow(flow, Timestamp::now());
|
|
||||||
|
|
||||||
return Ok(NetworkResult::value(SendDataMethod{
|
return Ok(NetworkResult::value(SendDataMethod {
|
||||||
contact_method: NodeContactMethod::Existing,
|
contact_method: NodeContactMethod::Existing,
|
||||||
opt_relayed_contact_method: None,
|
opt_relayed_contact_method: None,
|
||||||
unique_flow
|
unique_flow,
|
||||||
}));
|
}));
|
||||||
}
|
}
|
||||||
SendDataToExistingFlowResult::NotSent(data) => {
|
SendDataToExistingFlowResult::NotSent(data) => {
|
||||||
@ -312,8 +298,10 @@ impl NetworkManager {
|
|||||||
data
|
data
|
||||||
};
|
};
|
||||||
|
|
||||||
let unique_flow =
|
let unique_flow = network_result_try!(
|
||||||
network_result_try!(self.do_hole_punch(relay_nr.clone(), target_node_ref.clone(), data).await?);
|
self.do_hole_punch(relay_nr.clone(), target_node_ref.clone(), data)
|
||||||
|
.await?
|
||||||
|
);
|
||||||
Ok(NetworkResult::value(SendDataMethod {
|
Ok(NetworkResult::value(SendDataMethod {
|
||||||
contact_method: NodeContactMethod::SignalHolePunch(relay_nr, target_node_ref),
|
contact_method: NodeContactMethod::SignalHolePunch(relay_nr, target_node_ref),
|
||||||
opt_relayed_contact_method: None,
|
opt_relayed_contact_method: None,
|
||||||
@ -322,7 +310,7 @@ impl NetworkManager {
|
|||||||
}
|
}
|
||||||
|
|
||||||
/// Send data using NodeContactMethod::Direct
|
/// Send data using NodeContactMethod::Direct
|
||||||
#[instrument(level="trace", target="net", skip_all, err)]
|
#[instrument(level = "trace", target = "net", skip_all, err)]
|
||||||
async fn send_data_ncm_direct(
|
async fn send_data_ncm_direct(
|
||||||
&self,
|
&self,
|
||||||
node_ref: NodeRef,
|
node_ref: NodeRef,
|
||||||
@ -340,19 +328,15 @@ impl NetworkManager {
|
|||||||
flow, node_ref
|
flow, node_ref
|
||||||
);
|
);
|
||||||
|
|
||||||
match self
|
match self.net().send_data_to_existing_flow(flow, data).await? {
|
||||||
.net()
|
|
||||||
.send_data_to_existing_flow(flow, data)
|
|
||||||
.await?
|
|
||||||
{
|
|
||||||
SendDataToExistingFlowResult::Sent(unique_flow) => {
|
SendDataToExistingFlowResult::Sent(unique_flow) => {
|
||||||
// Update timestamp for this last connection since we just sent to it
|
// Update timestamp for this last connection since we just sent to it
|
||||||
node_ref.set_last_flow(flow, Timestamp::now());
|
node_ref.set_last_flow(flow, Timestamp::now());
|
||||||
|
|
||||||
return Ok(NetworkResult::value(SendDataMethod{
|
return Ok(NetworkResult::value(SendDataMethod {
|
||||||
contact_method: NodeContactMethod::Existing,
|
contact_method: NodeContactMethod::Existing,
|
||||||
opt_relayed_contact_method: None,
|
opt_relayed_contact_method: None,
|
||||||
unique_flow
|
unique_flow,
|
||||||
}));
|
}));
|
||||||
}
|
}
|
||||||
SendDataToExistingFlowResult::NotSent(d) => {
|
SendDataToExistingFlowResult::NotSent(d) => {
|
||||||
@ -366,8 +350,11 @@ impl NetworkManager {
|
|||||||
};
|
};
|
||||||
|
|
||||||
// New direct connection was necessary for this dial info
|
// New direct connection was necessary for this dial info
|
||||||
let unique_flow =
|
let unique_flow = network_result_try!(
|
||||||
network_result_try!(self.net().send_data_to_dial_info(dial_info.clone(), data).await?);
|
self.net()
|
||||||
|
.send_data_to_dial_info(dial_info.clone(), data)
|
||||||
|
.await?
|
||||||
|
);
|
||||||
|
|
||||||
// If we connected to this node directly, save off the last connection so we can use it again
|
// If we connected to this node directly, save off the last connection so we can use it again
|
||||||
node_ref.set_last_flow(unique_flow.flow, Timestamp::now());
|
node_ref.set_last_flow(unique_flow.flow, Timestamp::now());
|
||||||
@@ -382,7 +369,7 @@ impl NetworkManager {
     /// Figure out how to reach a node from our own node over the best routing domain and reference the nodes we want to access
     /// Uses NodeRefs to ensure nodes are referenced, this is not a part of 'RoutingTable' because RoutingTable is not
     /// allowed to use NodeRefs due to recursive locking
-    #[instrument(level="trace", target="net", skip_all, err)]
+    #[instrument(level = "trace", target = "net", skip_all, err)]
     pub(crate) fn get_node_contact_method(
         &self,
         target_node_ref: NodeRef,
@@ -390,7 +377,11 @@ impl NetworkManager {
         let routing_table = self.routing_table();
 
         // If a node is punished, then don't try to contact it
-        if target_node_ref.node_ids().iter().any(|nid| self.address_filter().is_node_id_punished(*nid)) {
+        if target_node_ref
+            .node_ids()
+            .iter()
+            .any(|nid| self.address_filter().is_node_id_punished(*nid))
+        {
             return Ok(NodeContactMethod::Unreachable);
         }
 
@@ -432,7 +423,8 @@ impl NetworkManager {
         let dial_info_filter = target_node_ref.dial_info_filter().filtered(
             &DialInfoFilter::all()
                 .with_address_type_set(peer_a.signed_node_info().node_info().address_types())
-                .with_protocol_type_set(peer_a.signed_node_info().node_info().outbound_protocols()));
+                .with_protocol_type_set(peer_a.signed_node_info().node_info().outbound_protocols()),
+        );
         let sequencing = target_node_ref.sequencing();
 
         // If the node has had lost questions or failures to send, prefer sequencing
@@ -447,7 +439,11 @@ impl NetworkManager {
         // Deprioritize dial info that have recently failed
         let address_filter = self.address_filter();
         let mut dial_info_failures_map = BTreeMap::<DialInfo, Timestamp>::new();
-        for did in peer_b.signed_node_info().node_info().all_filtered_dial_info_details(DialInfoDetail::NO_SORT, |_| true) {
+        for did in peer_b
+            .signed_node_info()
+            .node_info()
+            .all_filtered_dial_info_details(DialInfoDetail::NO_SORT, |_| true)
+        {
             if let Some(ts) = address_filter.get_dial_info_failed_ts(&did.dial_info) {
                 dial_info_failures_map.insert(did.dial_info, ts);
             }
@@ -456,8 +452,14 @@ impl NetworkManager {
             None
         } else {
             Some(Arc::new(move |a: &DialInfoDetail, b: &DialInfoDetail| {
-                let ats = dial_info_failures_map.get(&a.dial_info).copied().unwrap_or_default();
-                let bts = dial_info_failures_map.get(&b.dial_info).copied().unwrap_or_default();
+                let ats = dial_info_failures_map
+                    .get(&a.dial_info)
+                    .copied()
+                    .unwrap_or_default();
+                let bts = dial_info_failures_map
+                    .get(&b.dial_info)
+                    .copied()
+                    .unwrap_or_default();
                 ats.cmp(&bts)
             }))
         };
@@ -491,7 +493,8 @@ impl NetworkManager {
                     bail!("signalreverse target noderef didn't match target key: {:?} != {} for relay {}", target_node_ref, target_key, relay_key );
                 }
                 relay_nr.set_sequencing(sequencing);
-                let target_node_ref = target_node_ref.filtered_clone(NodeRefFilter::from(dial_info_filter));
+                let target_node_ref =
+                    target_node_ref.filtered_clone(NodeRefFilter::from(dial_info_filter));
                 NodeContactMethod::SignalReverse(relay_nr, target_node_ref)
             }
             ContactMethod::SignalHolePunch(relay_key, target_key) => {
@@ -511,8 +514,11 @@ impl NetworkManager {
 
                 // if any other protocol were possible here we could update this and do_hole_punch
                 // but tcp hole punch is very very unreliable it seems
-                let udp_target_node_ref = target_node_ref
-                    .filtered_clone(NodeRefFilter::new().with_dial_info_filter(dial_info_filter).with_protocol_type(ProtocolType::UDP));
+                let udp_target_node_ref = target_node_ref.filtered_clone(
+                    NodeRefFilter::new()
+                        .with_dial_info_filter(dial_info_filter)
+                        .with_protocol_type(ProtocolType::UDP),
+                );
 
                 NodeContactMethod::SignalHolePunch(relay_nr, udp_target_node_ref)
             }
@@ -555,14 +561,13 @@ impl NetworkManager {
     /// Send a reverse connection signal and wait for the return receipt over it
     /// Then send the data across the new connection
     /// Only usable for PublicInternet routing domain
-    #[instrument(level="trace", target="net", skip_all, err)]
+    #[instrument(level = "trace", target = "net", skip_all, err)]
     async fn do_reverse_connect(
         &self,
         relay_nr: NodeRef,
         target_nr: NodeRef,
         data: Vec<u8>,
     ) -> EyreResult<NetworkResult<UniqueFlow>> {
-
         // Detect if network is stopping so we can break out of this
         let Some(stop_token) = self.unlocked_inner.startup_lock.stop_token() else {
             return Ok(NetworkResult::service_unavailable("network is stopping"));
@@ -580,18 +585,20 @@ impl NetworkManager {
 
         // Get target routing domain
         let Some(routing_domain) = target_nr.best_routing_domain() else {
-            return Ok(NetworkResult::no_connection_other("No routing domain for target for reverse connect"));
+            return Ok(NetworkResult::no_connection_other(
+                "No routing domain for target for reverse connect",
+            ));
         };
 
         // Ensure we have a valid network class so our peer info is useful
-        if !self.routing_table().has_valid_network_class(routing_domain){
-            return Ok(NetworkResult::no_connection_other("Network class not yet valid for reverse connect"));
+        if !self.routing_table().has_valid_network_class(routing_domain) {
+            return Ok(NetworkResult::no_connection_other(
+                "Network class not yet valid for reverse connect",
+            ));
         };
 
         // Get our peer info
-        let peer_info = self
-            .routing_table()
-            .get_own_peer_info(routing_domain);
+        let peer_info = self.routing_table().get_own_peer_info(routing_domain);
 
         // Issue the signal
         let rpc = self.rpc_processor();
@@ -604,7 +611,11 @@ impl NetworkManager {
         .wrap_err("failed to send signal")?);
 
         // Wait for the return receipt
-        let inbound_nr = match eventual_value.timeout_at(stop_token).in_current_span().await {
+        let inbound_nr = match eventual_value
+            .timeout_at(stop_token)
+            .in_current_span()
+            .await
+        {
             Err(_) => {
                 return Ok(NetworkResult::service_unavailable("network is stopping"));
             }
@@ -640,27 +651,26 @@ impl NetworkManager {
 
         // And now use the existing connection to send over
         if let Some(flow) = inbound_nr.last_flow() {
-            match self
-                .net()
-                .send_data_to_existing_flow(flow, data)
-                .await?
-            {
-                SendDataToExistingFlowResult::Sent(unique_flow) => Ok(NetworkResult::value(unique_flow)),
+            match self.net().send_data_to_existing_flow(flow, data).await? {
+                SendDataToExistingFlowResult::Sent(unique_flow) => {
+                    Ok(NetworkResult::value(unique_flow))
+                }
                 SendDataToExistingFlowResult::NotSent(_) => Ok(NetworkResult::no_connection_other(
                     "unable to send over reverse connection",
                 )),
             }
         } else {
             return Ok(NetworkResult::no_connection_other(format!(
-                "reverse connection dropped from {}", target_nr)
-            ));
+                "reverse connection dropped from {}",
+                target_nr
+            )));
         }
     }
 
     /// Send a hole punch signal and do a negotiating ping and wait for the return receipt
     /// Then send the data across the new connection
     /// Only usable for PublicInternet routing domain
-    #[instrument(level="trace", target="net", skip_all, err)]
+    #[instrument(level = "trace", target = "net", skip_all, err)]
     async fn do_hole_punch(
         &self,
         relay_nr: NodeRef,
@@ -691,18 +701,20 @@ impl NetworkManager {
 
         // Get target routing domain
         let Some(routing_domain) = target_nr.best_routing_domain() else {
-            return Ok(NetworkResult::no_connection_other("No routing domain for target for hole punch"));
+            return Ok(NetworkResult::no_connection_other(
+                "No routing domain for target for hole punch",
+            ));
        };
 
         // Ensure we have a valid network class so our peer info is useful
-        if !self.routing_table().has_valid_network_class(routing_domain){
-            return Ok(NetworkResult::no_connection_other("Network class not yet valid for hole punch"));
+        if !self.routing_table().has_valid_network_class(routing_domain) {
+            return Ok(NetworkResult::no_connection_other(
+                "Network class not yet valid for hole punch",
+            ));
         };
 
         // Get our peer info
-        let peer_info = self
-            .routing_table()
-            .get_own_peer_info(routing_domain);
+        let peer_info = self.routing_table().get_own_peer_info(routing_domain);
 
         // Get the udp direct dialinfo for the hole punch
         let hole_punch_did = target_nr
|
|||||||
.wrap_err("failed to send signal")?);
|
.wrap_err("failed to send signal")?);
|
||||||
|
|
||||||
// Wait for the return receipt
|
// Wait for the return receipt
|
||||||
let inbound_nr = match eventual_value.timeout_at(stop_token).in_current_span().await {
|
let inbound_nr = match eventual_value
|
||||||
|
.timeout_at(stop_token)
|
||||||
|
.in_current_span()
|
||||||
|
.await
|
||||||
|
{
|
||||||
Err(_) => {
|
Err(_) => {
|
||||||
return Ok(NetworkResult::service_unavailable("network is stopping"));
|
return Ok(NetworkResult::service_unavailable("network is stopping"));
|
||||||
}
|
}
|
||||||
@@ -770,20 +786,19 @@ impl NetworkManager {
 
         // And now use the existing connection to send over
         if let Some(flow) = inbound_nr.last_flow() {
-            match self
-                .net()
-                .send_data_to_existing_flow(flow, data)
-                .await?
-            {
-                SendDataToExistingFlowResult::Sent(unique_flow) => Ok(NetworkResult::value(unique_flow)),
+            match self.net().send_data_to_existing_flow(flow, data).await? {
+                SendDataToExistingFlowResult::Sent(unique_flow) => {
+                    Ok(NetworkResult::value(unique_flow))
+                }
                 SendDataToExistingFlowResult::NotSent(_) => Ok(NetworkResult::no_connection_other(
                     "unable to send over hole punch",
                 )),
             }
         } else {
             return Ok(NetworkResult::no_connection_other(format!(
-                "hole punch dropped from {}", target_nr)
-            ));
+                "hole punch dropped from {}",
+                target_nr
+            )));
         }
     }
 }
@@ -1,7 +1,6 @@
 use super::*;
 use core::sync::atomic::{AtomicU32, Ordering};
 
-
 /// Reliable pings are done with increased spacing between pings
 
 /// - Start secs is the number of seconds between the first two pings
@@ -75,12 +74,10 @@ impl BucketEntryState {
             BucketEntryState::Reliable => 3,
         }
     }
-
 }
 
 impl From<BucketEntryStateReason> for BucketEntryState {
-    fn from(value: BucketEntryStateReason) -> Self
-    {
+    fn from(value: BucketEntryStateReason) -> Self {
         match value {
             BucketEntryStateReason::Punished(_) => BucketEntryState::Punished,
             BucketEntryStateReason::Dead(_) => BucketEntryState::Dead,
@@ -90,7 +87,6 @@ impl From<BucketEntryStateReason> for BucketEntryState {
     }
 }
 
-
 #[derive(Debug, Clone, Eq, PartialEq, PartialOrd, Ord, Hash)]
 pub(crate) struct LastFlowKey(ProtocolType, AddressType);
 
@@ -223,7 +219,11 @@ impl BucketEntryInner {
     }
 
     /// All-of capability check
-    pub fn has_all_capabilities(&self, routing_domain: RoutingDomain, capabilities: &[Capability]) -> bool {
+    pub fn has_all_capabilities(
+        &self,
+        routing_domain: RoutingDomain,
+        capabilities: &[Capability],
+    ) -> bool {
         let Some(ni) = self.node_info(routing_domain) else {
             return false;
         };
@@ -231,7 +231,11 @@ impl BucketEntryInner {
     }
 
     /// Any-of capability check
-    pub fn has_any_capabilities(&self, routing_domain: RoutingDomain, capabilities: &[Capability]) -> bool {
+    pub fn has_any_capabilities(
+        &self,
+        routing_domain: RoutingDomain,
+        capabilities: &[Capability],
+    ) -> bool {
         let Some(ni) = self.node_info(routing_domain) else {
             return false;
         };
@@ -300,7 +304,9 @@ impl BucketEntryInner {
     }
 
     #[allow(dead_code)]
-    pub fn sort_fastest_reliable_fn(cur_ts: Timestamp) -> impl FnMut(&Self, &Self) -> std::cmp::Ordering {
+    pub fn sort_fastest_reliable_fn(
+        cur_ts: Timestamp,
+    ) -> impl FnMut(&Self, &Self) -> std::cmp::Ordering {
         move |e1, e2| Self::cmp_fastest_reliable(cur_ts, e1, e2)
     }
 
@@ -398,11 +404,7 @@ impl BucketEntryInner {
         }
 
         // Check connections
-        let last_flows = self.last_flows(
-            rti,
-            true,
-            NodeRefFilter::from(routing_domain),
-        );
+        let last_flows = self.last_flows(rti, true, NodeRefFilter::from(routing_domain));
         !last_flows.is_empty()
     }
 
@ -429,10 +431,9 @@ impl BucketEntryInner {
|
|||||||
};
|
};
|
||||||
// Peer info includes all node ids, even unvalidated ones
|
// Peer info includes all node ids, even unvalidated ones
|
||||||
let node_ids = self.node_ids();
|
let node_ids = self.node_ids();
|
||||||
opt_current_sni.as_ref().map(|s| PeerInfo::new(
|
opt_current_sni
|
||||||
node_ids,
|
.as_ref()
|
||||||
*s.clone(),
|
.map(|s| PeerInfo::new(node_ids, *s.clone()))
|
||||||
))
|
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn best_routing_domain(
|
pub fn best_routing_domain(
|
||||||
@ -452,15 +453,9 @@ impl BucketEntryInner {
|
|||||||
}
|
}
|
||||||
// Check connections
|
// Check connections
|
||||||
let mut best_routing_domain: Option<RoutingDomain> = None;
|
let mut best_routing_domain: Option<RoutingDomain> = None;
|
||||||
let last_connections = self.last_flows(
|
let last_connections = self.last_flows(rti, true, NodeRefFilter::from(routing_domain_set));
|
||||||
rti,
|
|
||||||
true,
|
|
||||||
NodeRefFilter::from(routing_domain_set),
|
|
||||||
);
|
|
||||||
for lc in last_connections {
|
for lc in last_connections {
|
||||||
if let Some(rd) =
|
if let Some(rd) = rti.routing_domain_for_address(lc.0.remote_address().address()) {
|
||||||
rti.routing_domain_for_address(lc.0.remote_address().address())
|
|
||||||
{
|
|
||||||
if let Some(brd) = best_routing_domain {
|
if let Some(brd) = best_routing_domain {
|
||||||
if rd < brd {
|
if rd < brd {
|
||||||
best_routing_domain = Some(rd);
|
best_routing_domain = Some(rd);
|
||||||
@ -474,10 +469,7 @@ impl BucketEntryInner {
|
|||||||
}
|
}
|
||||||
|
|
||||||
fn flow_to_key(&self, last_flow: Flow) -> LastFlowKey {
|
fn flow_to_key(&self, last_flow: Flow) -> LastFlowKey {
|
||||||
LastFlowKey(
|
LastFlowKey(last_flow.protocol_type(), last_flow.address_type())
|
||||||
last_flow.protocol_type(),
|
|
||||||
last_flow.address_type(),
|
|
||||||
)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Stores a flow in this entry's table of last flows
|
// Stores a flow in this entry's table of last flows
|
||||||
@ -487,15 +479,13 @@ impl BucketEntryInner {
|
|||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
let key = self.flow_to_key(last_flow);
|
let key = self.flow_to_key(last_flow);
|
||||||
self.last_flows
|
self.last_flows.insert(key, (last_flow, timestamp));
|
||||||
.insert(key, (last_flow, timestamp));
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Removes a flow in this entry's table of last flows
|
// Removes a flow in this entry's table of last flows
|
||||||
pub fn remove_last_flow(&mut self, last_flow: Flow) {
|
pub fn remove_last_flow(&mut self, last_flow: Flow) {
|
||||||
let key = self.flow_to_key(last_flow);
|
let key = self.flow_to_key(last_flow);
|
||||||
self.last_flows
|
self.last_flows.remove(&key);
|
||||||
.remove(&key);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Clears the table of last flows to ensure we create new ones and drop any existing ones
|
// Clears the table of last flows to ensure we create new ones and drop any existing ones
|
||||||
@ -509,7 +499,7 @@ impl BucketEntryInner {
|
|||||||
// No last_connections
|
// No last_connections
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
let mut dead_keys = Vec::with_capacity(self.last_flows.len()-1);
|
let mut dead_keys = Vec::with_capacity(self.last_flows.len() - 1);
|
||||||
let mut most_recent_flow = None;
|
let mut most_recent_flow = None;
|
||||||
let mut most_recent_flow_time = 0u64;
|
let mut most_recent_flow_time = 0u64;
|
||||||
for (k, v) in &self.last_flows {
|
for (k, v) in &self.last_flows {
|
||||||
@ -539,8 +529,7 @@ impl BucketEntryInner {
|
|||||||
only_live: bool,
|
only_live: bool,
|
||||||
filter: NodeRefFilter,
|
filter: NodeRefFilter,
|
||||||
) -> Vec<(Flow, Timestamp)> {
|
) -> Vec<(Flow, Timestamp)> {
|
||||||
let opt_connection_manager =
|
let opt_connection_manager = rti.unlocked_inner.network_manager.opt_connection_manager();
|
||||||
rti.unlocked_inner.network_manager.opt_connection_manager();
|
|
||||||
|
|
||||||
let mut out: Vec<(Flow, Timestamp)> = self
|
let mut out: Vec<(Flow, Timestamp)> = self
|
||||||
.last_flows
|
.last_flows
|
||||||
@ -588,9 +577,7 @@ impl BucketEntryInner {
|
|||||||
})
|
})
|
||||||
.collect();
|
.collect();
|
||||||
// Sort with newest timestamps
|
// Sort with newest timestamps
|
||||||
out.sort_by(|a, b| {
|
out.sort_by(|a, b| b.1.cmp(&a.1));
|
||||||
b.1.cmp(&a.1)
|
|
||||||
});
|
|
||||||
out
|
out
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -615,7 +602,11 @@ impl BucketEntryInner {
|
|||||||
}
|
}
|
||||||
|
|
||||||
pub fn best_envelope_version(&self) -> Option<u8> {
|
pub fn best_envelope_version(&self) -> Option<u8> {
|
||||||
self.envelope_support.iter().rev().find(|x| VALID_ENVELOPE_VERSIONS.contains(x)).copied()
|
self.envelope_support
|
||||||
|
.iter()
|
||||||
|
.rev()
|
||||||
|
.find(|x| VALID_ENVELOPE_VERSIONS.contains(x))
|
||||||
|
.copied()
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn state_reason(&self, cur_ts: Timestamp) -> BucketEntryStateReason {
|
pub fn state_reason(&self, cur_ts: Timestamp) -> BucketEntryStateReason {
|
||||||
@ -657,14 +648,8 @@ impl BucketEntryInner {
|
|||||||
}
|
}
|
||||||
pub fn node_status(&self, routing_domain: RoutingDomain) -> Option<NodeStatus> {
|
pub fn node_status(&self, routing_domain: RoutingDomain) -> Option<NodeStatus> {
|
||||||
match routing_domain {
|
match routing_domain {
|
||||||
RoutingDomain::LocalNetwork => self
|
RoutingDomain::LocalNetwork => self.local_network.node_status.as_ref().cloned(),
|
||||||
.local_network
|
RoutingDomain::PublicInternet => self.public_internet.node_status.as_ref().cloned(),
|
||||||
.node_status
|
|
||||||
.as_ref().cloned(),
|
|
||||||
RoutingDomain::PublicInternet => self
|
|
||||||
.public_internet
|
|
||||||
.node_status
|
|
||||||
.as_ref().cloned()
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -714,7 +699,10 @@ impl BucketEntryInner {
|
|||||||
}
|
}
|
||||||
|
|
||||||
///// state machine handling
|
///// state machine handling
|
||||||
pub(super) fn check_unreliable(&self, cur_ts: Timestamp) -> Option<BucketEntryUnreliableReason> {
|
pub(super) fn check_unreliable(
|
||||||
|
&self,
|
||||||
|
cur_ts: Timestamp,
|
||||||
|
) -> Option<BucketEntryUnreliableReason> {
|
||||||
// If we have had any failures to send, this is not reliable
|
// If we have had any failures to send, this is not reliable
|
||||||
if self.peer_stats.rpc_stats.failed_to_send > 0 {
|
if self.peer_stats.rpc_stats.failed_to_send > 0 {
|
||||||
return Some(BucketEntryUnreliableReason::FailedToSend);
|
return Some(BucketEntryUnreliableReason::FailedToSend);
|
||||||
@ -730,7 +718,8 @@ impl BucketEntryInner {
|
|||||||
None => return Some(BucketEntryUnreliableReason::NotSeenConsecutively),
|
None => return Some(BucketEntryUnreliableReason::NotSeenConsecutively),
|
||||||
// If not have seen the node consistently for longer than UNRELIABLE_PING_SPAN_SECS then it is unreliable
|
// If not have seen the node consistently for longer than UNRELIABLE_PING_SPAN_SECS then it is unreliable
|
||||||
Some(ts) => {
|
Some(ts) => {
|
||||||
let seen_consecutively = cur_ts.saturating_sub(ts) >= TimestampDuration::new(UNRELIABLE_PING_SPAN_SECS as u64 * 1_000_000u64);
|
let seen_consecutively = cur_ts.saturating_sub(ts)
|
||||||
|
>= TimestampDuration::new(UNRELIABLE_PING_SPAN_SECS as u64 * 1_000_000u64);
|
||||||
if !seen_consecutively {
|
if !seen_consecutively {
|
||||||
return Some(BucketEntryUnreliableReason::InUnreliablePingSpan);
|
return Some(BucketEntryUnreliableReason::InUnreliablePingSpan);
|
||||||
}
|
}
|
||||||
@ -749,19 +738,22 @@ impl BucketEntryInner {
|
|||||||
// a node is not dead if we haven't heard from it yet,
|
// a node is not dead if we haven't heard from it yet,
|
||||||
// but we give it NEVER_REACHED_PING_COUNT chances to ping before we say it's dead
|
// but we give it NEVER_REACHED_PING_COUNT chances to ping before we say it's dead
|
||||||
None => {
|
None => {
|
||||||
let no_answers = self.peer_stats.rpc_stats.recent_lost_answers >= NEVER_SEEN_PING_COUNT;
|
let no_answers =
|
||||||
|
self.peer_stats.rpc_stats.recent_lost_answers >= NEVER_SEEN_PING_COUNT;
|
||||||
if no_answers {
|
if no_answers {
|
||||||
return Some(BucketEntryDeadReason::TooManyLostAnswers)
|
return Some(BucketEntryDeadReason::TooManyLostAnswers);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// return dead if we have not heard from the node at all for the duration of the unreliable ping span
|
// return dead if we have not heard from the node at all for the duration of the unreliable ping span
|
||||||
// and we have tried to reach it and failed the entire time of unreliable ping span
|
// and we have tried to reach it and failed the entire time of unreliable ping span
|
||||||
Some(ts) => {
|
Some(ts) => {
|
||||||
let not_seen = cur_ts.saturating_sub(ts) >= TimestampDuration::new(UNRELIABLE_PING_SPAN_SECS as u64 * 1_000_000u64);
|
let not_seen = cur_ts.saturating_sub(ts)
|
||||||
let no_answers = self.peer_stats.rpc_stats.recent_lost_answers >= (UNRELIABLE_PING_SPAN_SECS / UNRELIABLE_PING_INTERVAL_SECS);
|
>= TimestampDuration::new(UNRELIABLE_PING_SPAN_SECS as u64 * 1_000_000u64);
|
||||||
|
let no_answers = self.peer_stats.rpc_stats.recent_lost_answers
|
||||||
|
>= (UNRELIABLE_PING_SPAN_SECS / UNRELIABLE_PING_INTERVAL_SECS);
|
||||||
if not_seen && no_answers {
|
if not_seen && no_answers {
|
||||||
return Some(BucketEntryDeadReason::NoPingResponse)
|
return Some(BucketEntryDeadReason::NoPingResponse);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -809,7 +801,9 @@ impl BucketEntryInner {
|
|||||||
let first_consecutive_seen_ts =
|
let first_consecutive_seen_ts =
|
||||||
self.peer_stats.rpc_stats.first_consecutive_seen_ts.unwrap();
|
self.peer_stats.rpc_stats.first_consecutive_seen_ts.unwrap();
|
||||||
let start_of_reliable_time = first_consecutive_seen_ts
|
let start_of_reliable_time = first_consecutive_seen_ts
|
||||||
+ TimestampDuration::new_secs(UNRELIABLE_PING_SPAN_SECS - UNRELIABLE_PING_INTERVAL_SECS);
|
+ TimestampDuration::new_secs(
|
||||||
|
UNRELIABLE_PING_SPAN_SECS - UNRELIABLE_PING_INTERVAL_SECS,
|
||||||
|
);
|
||||||
let reliable_cur = cur_ts.saturating_sub(start_of_reliable_time);
|
let reliable_cur = cur_ts.saturating_sub(start_of_reliable_time);
|
||||||
let reliable_last =
|
let reliable_last =
|
||||||
latest_contact_time.saturating_sub(start_of_reliable_time);
|
latest_contact_time.saturating_sub(start_of_reliable_time);
|
||||||
@ -826,7 +820,10 @@ impl BucketEntryInner {
|
|||||||
}
|
}
|
||||||
BucketEntryState::Unreliable => {
|
BucketEntryState::Unreliable => {
|
||||||
// If we are in an unreliable state, we need a ping every UNRELIABLE_PING_INTERVAL_SECS seconds
|
// If we are in an unreliable state, we need a ping every UNRELIABLE_PING_INTERVAL_SECS seconds
|
||||||
self.needs_constant_ping(cur_ts, TimestampDuration::new(UNRELIABLE_PING_INTERVAL_SECS as u64 * 1_000_000u64))
|
self.needs_constant_ping(
|
||||||
|
cur_ts,
|
||||||
|
TimestampDuration::new(UNRELIABLE_PING_INTERVAL_SECS as u64 * 1_000_000u64),
|
||||||
|
)
|
||||||
}
|
}
|
||||||
BucketEntryState::Dead => {
|
BucketEntryState::Dead => {
|
||||||
error!("Should not be asking this for dead nodes");
|
error!("Should not be asking this for dead nodes");
|
||||||
@ -836,7 +833,6 @@ impl BucketEntryInner {
|
|||||||
error!("Should not be asking this for punished nodes");
|
error!("Should not be asking this for punished nodes");
|
||||||
false
|
false
|
||||||
}
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -941,7 +937,6 @@ pub(crate) struct BucketEntry {
|
|||||||
|
|
||||||
impl BucketEntry {
|
impl BucketEntry {
|
||||||
pub(super) fn new(first_node_id: TypedKey) -> Self {
|
pub(super) fn new(first_node_id: TypedKey) -> Self {
|
||||||
|
|
||||||
// First node id should always be one we support since TypedKeySets are sorted and we must have at least one supported key
|
// First node id should always be one we support since TypedKeySets are sorted and we must have at least one supported key
|
||||||
assert!(VALID_CRYPTO_KINDS.contains(&first_node_id.kind));
|
assert!(VALID_CRYPTO_KINDS.contains(&first_node_id.kind));
|
||||||
|
|
||||||
|
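The unreliable/dead checks above boil down to saturating timestamp arithmetic against a ping span. A minimal standalone sketch of that idea, assuming timestamps are plain microsecond counters and assuming a span value of 60 seconds (the real code wraps these in Timestamp/TimestampDuration):

/// Span a node must be seen consecutively before it counts as reliable,
/// mirroring UNRELIABLE_PING_SPAN_SECS in the diff above (value assumed)
const UNRELIABLE_PING_SPAN_SECS: u64 = 60;

/// Returns true if `first_seen_us` is long enough before `now_us` that the
/// node has been seen consecutively for the whole unreliable span
fn seen_consecutively(now_us: u64, first_seen_us: u64) -> bool {
    // saturating_sub avoids underflow if the clock moves backwards
    now_us.saturating_sub(first_seen_us) >= UNRELIABLE_PING_SPAN_SECS * 1_000_000
}

fn main() {
    let now = 120_000_000u64; // 120s in microseconds
    assert!(seen_consecutively(now, 30_000_000)); // first seen 90s ago: reliable span met
    assert!(!seen_consecutively(now, 90_000_000)); // first seen only 30s ago: still unreliable
}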
@@ -254,16 +254,19 @@ impl RoutingTable {
     }
 
     //#[instrument(level = "trace", skip(self), err)]
-    pub(crate) fn bootstrap_with_peer(self, crypto_kinds: Vec<CryptoKind>, pi: PeerInfo, unord: &FuturesUnordered<SendPinBoxFuture<()>>) {
+    pub(crate) fn bootstrap_with_peer(
+        self,
+        crypto_kinds: Vec<CryptoKind>,
+        pi: PeerInfo,
+        unord: &FuturesUnordered<SendPinBoxFuture<()>>,
+    ) {
         log_rtab!(
             "--- bootstrapping {} with {:?}",
             pi.node_ids(),
             pi.signed_node_info().node_info().dial_info_detail_list()
         );
 
-        let nr =
-            match self.register_node_with_peer_info(RoutingDomain::PublicInternet, pi, true) {
+        let nr = match self.register_node_with_peer_info(RoutingDomain::PublicInternet, pi, true) {
             Ok(nr) => nr,
             Err(e) => {
                 log_rtab!(error "failed to register bootstrap peer info: {}", e);
@@ -273,7 +276,6 @@ impl RoutingTable {
 
         // Add this our futures to process in parallel
         for crypto_kind in crypto_kinds {
-
             // Bootstrap this crypto kind
             let nr = nr.clone();
             let routing_table = self.clone();
@@ -320,8 +322,11 @@ impl RoutingTable {
     }
 
     #[instrument(level = "trace", skip(self), err)]
-    pub(crate) async fn bootstrap_with_peer_list(self, peers: Vec<PeerInfo>, stop_token: StopToken) -> EyreResult<()> {
+    pub(crate) async fn bootstrap_with_peer_list(
+        self,
+        peers: Vec<PeerInfo>,
+        stop_token: StopToken,
+    ) -> EyreResult<()> {
         log_rtab!(debug "  bootstrapped peers: {:?}", &peers);
 
         // Get crypto kinds to bootstrap
@@ -332,7 +337,8 @@ impl RoutingTable {
         // Run all bootstrap operations concurrently
         let mut unord = FuturesUnordered::<SendPinBoxFuture<()>>::new();
         for peer in peers {
-            self.clone().bootstrap_with_peer(crypto_kinds.clone(), peer, &unord);
+            self.clone()
+                .bootstrap_with_peer(crypto_kinds.clone(), peer, &unord);
         }
 
         // Wait for all bootstrap operations to complete before we complete the singlefuture
@@ -355,7 +361,6 @@ impl RoutingTable {
         crypto_kinds
     }
 
-
     #[instrument(level = "trace", skip(self), err)]
     pub(crate) async fn bootstrap_task_routine(self, stop_token: StopToken) -> EyreResult<()> {
         let bootstrap = self
@@ -398,13 +403,15 @@ impl RoutingTable {
         } else {
             // If not direct, resolve bootstrap servers and recurse their TXT entries
             let bsrecs = self.resolve_bootstrap(bootstrap).await?;
-            let peers : Vec<PeerInfo> = bsrecs.into_iter().map(|bsrec| {
+            let peers: Vec<PeerInfo> = bsrecs
+                .into_iter()
+                .map(|bsrec| {
                     // Get crypto support from list of node ids
                     let crypto_support = bsrec.node_ids.kinds();
 
                     // Make unsigned SignedNodeInfo
-                    let sni =
-                        SignedNodeInfo::Direct(SignedDirectNodeInfo::with_no_signature(NodeInfo::new(
+                    let sni = SignedNodeInfo::Direct(SignedDirectNodeInfo::with_no_signature(
+                        NodeInfo::new(
                             NetworkClass::InboundCapable, // Bootstraps are always inbound capable
                             ProtocolTypeSet::only(ProtocolType::UDP), // Bootstraps do not participate in relaying and will not make outbound requests, but will have UDP enabled
                             AddressTypeSet::all(), // Bootstraps are always IPV4 and IPV6 capable
@@ -412,14 +419,18 @@ impl RoutingTable {
                             crypto_support, // Crypto support is derived from list of node ids
                             vec![], // Bootstrap needs no capabilities
                             bsrec.dial_info_details, // Dial info is as specified in the bootstrap list
-                    )));
+                        ),
+                    ));
 
                     PeerInfo::new(bsrec.node_ids, sni)
-            }).collect();
+                })
+                .collect();
 
             peers
         };
 
-        self.clone().bootstrap_with_peer_list(peers, stop_token).await
+        self.clone()
+            .bootstrap_with_peer_list(peers, stop_token)
+            .await
     }
 }
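The bootstrap routine above fans out one boxed future per peer into a FuturesUnordered and then drains them all before finishing. A minimal sketch of that pattern with plain futures-crate types; the peer names and the bootstrap_one body are illustrative, not Veilid APIs:

use futures::stream::{FuturesUnordered, StreamExt};
use std::pin::Pin;

// Boxed sendable future, similar in spirit to SendPinBoxFuture<()> in the diff
type BoxedFut = Pin<Box<dyn std::future::Future<Output = ()> + Send>>;

// Hypothetical per-peer work; stands in for the future bootstrap_with_peer pushes
async fn bootstrap_one(peer: String) {
    println!("bootstrapping {}", peer);
}

fn main() {
    futures::executor::block_on(async {
        let peers = vec!["peer-a".to_string(), "peer-b".to_string()];

        // Collect one boxed future per peer
        let mut unord = FuturesUnordered::<BoxedFut>::new();
        for peer in peers {
            unord.push(Box::pin(bootstrap_one(peer)));
        }

        // Drain in completion order, like the wait before the singlefuture completes
        while unord.next().await.is_some() {}
    });
}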
@@ -32,18 +32,18 @@ mod rpc_start_tunnel;
 
 pub(crate) use coders::*;
 pub(crate) use destination::*;
+pub(crate) use fanout_call::*;
 pub(crate) use operation_waiter::*;
 pub(crate) use rpc_error::*;
 pub(crate) use rpc_status::*;
-pub(crate) use fanout_call::*;
 
 use super::*;
 
 use crypto::*;
+use fanout_queue::*;
 use futures_util::StreamExt;
 use network_manager::*;
 use routing_table::*;
-use fanout_queue::*;
 use stop_token::future::FutureExt;
 use storage_manager::*;
 
@@ -171,14 +171,13 @@ pub(crate) struct RPCMessage {
     opt_sender_nr: Option<NodeRef>,
 }
 
-#[instrument(level="trace", target="rpc", skip_all, err)]
+#[instrument(level = "trace", target = "rpc", skip_all, err)]
 pub fn builder_to_vec<'a, T>(builder: capnp::message::Builder<T>) -> Result<Vec<u8>, RPCError>
 where
     T: capnp::message::Allocator + 'a,
 {
     let mut buffer = vec![];
-    capnp::serialize_packed::write_message(&mut buffer, &builder)
-        .map_err(RPCError::protocol)?;
+    capnp::serialize_packed::write_message(&mut buffer, &builder).map_err(RPCError::protocol)?;
     Ok(buffer)
 }
 
@@ -207,8 +206,16 @@ pub struct Answer<T> {
     pub answer: T,
 }
 impl<T> Answer<T> {
-    pub fn new(latency: TimestampDuration, reply_private_route: Option<PublicKey>, answer: T) -> Self {
-        Self { _latency: latency, reply_private_route, answer }
+    pub fn new(
+        latency: TimestampDuration,
+        reply_private_route: Option<PublicKey>,
+        answer: T,
+    ) -> Self {
+        Self {
+            _latency: latency,
+            reply_private_route,
+            answer,
+        }
     }
 }
 
@@ -395,11 +402,10 @@ impl RPCProcessor {
         for task_n in 0..self.unlocked_inner.concurrency {
             let this = self.clone();
             let receiver = channel.1.clone();
-            let jh = spawn(&format!("rpc worker {}",task_n), Self::rpc_worker(
-                this,
-                inner.stop_source.as_ref().unwrap().token(),
-                receiver,
-            ));
+            let jh = spawn(
+                &format!("rpc worker {}", task_n),
+                Self::rpc_worker(this, inner.stop_source.as_ref().unwrap().token(), receiver),
+            );
             inner.worker_join_handles.push(jh);
         }
     }
@@ -453,11 +459,13 @@ impl RPCProcessor {
 
     /// Get waiting app call id for debugging purposes
     pub fn get_app_call_ids(&self) -> Vec<OperationId> {
-        self.unlocked_inner.waiting_app_call_table.get_operation_ids()
+        self.unlocked_inner
+            .waiting_app_call_table
+            .get_operation_ids()
     }
 
     /// Determine if a SignedNodeInfo can be placed into the specified routing domain
-    #[instrument(level="trace", target="rpc", skip_all)]
+    #[instrument(level = "trace", target = "rpc", skip_all)]
     fn verify_node_info(
         &self,
         routing_domain: RoutingDomain,
@@ -466,14 +474,16 @@ impl RPCProcessor {
     ) -> bool {
         let routing_table = self.routing_table();
         routing_table.signed_node_info_is_valid_in_routing_domain(routing_domain, signed_node_info)
-            && signed_node_info.node_info().has_all_capabilities(capabilities)
+            && signed_node_info
+                .node_info()
+                .has_all_capabilities(capabilities)
     }
 
     //////////////////////////////////////////////////////////////////////
 
     /// Search the network for a single node and add it to the routing table and return the node reference
     /// If no node was found in the timeout, this returns None
-    #[instrument(level="trace", target="rpc", skip_all)]
+    #[instrument(level = "trace", target = "rpc", skip_all)]
     async fn search_for_node_id(
         &self,
         node_id: TypedKey,
@@ -493,30 +503,29 @@ impl RPCProcessor {
         let call_routine = |next_node: NodeRef| {
             let this = self.clone();
             async move {
-                let v = network_result_try!(this
-                    .clone()
+                let v = network_result_try!(
+                    this.clone()
                         .rpc_call_find_node(
                             Destination::direct(next_node).with_safety(safety_selection),
                             node_id,
                             vec![],
                         )
-                    .await?);
+                        .await?
+                );
                 Ok(NetworkResult::value(v.answer))
             }
         };
 
         // Routine to call to check if we're done at each step
-        let check_done = |_:&[NodeRef]| {
-            let Ok(Some(nr)) = routing_table
-                .lookup_node_ref(node_id) else {
+        let check_done = |_: &[NodeRef]| {
+            let Ok(Some(nr)) = routing_table.lookup_node_ref(node_id) else {
                 return None;
             };
 
             // ensure we have some dial info for the entry already,
             // and that the node is still alive
             // if not, we should keep looking for better info
-            if nr.state(Timestamp::now()).is_alive() &&
-                nr.has_any_dial_info() {
+            if nr.state(Timestamp::now()).is_alive() && nr.has_any_dial_info() {
                 return Some(nr);
            }
 
@@ -540,15 +549,20 @@ impl RPCProcessor {
 
     /// Search the DHT for a specific node corresponding to a key unless we have that node in our routing table already, and return the node reference
     /// Note: This routine can possibly be recursive, hence the SendPinBoxFuture async form
-    #[instrument(level="trace", target="rpc", skip_all)]
+    #[instrument(level = "trace", target = "rpc", skip_all)]
     pub fn resolve_node(
         &self,
         node_id: TypedKey,
         safety_selection: SafetySelection,
     ) -> SendPinBoxFuture<Result<Option<NodeRef>, RPCError>> {
         let this = self.clone();
-        Box::pin(async move {
-            let _guard = this.unlocked_inner.startup_lock.enter().map_err(RPCError::map_try_again("not started up"))?;
+        Box::pin(
+            async move {
+                let _guard = this
+                    .unlocked_inner
+                    .startup_lock
+                    .enter()
+                    .map_err(RPCError::map_try_again("not started up"))?;
 
                 let routing_table = this.routing_table();
 
@@ -560,8 +574,7 @@ impl RPCProcessor {
                 // ensure we have some dial info for the entry already,
                 // and that the node is still alive
                 // if not, we should do the find_node anyway
-                if nr.state(Timestamp::now()).is_alive() &&
-                    nr.has_any_dial_info() {
+                if nr.state(Timestamp::now()).is_alive() && nr.has_any_dial_info() {
                     return Ok(Some(nr));
                 }
             }
@@ -590,10 +603,12 @@ impl RPCProcessor {
             };
 
             Ok(nr)
-        }.in_current_span())
+            }
+            .in_current_span(),
+        )
     }
 
-    #[instrument(level="trace", target="rpc", skip_all)]
+    #[instrument(level = "trace", target = "rpc", skip_all)]
     async fn wait_for_reply(
         &self,
         waitable_reply: WaitableReply,
@@ -645,17 +660,25 @@ impl RPCProcessor {
         if let Some(reply_private_route) = waitable_reply.reply_private_route {
             match &rpcreader.header.detail {
                 RPCMessageHeaderDetail::Direct(_) => {
-                    return Err(RPCError::protocol("should have received reply over private route or stub"));
-                },
-                RPCMessageHeaderDetail::SafetyRouted(sr) => {
-                    let node_id = self.routing_table.node_id(sr.direct.envelope.get_crypto_kind());
-                    if node_id.value != reply_private_route {
-                        return Err(RPCError::protocol("should have received reply from safety route to a stub"));
+                    return Err(RPCError::protocol(
+                        "should have received reply over private route or stub",
+                    ));
+                }
+                RPCMessageHeaderDetail::SafetyRouted(sr) => {
+                    let node_id = self
+                        .routing_table
+                        .node_id(sr.direct.envelope.get_crypto_kind());
+                    if node_id.value != reply_private_route {
+                        return Err(RPCError::protocol(
+                            "should have received reply from safety route to a stub",
+                        ));
+                    }
                     }
-                },
                 RPCMessageHeaderDetail::PrivateRouted(pr) => {
                     if pr.private_route != reply_private_route {
-                        return Err(RPCError::protocol("received reply over the wrong private route"));
+                        return Err(RPCError::protocol(
+                            "received reply over the wrong private route",
+                        ));
                     }
                 }
             };
@@ -666,7 +689,7 @@ impl RPCProcessor {
     }
 
     /// Wrap an operation with a private route inside a safety route
-    #[instrument(level="trace", target="rpc", skip_all)]
+    #[instrument(level = "trace", target = "rpc", skip_all)]
     fn wrap_with_route(
         &self,
         safety_selection: SafetySelection,
@@ -683,12 +706,15 @@ impl RPCProcessor {
         let pr_pubkey = remote_private_route.public_key.value;
         let crypto_kind = remote_private_route.crypto_kind();
         let Some(vcrypto) = self.crypto.get(crypto_kind) else {
-            return Err(RPCError::internal("crypto not available for selected private route"));
+            return Err(RPCError::internal(
+                "crypto not available for selected private route",
+            ));
         };
 
         // Compile the safety route with the private route
         let compiled_route: CompiledRoute = network_result_try!(rss
-            .compile_safety_route(safety_selection, remote_private_route).to_rpc_network_result()?);
+            .compile_safety_route(safety_selection, remote_private_route)
+            .to_rpc_network_result()?);
         let sr_is_stub = compiled_route.safety_route.is_stub();
         let sr_pubkey = compiled_route.safety_route.public_key.value;
 
@@ -741,12 +767,12 @@ impl RPCProcessor {
     /// Produce a byte buffer that represents the wire encoding of the entire
     /// unencrypted envelope body for a RPC message. This incorporates
     /// wrapping a private and/or safety route if they are specified.
-    #[instrument(level="trace", target="rpc", skip_all)]
+    #[instrument(level = "trace", target = "rpc", skip_all)]
     fn render_operation(
         &self,
         dest: Destination,
         operation: &RPCOperation,
-    ) ->RPCNetworkResult<RenderedOperation> {
+    ) -> RPCNetworkResult<RenderedOperation> {
         let out: NetworkResult<RenderedOperation>;
 
         // Encode message to a builder and make a message reader for it
@@ -873,14 +899,17 @@ impl RPCProcessor {
     /// routing table caching when it is okay to do so
     /// Also check target's timestamp of our own node info, to see if we should send that
     /// And send our timestamp of the target's node info so they can determine if they should update us on their next rpc
-    #[instrument(level="trace", target="rpc", skip_all)]
+    #[instrument(level = "trace", target = "rpc", skip_all)]
     fn get_sender_peer_info(&self, dest: &Destination) -> SenderPeerInfo {
         // Don't do this if the sender is to remain private
         // Otherwise we would be attaching the original sender's identity to the final destination,
         // thus defeating the purpose of the safety route entirely :P
         let Some(UnsafeRoutingInfo {
-            opt_node, opt_relay: _, opt_routing_domain
-        }) = dest.get_unsafe_routing_info(self.routing_table.clone()) else {
+            opt_node,
+            opt_relay: _,
+            opt_routing_domain,
+        }) = dest.get_unsafe_routing_info(self.routing_table.clone())
+        else {
             return SenderPeerInfo::default();
         };
         let Some(node) = opt_node else {
@@ -915,7 +944,7 @@ impl RPCProcessor {
     }
 
     /// Record failure to send to node or route
-    #[instrument(level="trace", target="rpc", skip_all)]
+    #[instrument(level = "trace", target = "rpc", skip_all)]
     fn record_send_failure(
         &self,
         rpc_kind: RPCKind,
@@ -950,7 +979,7 @@ impl RPCProcessor {
     }
 
     /// Record question lost to node or route
-    #[instrument(level="trace", target="rpc", skip_all)]
+    #[instrument(level = "trace", target = "rpc", skip_all)]
     fn record_question_lost(
         &self,
         send_ts: Timestamp,
@@ -993,7 +1022,7 @@ impl RPCProcessor {
     }
 
     /// Record success sending to node or route
-    #[instrument(level="trace", target="rpc", skip_all)]
+    #[instrument(level = "trace", target = "rpc", skip_all)]
     fn record_send_success(
         &self,
         rpc_kind: RPCKind,
@@ -1037,7 +1066,7 @@ impl RPCProcessor {
 
     /// Record answer received from node or route
     #[allow(clippy::too_many_arguments)]
-    #[instrument(level="trace", target="rpc", skip_all)]
+    #[instrument(level = "trace", target = "rpc", skip_all)]
     fn record_answer_received(
         &self,
         send_ts: Timestamp,
@@ -1123,7 +1152,7 @@ impl RPCProcessor {
     }
 
     /// Record question or statement received from node or route
-    #[instrument(level="trace", target="rpc", skip_all)]
+    #[instrument(level = "trace", target = "rpc", skip_all)]
     fn record_question_received(&self, msg: &RPCMessage) {
         let recv_ts = msg.header.timestamp;
         let bytes = msg.header.body_len;
@@ -1168,7 +1197,7 @@ impl RPCProcessor {
 
     /// Issue a question over the network, possibly using an anonymized route
     /// Optionally keeps a context to be passed to the answer processor when an answer is received
-    #[instrument(level="trace", target="rpc", skip_all)]
+    #[instrument(level = "trace", target = "rpc", skip_all)]
     async fn question(
         &self,
         dest: Destination,
@@ -1248,12 +1277,12 @@ impl RPCProcessor {
             remote_private_route,
         );
 
-
         // Ref the connection so it doesn't go away until we're done with the waitable reply
-        let opt_connection_ref_scope = send_data_method.unique_flow.connection_id.and_then(|id| self
-            .network_manager()
+        let opt_connection_ref_scope = send_data_method.unique_flow.connection_id.and_then(|id| {
+            self.network_manager()
                 .connection_manager()
-            .try_connection_ref_scope(id));
+                .try_connection_ref_scope(id)
+        });
 
         // Pass back waitable reply completion
         Ok(NetworkResult::value(WaitableReply {
@@ -1270,12 +1299,8 @@ impl RPCProcessor {
     }
 
     /// Issue a statement over the network, possibly using an anonymized route
-    #[instrument(level="trace", target="rpc", skip_all)]
-    async fn statement(
-        &self,
-        dest: Destination,
-        statement: RPCStatement,
-    ) ->RPCNetworkResult<()> {
+    #[instrument(level = "trace", target = "rpc", skip_all)]
+    async fn statement(&self, dest: Destination, statement: RPCStatement) -> RPCNetworkResult<()> {
         // Get sender peer info if we should send that
         let spi = self.get_sender_peer_info(&dest);
 
@@ -1342,13 +1367,8 @@ impl RPCProcessor {
     }
     /// Issue a reply over the network, possibly using an anonymized route
     /// The request must want a response, or this routine fails
-    #[instrument(level="trace", target="rpc", skip_all)]
-    async fn answer(
-        &self,
-        request: RPCMessage,
-        answer: RPCAnswer,
-    ) ->RPCNetworkResult<()> {
-
+    #[instrument(level = "trace", target = "rpc", skip_all)]
+    async fn answer(&self, request: RPCMessage, answer: RPCAnswer) -> RPCNetworkResult<()> {
         // Extract destination from respond_to
         let dest = network_result_try!(self.get_respond_to_destination(&request));
 
@@ -1420,7 +1440,7 @@ impl RPCProcessor {
     /// Decoding RPC from the wire
     /// This performs a capnp decode on the data, and if it passes the capnp schema
     /// it performs the cryptographic validation required to pass the operation up for processing
-    #[instrument(level="trace", target="rpc", skip_all)]
+    #[instrument(level = "trace", target = "rpc", skip_all)]
     fn decode_rpc_operation(
         &self,
         encoded_msg: &RPCMessageEncoded,
@@ -1448,7 +1468,7 @@ impl RPCProcessor {
     /// caller or receiver. This does not mean the operation is 'semantically correct'. For
     /// complex operations that require stateful validation and a more robust context than
     /// 'signatures', the caller must still perform whatever validation is necessary
-    #[instrument(level="trace", target="rpc", skip_all)]
+    #[instrument(level = "trace", target = "rpc", skip_all)]
     fn validate_rpc_operation(&self, operation: &mut RPCOperation) -> Result<(), RPCError> {
         // If this is an answer, get the question context for this answer
         // If we received an answer for a question we did not ask, this will return an error
@@ -1473,11 +1493,8 @@ impl RPCProcessor {
     }
 
     //////////////////////////////////////////////////////////////////////
-    #[instrument(level="trace", target="rpc", skip_all)]
-    async fn process_rpc_message(
-        &self,
-        encoded_msg: RPCMessageEncoded,
-    ) ->RPCNetworkResult<()> {
+    #[instrument(level = "trace", target = "rpc", skip_all)]
+    async fn process_rpc_message(&self, encoded_msg: RPCMessageEncoded) -> RPCNetworkResult<()> {
         let address_filter = self.network_manager.address_filter();
 
         // Decode operation appropriately based on header detail
@@ -1497,19 +1514,22 @@ impl RPCProcessor {
                     log_rpc!(debug "Invalid RPC Operation: {}", e);
 
                     // Punish nodes that send direct undecodable crap
-                    address_filter.punish_node_id(sender_node_id, PunishmentReason::FailedToDecodeOperation);
-                },
+                    address_filter.punish_node_id(
+                        sender_node_id,
+                        PunishmentReason::FailedToDecodeOperation,
+                    );
+                }
                 // Ignored messages that should be dropped
                 RPCError::Ignore(_) | RPCError::Network(_) | RPCError::TryAgain(_) => {
                     log_rpc!("Dropping RPC Operation: {}", e);
-                },
+                }
                 // Internal errors that deserve louder logging
                 RPCError::Unimplemented(_) | RPCError::Internal(_) => {
                     log_rpc!(error "Error decoding RPC operation: {}", e);
                 }
             };
             return Ok(NetworkResult::invalid_message(e));
-        },
+        }
     };
 
     // Get the routing domain this message came over
@@ -1521,7 +1541,8 @@ impl RPCProcessor {
                 // Ensure the sender peer info is for the actual sender specified in the envelope
                 if !sender_peer_info.node_ids().contains(&sender_node_id) {
                     // Attempted to update peer info for the wrong node id
-                    address_filter.punish_node_id(sender_node_id, PunishmentReason::WrongSenderPeerInfo);
+                    address_filter
+                        .punish_node_id(sender_node_id, PunishmentReason::WrongSenderPeerInfo);
                     return Ok(NetworkResult::invalid_message(
                         "attempt to update peer info for non-sender node id",
                     ));
@@ -1533,10 +1554,14 @@ impl RPCProcessor {
                     sender_peer_info.signed_node_info(),
                     &[],
                 ) {
-                    address_filter.punish_node_id(sender_node_id, PunishmentReason::FailedToVerifySenderPeerInfo);
-                    return Ok(NetworkResult::invalid_message(
-                        format!("sender peerinfo has invalid peer scope: {:?}",sender_peer_info.signed_node_info())
-                    ));
+                    address_filter.punish_node_id(
+                        sender_node_id,
+                        PunishmentReason::FailedToVerifySenderPeerInfo,
+                    );
+                    return Ok(NetworkResult::invalid_message(format!(
+                        "sender peerinfo has invalid peer scope: {:?}",
+                        sender_peer_info.signed_node_info()
+                    )));
                 }
                 opt_sender_nr = match self.routing_table().register_node_with_peer_info(
                     routing_domain,
@@ -1545,7 +1570,10 @@ impl RPCProcessor {
                 ) {
                     Ok(v) => Some(v),
                     Err(e) => {
-                        address_filter.punish_node_id(sender_node_id, PunishmentReason::FailedToRegisterSenderPeerInfo);
+                        address_filter.punish_node_id(
+                            sender_node_id,
+                            PunishmentReason::FailedToRegisterSenderPeerInfo,
+                        );
                         return Ok(NetworkResult::invalid_message(e));
                     }
                 }
@@ -1664,23 +1692,24 @@ impl RPCProcessor {
             },
             RPCOperationKind::Answer(_) => {
                 let op_id = msg.operation.op_id();
-                if let Err(e) = self.unlocked_inner
+                if let Err(e) = self
+                    .unlocked_inner
                     .waiting_rpc_table
-                    .complete_op_waiter(op_id, msg) {
+                    .complete_op_waiter(op_id, msg)
+                {
                     match e {
-                        RPCError::Unimplemented(_) |
-                        RPCError::Internal(_) => {
+                        RPCError::Unimplemented(_) | RPCError::Internal(_) => {
                             log_rpc!(error "Could not complete rpc operation: id = {}: {}", op_id, e);
-                        },
-                        RPCError::InvalidFormat(_) |
-                        RPCError::Protocol(_) |
-                        RPCError::Network(_) |
-                        RPCError::TryAgain(_) => {
+                        }
+                        RPCError::InvalidFormat(_)
+                        | RPCError::Protocol(_)
+                        | RPCError::Network(_)
+                        | RPCError::TryAgain(_) => {
                             log_rpc!(debug "Could not complete rpc operation: id = {}: {}", op_id, e);
-                        },
+                        }
                         RPCError::Ignore(_) => {
                             log_rpc!("Answer late: id = {}", op_id);
-                        },
+                        }
                     };
                     // Don't throw an error here because it's okay if the original operation timed out
                 }
@@ -1716,7 +1745,7 @@ impl RPCProcessor {
         }
     }
 
-    #[instrument(level="trace", target="rpc", skip_all)]
+    #[instrument(level = "trace", target = "rpc", skip_all)]
     pub fn enqueue_direct_message(
         &self,
         envelope: Envelope,
@@ -1725,7 +1754,11 @@ impl RPCProcessor {
         routing_domain: RoutingDomain,
         body: Vec<u8>,
     ) -> EyreResult<()> {
-        let _guard = self.unlocked_inner.startup_lock.enter().map_err(RPCError::map_try_again("not started up"))?;
+        let _guard = self
+            .unlocked_inner
+            .startup_lock
+            .enter()
+            .map_err(RPCError::map_try_again("not started up"))?;
 
         let header = RPCMessageHeader {
             detail: RPCMessageHeaderDetail::Direct(RPCMessageHeaderDetailDirect {
@@ -1756,7 +1789,7 @@ impl RPCProcessor {
         Ok(())
     }
 
-    #[instrument(level="trace", target="rpc", skip_all)]
+    #[instrument(level = "trace", target = "rpc", skip_all)]
     fn enqueue_safety_routed_message(
         &self,
         direct: RPCMessageHeaderDetailDirect,
@@ -1791,7 +1824,7 @@ impl RPCProcessor {
         Ok(())
     }
 
-    #[instrument(level="trace", target="rpc", skip_all)]
+    #[instrument(level = "trace", target = "rpc", skip_all)]
     fn enqueue_private_routed_message(
         &self,
         direct: RPCMessageHeaderDetailDirect,
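Several of the hunks above group RPCError variants by how loudly each class should be logged. A minimal standalone sketch of that classification pattern; the enum here is an illustrative stand-in, not the full RPCError type:

// Illustrative subset of an error enum, mirroring the grouping in the diff
#[derive(Debug)]
enum RpcError {
    Unimplemented(String),
    Internal(String),
    InvalidFormat(String),
    Protocol(String),
    Network(String),
    TryAgain(String),
    Ignore(String),
}

// Match arms grouped with `|` so each severity class shares one handler,
// the same shape rustfmt gives the multi-variant arms above
fn log_severity(e: &RpcError) -> &'static str {
    match e {
        RpcError::Unimplemented(_) | RpcError::Internal(_) => "error",
        RpcError::InvalidFormat(_)
        | RpcError::Protocol(_)
        | RpcError::Network(_)
        | RpcError::TryAgain(_) => "debug",
        RpcError::Ignore(_) => "trace",
    }
}

fn main() {
    assert_eq!(log_severity(&RpcError::Network("down".into())), "debug");
}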
@ -29,7 +29,7 @@ impl RPCProcessor {
|
|||||||
key: TypedKey,
|
key: TypedKey,
|
||||||
subkey: ValueSubkey,
|
subkey: ValueSubkey,
|
||||||
last_descriptor: Option<SignedValueDescriptor>,
|
last_descriptor: Option<SignedValueDescriptor>,
|
||||||
) ->RPCNetworkResult<Answer<GetValueAnswer>> {
|
) -> RPCNetworkResult<Answer<GetValueAnswer>> {
|
||||||
let _guard = self
|
let _guard = self
|
||||||
.unlocked_inner
|
.unlocked_inner
|
||||||
.startup_lock
|
.startup_lock
|
||||||
@ -105,31 +105,34 @@ impl RPCProcessor {
|
|||||||
|
|
||||||
let (value, peers, descriptor) = get_value_a.destructure();
|
let (value, peers, descriptor) = get_value_a.destructure();
|
||||||
if debug_target_enabled!("dht") {
|
if debug_target_enabled!("dht") {
|
||||||
let debug_string_value = value.as_ref().map(|v| {
|
let debug_string_value = value
|
||||||
format!(" len={} seq={} writer={}",
|
.as_ref()
|
||||||
|
.map(|v| {
|
||||||
|
format!(
|
||||||
|
" len={} seq={} writer={}",
|
||||||
v.value_data().data().len(),
|
v.value_data().data().len(),
|
||||||
v.value_data().seq(),
|
v.value_data().seq(),
|
||||||
v.value_data().writer(),
|
v.value_data().writer(),
|
||||||
)
|
)
|
||||||
}).unwrap_or_default();
|
})
|
||||||
|
.unwrap_or_default();
|
||||||
|
|
||||||
let debug_string_answer = format!(
|
let debug_string_answer = format!(
|
||||||
"OUT <== GetValueA({} #{}{}{} peers={}) <= {}",
|
"OUT <== GetValueA({} #{}{}{} peers={}) <= {}",
|
||||||
key,
|
key,
|
||||||
subkey,
|
subkey,
|
||||||
debug_string_value,
|
debug_string_value,
|
||||||
if descriptor.is_some() {
|
if descriptor.is_some() { " +desc" } else { "" },
|
||||||
" +desc"
|
|
||||||
} else {
|
|
||||||
""
|
|
||||||
},
|
|
||||||
peers.len(),
|
peers.len(),
|
||||||
dest
|
dest
|
||||||
);
|
);
|
||||||
|
|
||||||
log_dht!(debug "{}", debug_string_answer);
|
log_dht!(debug "{}", debug_string_answer);
|
||||||
|
|
||||||
let peer_ids:Vec<String> = peers.iter().filter_map(|p| p.node_ids().get(key.kind).map(|k| k.to_string())).collect();
|
let peer_ids: Vec<String> = peers
|
||||||
|
.iter()
|
||||||
|
.filter_map(|p| p.node_ids().get(key.kind).map(|k| k.to_string()))
|
||||||
|
.collect();
|
||||||
log_dht!(debug "Peers: {:#?}", peer_ids);
|
log_dht!(debug "Peers: {:#?}", peer_ids);
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -153,7 +156,10 @@ impl RPCProcessor {
|
|||||||
if let Some(value) = &value {
|
if let Some(value) = &value {
|
||||||
tracing::Span::current().record("ret.value.data.len", value.value_data().data().len());
|
tracing::Span::current().record("ret.value.data.len", value.value_data().data().len());
|
||||||
tracing::Span::current().record("ret.value.data.seq", value.value_data().seq());
|
tracing::Span::current().record("ret.value.data.seq", value.value_data().seq());
|
||||||
tracing::Span::current().record("ret.value.data.writer", value.value_data().writer().to_string());
|
tracing::Span::current().record(
|
||||||
|
"ret.value.data.writer",
|
||||||
|
value.value_data().writer().to_string(),
|
||||||
|
);
|
||||||
}
|
}
|
||||||
#[cfg(feature = "verbose-tracing")]
|
#[cfg(feature = "verbose-tracing")]
|
||||||
tracing::Span::current().record("ret.peers.len", peers.len());
|
tracing::Span::current().record("ret.peers.len", peers.len());
|
||||||
@ -172,11 +178,7 @@ impl RPCProcessor {
|
|||||||
////////////////////////////////////////////////////////////////////////////////////////////////
|
////////////////////////////////////////////////////////////////////////////////////////////////
|
||||||
|
|
||||||
#[instrument(level = "trace", target = "rpc", skip(self, msg), fields(msg.operation.op_id), ret, err)]
|
#[instrument(level = "trace", target = "rpc", skip(self, msg), fields(msg.operation.op_id), ret, err)]
|
||||||
pub(crate) async fn process_get_value_q(
|
pub(crate) async fn process_get_value_q(&self, msg: RPCMessage) -> RPCNetworkResult<()> {
|
||||||
&self,
|
|
||||||
msg: RPCMessage,
|
|
||||||
) ->RPCNetworkResult<()> {
|
|
||||||
|
|
||||||
// Ensure this never came over a private route, safety route is okay though
|
// Ensure this never came over a private route, safety route is okay though
|
||||||
match &msg.header.detail {
|
match &msg.header.detail {
|
||||||
RPCMessageHeaderDetail::Direct(_) | RPCMessageHeaderDetail::SafetyRouted(_) => {}
|
RPCMessageHeaderDetail::Direct(_) | RPCMessageHeaderDetail::SafetyRouted(_) => {}
|
||||||
@ -189,14 +191,8 @@ impl RPCProcessor {
|
|||||||
// Ignore if disabled
|
// Ignore if disabled
|
||||||
let routing_table = self.routing_table();
|
let routing_table = self.routing_table();
|
||||||
let opi = routing_table.get_own_peer_info(msg.header.routing_domain());
|
let opi = routing_table.get_own_peer_info(msg.header.routing_domain());
|
||||||
if !opi
|
if !opi.signed_node_info().node_info().has_capability(CAP_DHT) {
|
||||||
.signed_node_info()
|
return Ok(NetworkResult::service_unavailable("dht is not available"));
|
||||||
.node_info()
|
|
||||||
.has_capability(CAP_DHT)
|
|
||||||
{
|
|
||||||
return Ok(NetworkResult::service_unavailable(
|
|
||||||
"dht is not available",
|
|
||||||
));
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Get the question
|
// Get the question
|
||||||
@@ -214,18 +210,16 @@ impl RPCProcessor {

         // Get the nodes that we know about that are closer to the the key than our own node
         let routing_table = self.routing_table();
-        let closer_to_key_peers = network_result_try!(routing_table.find_preferred_peers_closer_to_key(key, vec![CAP_DHT]));
+        let closer_to_key_peers = network_result_try!(
+            routing_table.find_preferred_peers_closer_to_key(key, vec![CAP_DHT])
+        );

         if debug_target_enabled!("dht") {
             let debug_string = format!(
                 "IN <=== GetValueQ({} #{}{}) <== {}",
                 key,
                 subkey,
-                if want_descriptor {
-                    " +wantdesc"
-                } else {
-                    ""
-                },
+                if want_descriptor { " +wantdesc" } else { "" },
                 msg.header.direct_sender_node_id()
             );

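network_result_try!, reflowed above, behaves like the ? operator but for value-carrying network results: it unwraps the value case and early-returns anything else to the caller. A standalone sketch of that macro shape, with toy types standing in for Veilid's real NetworkResult and routing table:

enum NetworkResult<T> {
    Value(T),
    ServiceUnavailable(String),
}

// Sketch of a try-style macro in the spirit of network_result_try!:
// unwrap the Value case, otherwise return the non-value result.
// This illustrates the pattern; it is not Veilid's actual macro.
macro_rules! network_result_try {
    ($e:expr) => {
        match $e {
            NetworkResult::Value(v) => v,
            NetworkResult::ServiceUnavailable(s) => {
                return Ok(NetworkResult::ServiceUnavailable(s))
            }
        }
    };
}

fn find_peers() -> NetworkResult<Vec<String>> {
    NetworkResult::Value(vec!["peer1".to_string()])
}

fn handler() -> Result<NetworkResult<Vec<String>>, std::io::Error> {
    // Early-returns through the macro if find_peers() had no value.
    let closer_peers = network_result_try!(find_peers());
    Ok(NetworkResult::Value(closer_peers))
}

fn main() {
    let _ = handler();
}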
@@ -237,7 +231,8 @@ impl RPCProcessor {
             let c = self.config.get();
             c.network.dht.set_value_count as usize
         };
-        let (get_result_value, get_result_descriptor) = if closer_to_key_peers.len() >= set_value_count {
+        let (get_result_value, get_result_descriptor) =
+            if closer_to_key_peers.len() >= set_value_count {
             // Not close enough
             (None, None)
         } else {
@@ -253,13 +248,17 @@ impl RPCProcessor {
         };

         if debug_target_enabled!("dht") {
-            let debug_string_value = get_result_value.as_ref().map(|v| {
-                format!(" len={} seq={} writer={}",
+            let debug_string_value = get_result_value
+                .as_ref()
+                .map(|v| {
+                    format!(
+                        " len={} seq={} writer={}",
                     v.value_data().data().len(),
                     v.value_data().seq(),
                     v.value_data().writer(),
                 )
-            }).unwrap_or_default();
+            })
+            .unwrap_or_default();

             let debug_string_answer = format!(
                 "IN ===> GetValueA({} #{}{}{} peers={}) ==> {}",
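The debug-string pipeline above is the common Option idiom .as_ref().map(...).unwrap_or_default(): borrow the contents, format them if present, and fall back to the empty string. Reduced to a self-contained sketch:

fn describe(value: Option<&[u8]>) -> String {
    value
        .as_ref()
        .map(|v| format!(" len={}", v.len()))
        .unwrap_or_default()
}

fn main() {
    assert_eq!(describe(Some(b"abc")), " len=3");
    assert_eq!(describe(None), "");
}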
@@ -286,7 +285,10 @@ impl RPCProcessor {
         )?;

         // Send GetValue answer
-        self.answer(msg, RPCAnswer::new(RPCAnswerDetail::GetValueA(Box::new(get_value_a))))
+        self.answer(
+            msg,
+            RPCAnswer::new(RPCAnswerDetail::GetValueA(Box::new(get_value_a))),
+        )
         .await
     }
 }

@@ -33,7 +33,7 @@ impl RPCProcessor {
         value: SignedValueData,
         descriptor: SignedValueDescriptor,
         send_descriptor: bool,
-    ) ->RPCNetworkResult<Answer<SetValueAnswer>> {
+    ) -> RPCNetworkResult<Answer<SetValueAnswer>> {
         let _guard = self
             .unlocked_inner
             .startup_lock
@@ -62,11 +62,7 @@ impl RPCProcessor {
             subkey,
             value.value_data().data().len(),
             value.value_data().writer(),
-            if send_descriptor {
-                " +senddesc"
-            } else {
-                ""
-            },
+            if send_descriptor { " +senddesc" } else { "" },
             dest
         );

@@ -122,23 +118,22 @@ impl RPCProcessor {
         let (set, value, peers) = set_value_a.destructure();

         if debug_target_enabled!("dht") {
-            let debug_string_value = value.as_ref().map(|v| {
-                format!(" len={} writer={}",
+            let debug_string_value = value
+                .as_ref()
+                .map(|v| {
+                    format!(
+                        " len={} writer={}",
                     v.value_data().data().len(),
                     v.value_data().writer(),
                 )
-            }).unwrap_or_default();
+            })
+            .unwrap_or_default();

             let debug_string_answer = format!(
                 "OUT <== SetValueA({} #{}{}{} peers={}) <= {}",
                 key,
                 subkey,
-                if set {
-                    " +set"
-                } else {
-                    ""
-                },
+                if set { " +set" } else { "" },
                 debug_string_value,
                 peers.len(),
                 dest,
@@ -146,7 +141,10 @@ impl RPCProcessor {

             log_dht!(debug "{}", debug_string_answer);

-            let peer_ids:Vec<String> = peers.iter().filter_map(|p| p.node_ids().get(key.kind).map(|k| k.to_string())).collect();
+            let peer_ids: Vec<String> = peers
+                .iter()
+                .filter_map(|p| p.node_ids().get(key.kind).map(|k| k.to_string()))
+                .collect();
             log_dht!(debug "Peers: {:#?}", peer_ids);
         }

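The peer-id collection above is a plain iterator pipeline: iterate, keep only items that map to Some, and collect into a Vec<String>. The same shape with a toy Peer type:

struct Peer {
    node_id: Option<u64>,
}

fn main() {
    let peers = vec![Peer { node_id: Some(1) }, Peer { node_id: None }];
    // filter_map drops the None entries and unwraps the Some entries in one pass.
    let peer_ids: Vec<String> = peers
        .iter()
        .filter_map(|p| p.node_id.map(|k| k.to_string()))
        .collect();
    assert_eq!(peer_ids, vec!["1".to_string()]);
}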
@@ -172,7 +170,10 @@ impl RPCProcessor {
         if let Some(value) = &value {
             tracing::Span::current().record("ret.value.data.len", value.value_data().data().len());
             tracing::Span::current().record("ret.value.data.seq", value.value_data().seq());
-            tracing::Span::current().record("ret.value.data.writer", value.value_data().writer().to_string());
+            tracing::Span::current().record(
+                "ret.value.data.writer",
+                value.value_data().writer().to_string(),
+            );
         }
         #[cfg(feature = "verbose-tracing")]
         tracing::Span::current().record("ret.peers.len", peers.len());
@@ -187,23 +188,14 @@ impl RPCProcessor {
     ////////////////////////////////////////////////////////////////////////////////////////////////

     #[instrument(level = "trace", target = "rpc", skip(self, msg), fields(msg.operation.op_id), ret, err)]
-    pub(crate) async fn process_set_value_q(
-        &self,
-        msg: RPCMessage,
-    ) ->RPCNetworkResult<()> {
+    pub(crate) async fn process_set_value_q(&self, msg: RPCMessage) -> RPCNetworkResult<()> {
         // Ignore if disabled
         let routing_table = self.routing_table();
         let rss = routing_table.route_spec_store();

         let opi = routing_table.get_own_peer_info(msg.header.routing_domain());
-        if !opi
-            .signed_node_info()
-            .node_info()
-            .has_capability(CAP_DHT)
-        {
-            return Ok(NetworkResult::service_unavailable(
-                "dht is not available",
-            ));
+        if !opi.signed_node_info().node_info().has_capability(CAP_DHT) {
+            return Ok(NetworkResult::service_unavailable("dht is not available"));
         }

         // Ensure this never came over a private route, safety route is okay though
@@ -235,7 +227,9 @@ impl RPCProcessor {

         // Get the nodes that we know about that are closer to the the key than our own node
         let routing_table = self.routing_table();
-        let closer_to_key_peers = network_result_try!(routing_table.find_preferred_peers_closer_to_key(key, vec![CAP_DHT]));
+        let closer_to_key_peers = network_result_try!(
+            routing_table.find_preferred_peers_closer_to_key(key, vec![CAP_DHT])
+        );

         let debug_string = format!(
             "IN <=== SetValueQ({} #{} len={} seq={} writer={}{}) <== {}",
@@ -244,11 +238,7 @@ impl RPCProcessor {
             value.value_data().data().len(),
             value.value_data().seq(),
             value.value_data().writer(),
-            if descriptor.is_some() {
-                " +desc"
-            } else {
-                ""
-            },
+            if descriptor.is_some() { " +desc" } else { "" },
             msg.header.direct_sender_node_id()
         );

@@ -268,7 +258,13 @@ impl RPCProcessor {
         // Save the subkey, creating a new record if necessary
         let storage_manager = self.storage_manager();
         let new_value = network_result_try!(storage_manager
-            .inbound_set_value(key, subkey, Arc::new(value), descriptor.map(Arc::new), target)
+            .inbound_set_value(
+                key,
+                subkey,
+                Arc::new(value),
+                descriptor.map(Arc::new),
+                target
+            )
             .await
             .map_err(RPCError::internal)?);

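The call above hands the value and the optional descriptor to the storage manager wrapped in Arc; since Arc::new is a plain function, Option::map(Arc::new) does the optional wrapping in one step. A small standalone sketch of that idiom:

use std::sync::Arc;

fn main() {
    let value = vec![1u8, 2, 3];
    let descriptor: Option<String> = Some("desc".to_string());

    let value: Arc<Vec<u8>> = Arc::new(value);
    // Arc::new can be passed to map directly, no closure needed.
    let descriptor: Option<Arc<String>> = descriptor.map(Arc::new);

    assert_eq!(value.len(), 3);
    assert!(descriptor.is_some());
}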
@@ -276,23 +272,23 @@ impl RPCProcessor {
         };

         if debug_target_enabled!("dht") {
-            let debug_string_value = new_value.as_ref().map(|v| {
-                format!(" len={} seq={} writer={}",
+            let debug_string_value = new_value
+                .as_ref()
+                .map(|v| {
+                    format!(
+                        " len={} seq={} writer={}",
                     v.value_data().data().len(),
                     v.value_data().seq(),
                     v.value_data().writer(),
                 )
-            }).unwrap_or_default();
+            })
+            .unwrap_or_default();

             let debug_string_answer = format!(
                 "IN ===> SetValueA({} #{}{}{} peers={}) ==> {}",
                 key,
                 subkey,
-                if set {
-                    " +set"
-                } else {
-                    ""
-                },
+                if set { " +set" } else { "" },
                 debug_string_value,
                 closer_to_key_peers.len(),
                 msg.header.direct_sender_node_id()
@@ -302,10 +298,14 @@ impl RPCProcessor {
         }

         // Make SetValue answer
-        let set_value_a = RPCOperationSetValueA::new(set, new_value.map(|x| (*x).clone()), closer_to_key_peers)?;
+        let set_value_a =
+            RPCOperationSetValueA::new(set, new_value.map(|x| (*x).clone()), closer_to_key_peers)?;

         // Send SetValue answer
-        self.answer(msg, RPCAnswer::new(RPCAnswerDetail::SetValueA(Box::new(set_value_a))))
+        self.answer(
+            msg,
+            RPCAnswer::new(RPCAnswerDetail::SetValueA(Box::new(set_value_a))),
+        )
         .await
     }
 }

@@ -177,7 +177,6 @@ impl StorageManager {
                 // Send an update since the value changed
                 ctx.send_partial_update = true;
             }
-
             // Return peers if we have some
             log_network_result!(debug "GetValue fanout call returned peers {}", gva.answer.peers.len());

@@ -195,7 +194,7 @@ impl StorageManager {

                 // send partial update if desired
                 if ctx.send_partial_update {
-                    ctx.send_partial_update=false;
+                    ctx.send_partial_update = false;

                     // return partial result
                     let fanout_result = FanoutResult {
@@ -225,7 +224,10 @@ impl StorageManager {
         };

         // Call the fanout in a spawned task
-        spawn("outbound_get_value fanout", Box::pin(async move {
+        spawn(
+            "outbound_get_value fanout",
+            Box::pin(
+                async move {
             let fanout_call = FanoutCall::new(
                 routing_table.clone(),
                 key,
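spawn() and the .detach() further down are Veilid's own task wrappers; the underlying idea is a fire-and-forget task that carries a tracing span. A rough equivalent using plain tokio and the tracing Instrument extension trait (tokio, tracing, and tracing-subscriber assumed; this is a sketch, not Veilid's runtime abstraction):

use tracing::Instrument;

#[tokio::main]
async fn main() {
    tracing_subscriber::fmt().init();

    // Dropping the JoinHandle detaches the task: it keeps running in the
    // background, and its events are attached to the given span.
    let _ = tokio::spawn(
        async move {
            tracing::debug!("fanout finished");
        }
        .instrument(tracing::trace_span!("outbound_get_value fanout")),
    );
}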
@@ -271,14 +273,24 @@ impl StorageManager {
                })) {
                    log_dht!(debug "Sending GetValue result failed: {}", e);
                }
-            }.instrument(tracing::trace_span!("outbound_get_value result"))))
+            }
+            .instrument(tracing::trace_span!("outbound_get_value result")),
+            ),
+        )
         .detach();

         Ok(out_rx)
     }

     #[instrument(level = "trace", target = "dht", skip_all)]
-    pub(super) fn process_deferred_outbound_get_value_result_inner(&self, inner: &mut StorageManagerInner, res_rx: flume::Receiver<Result<get_value::OutboundGetValueResult, VeilidAPIError>>, key: TypedKey, subkey: ValueSubkey, last_seq: ValueSeqNum) {
+    pub(super) fn process_deferred_outbound_get_value_result_inner(
+        &self,
+        inner: &mut StorageManagerInner,
+        res_rx: flume::Receiver<Result<get_value::OutboundGetValueResult, VeilidAPIError>>,
+        key: TypedKey,
+        subkey: ValueSubkey,
+        last_seq: ValueSeqNum,
+    ) {
         let this = self.clone();
         inner.process_deferred_results(
             res_rx,
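The deferred-result plumbing above rides on a flume channel: the fanout task sends its outcome, and a deferred processor receives it later. A minimal standalone sketch of that shape with a toy result type (flume and tokio assumed):

#[tokio::main]
async fn main() {
    let (res_tx, res_rx) = flume::unbounded::<Result<u32, String>>();

    // Producer side: the fanout task sends its outcome when done.
    tokio::spawn(async move {
        let _ = res_tx.send(Ok(42));
    });

    // Consumer side: a deferred processor awaits the result later.
    match res_rx.recv_async().await {
        Ok(Ok(seq)) => println!("got deferred result, seq={}", seq),
        Ok(Err(e)) => eprintln!("operation failed: {}", e),
        Err(_) => eprintln!("sender dropped"),
    }
}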
@@ -326,7 +338,13 @@ impl StorageManager {
     }

     #[instrument(level = "trace", target = "dht", skip_all)]
-    pub(super) async fn process_outbound_get_value_result(&self, key: TypedKey, subkey: ValueSubkey, opt_last_seq: Option<u32>, result: get_value::OutboundGetValueResult) -> Result<Option<ValueData>, VeilidAPIError> {
+    pub(super) async fn process_outbound_get_value_result(
+        &self,
+        key: TypedKey,
+        subkey: ValueSubkey,
+        opt_last_seq: Option<u32>,
+        result: get_value::OutboundGetValueResult,
+    ) -> Result<Option<ValueData>, VeilidAPIError> {
         // See if we got a value back
         let Some(get_result_value) = result.get_result.opt_value else {
             // If we got nothing back then we also had nothing beforehand, return nothing

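The body above opens with a let-else (stable since Rust 1.65): peel the Option, or return early when there is nothing. The same control flow in isolation:

fn latest_value(opt_value: Option<Vec<u8>>) -> Result<Option<Vec<u8>>, String> {
    let Some(value) = opt_value else {
        // Nothing came back and nothing was held beforehand: return nothing.
        return Ok(None);
    };
    Ok(Some(value))
}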
@@ -145,7 +145,6 @@ impl StorageManager {
        // If we got a value back it should be different than the one we are setting
        // But in the case of a benign bug, we can just move to the next node
        if ctx.value.value_data() == value.value_data() {
-
            ctx.value_nodes.push(next_node);
            ctx.missed_since_last_set = 0;

@@ -197,9 +196,10 @@ impl StorageManager {
                        kind: FanoutResultKind::Partial,
                        value_nodes: ctx.value_nodes.clone(),
                    };
-                    let out=OutboundSetValueResult {
+                    let out = OutboundSetValueResult {
                        fanout_result,
-                        signed_value_data: ctx.value.clone()};
+                        signed_value_data: ctx.value.clone(),
+                    };
                    log_dht!(debug "Sending partial SetValue result: {:?}", out);

                    if let Err(e) = out_tx.send(Ok(out)) {
@@ -224,7 +224,10 @@ impl StorageManager {
        };

        // Call the fanout in a spawned task
-        spawn("outbound_set_value fanout", Box::pin(async move {
+        spawn(
+            "outbound_set_value fanout",
+            Box::pin(
+                async move {
            let fanout_call = FanoutCall::new(
                routing_table.clone(),
                key,
@@ -267,16 +270,25 @@ impl StorageManager {
                })) {
                    log_dht!(debug "Sending SetValue result failed: {}", e);
                }
-            }.instrument(tracing::trace_span!("outbound_set_value fanout routine"))))
+            }
+            .instrument(tracing::trace_span!("outbound_set_value fanout routine")),
+            ),
+        )
         .detach();

         Ok(out_rx)
     }

     #[instrument(level = "trace", target = "dht", skip_all)]
-    pub(super) fn process_deferred_outbound_set_value_result_inner(&self, inner: &mut StorageManagerInner,
+    pub(super) fn process_deferred_outbound_set_value_result_inner(
+        &self,
+        inner: &mut StorageManagerInner,
         res_rx: flume::Receiver<Result<set_value::OutboundSetValueResult, VeilidAPIError>>,
-        key: TypedKey, subkey: ValueSubkey, last_value_data: ValueData, safety_selection: SafetySelection, ) {
+        key: TypedKey,
+        subkey: ValueSubkey,
+        last_value_data: ValueData,
+        safety_selection: SafetySelection,
+    ) {
         let this = self.clone();
         let last_value_data = Arc::new(Mutex::new(last_value_data));
         inner.process_deferred_results(
@@ -336,8 +348,14 @@ impl StorageManager {
     }

     #[instrument(level = "trace", target = "stor", skip_all, err)]
-    pub(super) async fn process_outbound_set_value_result(&self, key: TypedKey, subkey: ValueSubkey, last_value_data: ValueData, safety_selection: SafetySelection, result: set_value::OutboundSetValueResult) -> Result<Option<ValueData>, VeilidAPIError> {
+    pub(super) async fn process_outbound_set_value_result(
+        &self,
+        key: TypedKey,
+        subkey: ValueSubkey,
+        last_value_data: ValueData,
+        safety_selection: SafetySelection,
+        result: set_value::OutboundSetValueResult,
+    ) -> Result<Option<ValueData>, VeilidAPIError> {
         // Regain the lock after network access
         let mut inner = self.lock().await?;

@@ -210,6 +210,7 @@ pub async fn test_protect_unprotect(vcrypto: CryptoSystemVersion, ts: TableStore
            0, 0, 0,
        ]),
    );
+
    let dek2 = TypedSharedSecret::new(
        vcrypto.kind(),
        SharedSecret::new([
@@ -217,13 +218,22 @@ pub async fn test_protect_unprotect(vcrypto: CryptoSystemVersion, ts: TableStore
            0, 0, 0xFF,
        ]),
    );
+
    let dek3 = TypedSharedSecret::new(
        vcrypto.kind(),
        SharedSecret::new([0x80u8; SHARED_SECRET_LENGTH]),
    );

    let deks = [dek1, dek2, dek3];
-    let passwords = ["", " ", " ", "12345678", "|/\\!@#$%^&*()_+", "Ⓜ️", "🔥🔥♾️"];
+    let passwords = [
+        "",
+        " ",
+        " ",
+        "12345678",
+        "|/\\!@#$%^&*()_+",
+        "Ⓜ️",
+        "🔥🔥♾️",
+    ];

    for dek in deks {
        for password in passwords {
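The test above sweeps every (dek, password) pair, including empty, whitespace-only, and multi-byte Unicode passwords. Its round-trip shape, with hypothetical protect/unprotect stand-ins rather than the real TableStore API:

// Stand-in: real code would encrypt with a password-derived key; this toy
// version just tags the secret with the password bytes so the round trip
// can be demonstrated in isolation.
fn protect(secret: &[u8], password: &str) -> Vec<u8> {
    let mut out = password.as_bytes().to_vec();
    out.extend_from_slice(secret);
    out
}

fn unprotect(blob: &[u8], password: &str) -> Option<Vec<u8>> {
    blob.strip_prefix(password.as_bytes()).map(|rest| rest.to_vec())
}

fn main() {
    let secrets: [&[u8]; 2] = [&[0u8; 4], &[0x80u8; 4]];
    let passwords = ["", " ", "12345678", "🔥🔥♾️"];
    // Table-driven sweep: every secret against every password must round-trip.
    for secret in secrets {
        for password in passwords {
            let blob = protect(secret, password);
            assert_eq!(unprotect(&blob, password).as_deref(), Some(secret));
        }
    }
}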
@@ -49,7 +49,7 @@ cfg_if::cfg_if! {
            rt.block_on(f)
        }
    } else {
-        compile_error!("needs executor implementation")
+        compile_error!("needs executor implementation");
    }
}

@@ -29,6 +29,6 @@ cfg_if! {
            static ref GLOBAL_RUNTIME: tokio::runtime::Runtime = tokio::runtime::Runtime::new().unwrap();
        }
    } else {
-        compile_error!("needs executor implementation")
+        compile_error!("needs executor implementation");
    }
}

@@ -27,7 +27,7 @@ cfg_if! {
        use tokio::io::AsyncBufReadExt;
        use tokio::io::AsyncWriteExt;
    } else {
-        compile_error!("needs executor implementation")
+        compile_error!("needs executor implementation");
    }
}

@@ -49,6 +49,6 @@ cfg_if! {
            local.block_on(&rt, f)
        }
    } else {
-        compile_error!("needs executor implementation")
+        compile_error!("needs executor implementation");
    }
}

@@ -20,7 +20,7 @@ cfg_if! {
    } else if #[cfg(feature="rt-tokio")] {
        use netlink_sys::{TokioSocket as RTNetLinkSocket};
    } else {
-        compile_error!("needs executor implementation")
+        compile_error!("needs executor implementation");
    }
}
use std::convert::TryInto;
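All five hunks above make the same one-character fix: compile_error! in item position needs a trailing semicolon. The whole cfg_if fallback pattern in one place, as a sketch (the crate imports are illustrative; the feature names follow the ones visible in the diff):

use cfg_if::cfg_if;

cfg_if! {
    if #[cfg(feature = "rt-async-std")] {
        pub use async_std::task::JoinHandle;
    } else if #[cfg(feature = "rt-tokio")] {
        pub use tokio::task::JoinHandle;
    } else {
        // Fail the build loudly when no executor feature is selected.
        compile_error!("needs executor implementation");
    }
}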