Mirror of https://gitlab.com/veilid/veilid.git (synced 2025-02-22 15:39:54 -05:00)

commit d96b83fb4e (parent 5ae0bd834c)

    private route work
@@ -342,7 +342,7 @@ impl AttachmentManager {
     #[instrument(level = "trace", skip(self))]
     fn attach(&self) {
         // Create long-running connection maintenance routine
-        let inner = self.inner.lock();
+        let mut inner = self.inner.lock();
         if inner.attachment_maintainer_jh.is_some() {
             return;
         }
@@ -788,6 +788,7 @@ impl NetworkManager {
     pub async fn handle_private_receipt<R: AsRef<[u8]>>(
         &self,
         receipt_data: R,
+        private_route: DHTKey,
     ) -> NetworkResult<()> {
         let receipt_manager = self.receipt_manager();

@@ -799,7 +800,7 @@ impl NetworkManager {
         };

         receipt_manager
-            .handle_receipt(receipt, ReceiptReturned::Private)
+            .handle_receipt(receipt, ReceiptReturned::Private { private_route })
             .await
     }

@@ -1023,7 +1024,8 @@ impl NetworkManager {

         // Wait for the return receipt
         let inbound_nr = match eventual_value.await.take_value().unwrap() {
-            ReceiptEvent::ReturnedPrivate | ReceiptEvent::ReturnedOutOfBand => {
+            ReceiptEvent::ReturnedPrivate { private_route: _ }
+            | ReceiptEvent::ReturnedOutOfBand => {
                 return Ok(NetworkResult::invalid_message(
                     "reverse connect receipt should be returned in-band",
                 ));
@@ -1124,7 +1126,8 @@ impl NetworkManager {

         // Wait for the return receipt
         let inbound_nr = match eventual_value.await.take_value().unwrap() {
-            ReceiptEvent::ReturnedPrivate | ReceiptEvent::ReturnedOutOfBand => {
+            ReceiptEvent::ReturnedPrivate { private_route: _ }
+            | ReceiptEvent::ReturnedOutOfBand => {
                 return Ok(NetworkResult::invalid_message(
                     "hole punch receipt should be returned in-band",
                 ));
@@ -125,22 +125,24 @@ impl DiscoveryContext {
             RoutingDomain::PublicInternet,
             dial_info_filter.clone(),
         );
-        let disallow_relays_filter = move |_rti, e: &BucketEntryInner| {
-            if let Some(n) = e.node_info(RoutingDomain::PublicInternet) {
-                n.relay_peer_info.is_none()
-            } else {
-                false
-            }
-        };
-        let filter = RoutingTable::combine_entry_filters(
-            inbound_dial_info_entry_filter,
-            disallow_relays_filter,
-        );
+        let disallow_relays_filter = Box::new(
+            move |rti: &RoutingTableInner, _k: DHTKey, v: Option<Arc<BucketEntry>>| {
+                let v = v.unwrap();
+                v.with(rti, |_rti, e| {
+                    if let Some(n) = e.node_info(RoutingDomain::PublicInternet) {
+                        n.relay_peer_info.is_none()
+                    } else {
+                        false
+                    }
+                })
+            },
+        ) as RoutingTableEntryFilter;
+        let filters = VecDeque::from([inbound_dial_info_entry_filter, disallow_relays_filter]);

         // Find public nodes matching this filter
         let peers = self
             .routing_table
-            .find_fast_public_nodes_filtered(node_count, filter);
+            .find_fast_public_nodes_filtered(node_count, filters);
         if peers.is_empty() {
             log_net!(
                 "no external address detection peers of type {:?}:{:?}",
@@ -495,7 +495,7 @@ impl NetworkManager {
         // even the unreliable ones, and ask them to find nodes close to our node too
         let noderefs = routing_table.find_fastest_nodes(
             min_peer_count,
-            |_rti, _k, _v| true,
+            VecDeque::new(),
             |_rti, k: DHTKey, v: Option<Arc<BucketEntry>>| {
                 NodeRef::new(routing_table.clone(), k, v.unwrap().clone(), None)
             },
@@ -11,7 +11,7 @@ use xx::*;
 pub enum ReceiptEvent {
     ReturnedOutOfBand,
     ReturnedInBand { inbound_noderef: NodeRef },
-    ReturnedPrivate,
+    ReturnedPrivate { private_route: DHTKey },
     Expired,
     Cancelled,
 }
@@ -20,7 +20,7 @@ pub enum ReceiptEvent {
 pub enum ReceiptReturned {
     OutOfBand,
     InBand { inbound_noderef: NodeRef },
-    Private,
+    Private { private_route: DHTKey },
 }

 pub trait ReceiptCallback: Send + 'static {
@@ -412,7 +412,7 @@ impl ReceiptManager {
             match receipt_returned {
                 ReceiptReturned::OutOfBand => "OutOfBand".to_owned(),
                 ReceiptReturned::InBand { ref inbound_noderef } => format!("InBand({})", inbound_noderef),
-                ReceiptReturned::Private => "Private".to_owned(),
+                ReceiptReturned::Private { ref private_route } => format!("Private({})", private_route),
             },
             if extra_data.is_empty() {
                 "".to_owned()
@@ -450,7 +450,9 @@ impl ReceiptManager {
             } => ReceiptEvent::ReturnedInBand {
                 inbound_noderef: inbound_noderef.clone(),
             },
-            ReceiptReturned::Private => ReceiptEvent::ReturnedPrivate,
+            ReceiptReturned::Private { ref private_route } => ReceiptEvent::ReturnedPrivate {
+                private_route: private_route.clone(),
+            },
         };

         let callback_future = Self::perform_callback(receipt_event, &mut record_mut);
@@ -25,8 +25,6 @@ pub use routing_domains::*;
 pub use routing_table_inner::*;
 pub use stats_accounting::*;

-const RECENT_PEERS_TABLE_SIZE: usize = 64;
-
 //////////////////////////////////////////////////////////////////////////

 pub type LowLevelProtocolPorts = BTreeSet<(LowLevelProtocolType, AddressType, u16)>;
@@ -36,6 +34,8 @@ pub struct LowLevelPortInfo {
     pub low_level_protocol_ports: LowLevelProtocolPorts,
     pub protocol_to_port: ProtocolToPortMapping,
 }
+pub type RoutingTableEntryFilter =
+    Box<dyn for<'r> FnMut(&'r RoutingTableInner, DHTKey, Option<Arc<BucketEntry>>) -> bool + Send>;

 #[derive(Clone, Debug, Default)]
 pub struct RoutingTableHealth {
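The hunk above replaces the old generic entry-filter closures with the boxed RoutingTableEntryFilter trait object so that several filters can travel together in a VecDeque. Below is a minimal, self-contained sketch of that pattern; Inner, Key, Entry, EntryFilter and both example filters are hypothetical stand-ins for Veilid's RoutingTableInner, DHTKey, BucketEntry and the real filters, not code from this commit.

use std::collections::VecDeque;
use std::sync::Arc;

// Hypothetical stand-ins for Veilid's RoutingTableInner, DHTKey and BucketEntry.
struct Inner;
type Key = u64;
struct Entry {
    reliable: bool,
}

// Same shape as the RoutingTableEntryFilter alias added above:
// a boxed FnMut over (&Inner, Key, Option<Arc<Entry>>) returning bool.
type EntryFilter = Box<dyn for<'r> FnMut(&'r Inner, Key, Option<Arc<Entry>>) -> bool + Send>;

fn main() {
    // Two independent filters, collected the same way the diff does with
    // VecDeque::from([inbound_dial_info_entry_filter, disallow_relays_filter]).
    let reliable_only = Box::new(|_inner: &Inner, _k: Key, v: Option<Arc<Entry>>| {
        v.map(|e| e.reliable).unwrap_or(false)
    }) as EntryFilter;
    let even_keys_only =
        Box::new(|_inner: &Inner, k: Key, _v: Option<Arc<Entry>>| k % 2 == 0) as EntryFilter;

    let mut filters: VecDeque<EntryFilter> = VecDeque::from([reliable_only, even_keys_only]);

    // Callers can still prepend a mandatory filter, as the new
    // find_fast_public_nodes_filtered does with filters.push_front(public_node_filter).
    filters.push_front(
        Box::new(|_inner: &Inner, _k: Key, v: Option<Arc<Entry>>| v.is_some()) as EntryFilter,
    );

    // Combined here with any(), mirroring the loop-with-break this commit uses in
    // find_peers_with_sort_and_filter.
    let inner = Inner;
    let entry = Some(Arc::new(Entry { reliable: true }));
    let accepted = filters.iter_mut().any(|f| f(&inner, 2, entry.clone()));
    println!("accepted = {}", accepted);
}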
@@ -47,7 +47,7 @@ pub struct RoutingTableHealth {
     pub dead_entry_count: usize,
 }

-struct RoutingTableUnlockedInner {
+pub(super) struct RoutingTableUnlockedInner {
     // Accessors
     config: VeilidConfig,
     network_manager: NetworkManager,
@@ -164,7 +164,7 @@ impl RoutingTable {
         debug!("finished route spec store init");

         let mut inner = self.inner.write();
-        inner.init(self.clone());
+        inner.init(self.clone())?;

         inner.route_spec_store = Some(route_spec_store);

@@ -192,7 +192,9 @@ impl RoutingTable {
             inner.route_spec_store.take()
         };
         if let Some(rss) = rss {
-            rss.save().await;
+            if let Err(e) = rss.save().await {
+                error!("couldn't save route spec store: {}", e);
+            }
         }
         debug!("shutting down routing table");

@@ -545,23 +547,30 @@ impl RoutingTable {
     }

     /// Makes a filter that finds nodes with a matching inbound dialinfo
-    pub fn make_inbound_dial_info_entry_filter(
+    pub fn make_inbound_dial_info_entry_filter<'a>(
         routing_domain: RoutingDomain,
         dial_info_filter: DialInfoFilter,
-    ) -> Box<dyn FnMut(&RoutingTableInner, &BucketEntryInner) -> bool> {
+    ) -> RoutingTableEntryFilter {
         // does it have matching public dial info?
-        Box::new(move |_rti, e| {
-            if let Some(ni) = e.node_info(routing_domain) {
-                if ni
-                    .first_filtered_dial_info_detail(DialInfoDetail::NO_SORT, |did| {
-                        did.matches_filter(&dial_info_filter)
-                    })
-                    .is_some()
-                {
-                    return true;
-                }
-            }
-            false
-        })
+        Box::new(move |rti, _k, e| {
+            if let Some(e) = e {
+                e.with(rti, |_rti, e| {
+                    if let Some(ni) = e.node_info(routing_domain) {
+                        if ni
+                            .first_filtered_dial_info_detail(DialInfoDetail::NO_SORT, |did| {
+                                did.matches_filter(&dial_info_filter)
+                            })
+                            .is_some()
+                        {
+                            return true;
+                        }
+                    }
+                    false
+                })
+            } else {
+                rti.first_filtered_dial_info_detail(routing_domain.into(), &dial_info_filter)
+                    .is_some()
+            }
+        })
     }

@@ -569,48 +578,36 @@ impl RoutingTable {
     pub fn make_outbound_dial_info_entry_filter(
         routing_domain: RoutingDomain,
         dial_info: DialInfo,
-    ) -> Box<dyn FnMut(&RoutingTableInner, &BucketEntryInner) -> bool> {
+    ) -> RoutingTableEntryFilter {
         // does the node's outbound capabilities match the dialinfo?
-        Box::new(move |_rti, e| {
-            if let Some(ni) = e.node_info(routing_domain) {
-                let dif = DialInfoFilter::all()
-                    .with_protocol_type_set(ni.outbound_protocols)
-                    .with_address_type_set(ni.address_types);
-                if dial_info.matches_filter(&dif) {
-                    return true;
-                }
-            }
-            false
-        })
+        Box::new(move |rti, _k, e| {
+            if let Some(e) = e {
+                e.with(rti, |_rti, e| {
+                    if let Some(ni) = e.node_info(routing_domain) {
+                        let dif = DialInfoFilter::all()
+                            .with_protocol_type_set(ni.outbound_protocols)
+                            .with_address_type_set(ni.address_types);
+                        if dial_info.matches_filter(&dif) {
+                            return true;
+                        }
+                    }
+                    false
+                })
+            } else {
+                let dif = rti.get_outbound_dial_info_filter(routing_domain);
+                dial_info.matches_filter(&dif)
+            }
+        })
     }

-    /// Make a filter that wraps another filter
-    pub fn combine_entry_filters(
-        mut f1: Box<dyn FnMut(&RoutingTableInner, &BucketEntryInner) -> bool>,
-        mut f2: Box<dyn FnMut(&RoutingTableInner, &BucketEntryInner) -> bool>,
-    ) -> Box<dyn FnMut(&RoutingTableInner, &BucketEntryInner) -> bool> {
-        Box::new(move |rti, e| {
-            if !f1(rti, e) {
-                return false;
-            }
-            if !f2(rti, e) {
-                return false;
-            }
-            true
-        })
-    }
-
-    pub fn find_fast_public_nodes_filtered<F>(
+    pub fn find_fast_public_nodes_filtered(
         &self,
         node_count: usize,
-        mut entry_filter: F,
-    ) -> Vec<NodeRef>
-    where
-        F: FnMut(&RoutingTableInner, &BucketEntryInner) -> bool,
-    {
+        filters: VecDeque<RoutingTableEntryFilter>,
+    ) -> Vec<NodeRef> {
         self.inner
             .read()
-            .find_fast_public_nodes_filtered(self.clone(), node_count, entry_filter)
+            .find_fast_public_nodes_filtered(self.clone(), node_count, filters)
     }

     /// Retrieve up to N of each type of protocol capable nodes
@@ -621,14 +618,12 @@ impl RoutingTable {
             ProtocolType::WS,
             ProtocolType::WSS,
         ];
+        let protocol_types_len = protocol_types.len();
         let mut nodes_proto_v4 = vec![0usize, 0usize, 0usize, 0usize];
         let mut nodes_proto_v6 = vec![0usize, 0usize, 0usize, 0usize];

-        self.find_fastest_nodes(
-            // count
-            protocol_types.len() * 2 * max_per_type,
-            // filter
-            move |rti, _k: DHTKey, v: Option<Arc<BucketEntry>>| {
+        let filter = Box::new(
+            move |rti: &RoutingTableInner, _k: DHTKey, v: Option<Arc<BucketEntry>>| {
                 let entry = v.unwrap();
                 entry.with(rti, |_rti, e| {
                     // skip nodes on our local network here
@@ -668,64 +663,68 @@ impl RoutingTable {
                     .unwrap_or(false)
                 })
             },
-            // transform
+        ) as RoutingTableEntryFilter;
+
+        let filters = VecDeque::from([filter]);
+
+        self.find_fastest_nodes(
+            protocol_types_len * 2 * max_per_type,
+            filters,
             |_rti, k: DHTKey, v: Option<Arc<BucketEntry>>| {
                 NodeRef::new(self.clone(), k, v.unwrap().clone(), None)
             },
         )
     }

-    pub fn find_peers_with_sort_and_filter<'a, 'b, F, C, T, O>(
+    pub fn find_peers_with_sort_and_filter<C, T, O>(
         &self,
         node_count: usize,
         cur_ts: u64,
-        mut filter: F,
+        filters: VecDeque<RoutingTableEntryFilter>,
         compare: C,
-        mut transform: T,
+        transform: T,
     ) -> Vec<O>
     where
-        F: FnMut(&'a RoutingTableInner, DHTKey, Option<Arc<BucketEntry>>) -> bool,
-        C: FnMut(
+        C: for<'a, 'b> FnMut(
             &'a RoutingTableInner,
             &'b (DHTKey, Option<Arc<BucketEntry>>),
             &'b (DHTKey, Option<Arc<BucketEntry>>),
         ) -> core::cmp::Ordering,
-        T: FnMut(&'a RoutingTableInner, DHTKey, Option<Arc<BucketEntry>>) -> O,
+        T: for<'r> FnMut(&'r RoutingTableInner, DHTKey, Option<Arc<BucketEntry>>) -> O + Send,
     {
         self.inner
             .read()
-            .find_peers_with_sort_and_filter(node_count, cur_ts, filter, compare, transform)
+            .find_peers_with_sort_and_filter(node_count, cur_ts, filters, compare, transform)
     }

-    pub fn find_fastest_nodes<'a, T, F, O>(
+    pub fn find_fastest_nodes<'a, T, O>(
         &self,
         node_count: usize,
-        mut filter: F,
+        filters: VecDeque<RoutingTableEntryFilter>,
         transform: T,
     ) -> Vec<O>
     where
-        F: FnMut(&'a RoutingTableInner, DHTKey, Option<Arc<BucketEntry>>) -> bool,
-        T: FnMut(&'a RoutingTableInner, DHTKey, Option<Arc<BucketEntry>>) -> O,
+        T: for<'r> FnMut(&'r RoutingTableInner, DHTKey, Option<Arc<BucketEntry>>) -> O + Send,
     {
         self.inner
             .read()
-            .find_fastest_nodes(node_count, filter, transform)
+            .find_fastest_nodes(node_count, filters, transform)
     }

-    pub fn find_closest_nodes<'a, F, T, O>(
+    pub fn find_closest_nodes<'a, T, O>(
         &self,
         node_id: DHTKey,
-        filter: F,
-        mut transform: T,
+        filters: VecDeque<RoutingTableEntryFilter>,
+        transform: T,
     ) -> Vec<O>
     where
-        F: FnMut(&'a RoutingTableInner, DHTKey, Option<Arc<BucketEntry>>) -> bool,
-        T: FnMut(&'a RoutingTableInner, DHTKey, Option<Arc<BucketEntry>>) -> O,
+        T: for<'r> FnMut(&'r RoutingTableInner, DHTKey, Option<Arc<BucketEntry>>) -> O + Send,
     {
         self.inner
             .read()
-            .find_closest_nodes(node_id, filter, transform)
+            .find_closest_nodes(node_id, filters, transform)
     }

     #[instrument(level = "trace", skip(self), ret)]
     pub fn register_find_node_answer(&self, peers: Vec<PeerInfo>) -> Vec<NodeRef> {
         let node_id = self.node_id();
@@ -124,7 +124,7 @@ fn route_permutation_to_hop_cache(nodes: &[(DHTKey, NodeInfo)], perm: &[usize])

 /// number of route permutations is the number of unique orderings
 /// for a set of nodes, given that the first node is fixed
-fn get_route_permutation_count(hop_count: usize) -> usize {
+fn _get_route_permutation_count(hop_count: usize) -> usize {
     if hop_count == 0 {
         unreachable!();
     }
@@ -374,39 +374,43 @@ impl RouteSpecStore {

         // Get list of all nodes, and sort them for selection
         let cur_ts = intf::get_timestamp();
-        let filter = |rti, _k: DHTKey, v: Option<Arc<BucketEntry>>| -> bool {
-            // Exclude our own node from routes
-            if v.is_none() {
-                return false;
-            }
-            let v = v.unwrap();
+        let filter = Box::new(
+            move |rti: &RoutingTableInner, _k: DHTKey, v: Option<Arc<BucketEntry>>| -> bool {
+                // Exclude our own node from routes
+                if v.is_none() {
+                    return false;
+                }
+                let v = v.unwrap();

                 // Exclude nodes on our local network
                 let on_local_network = v.with(rti, |_rti, e| {
                     e.node_info(RoutingDomain::LocalNetwork).is_some()
                 });
                 if on_local_network {
                     return false;
                 }

                 // Exclude nodes with no publicinternet nodeinfo, or incompatible nodeinfo or node status won't route
-            v.with(rti, |_rti, e| {
-                let node_info_ok = if let Some(ni) = e.node_info(RoutingDomain::PublicInternet) {
-                    ni.has_sequencing_matched_dial_info(sequencing)
-                } else {
-                    false
-                };
-                let node_status_ok = if let Some(ns) = e.node_status(RoutingDomain::PublicInternet)
-                {
-                    ns.will_route()
-                } else {
-                    false
-                };
+                v.with(rti, move |_rti, e| {
+                    let node_info_ok = if let Some(ni) = e.node_info(RoutingDomain::PublicInternet)
+                    {
+                        ni.has_sequencing_matched_dial_info(sequencing)
+                    } else {
+                        false
+                    };
+                    let node_status_ok =
+                        if let Some(ns) = e.node_status(RoutingDomain::PublicInternet) {
+                            ns.will_route()
+                        } else {
+                            false
+                        };

                     node_info_ok && node_status_ok
                 })
-        };
-        let compare = |rti,
+            },
+        ) as RoutingTableEntryFilter;
+        let filters = VecDeque::from([filter]);
+        let compare = |rti: &RoutingTableInner,
                        v1: &(DHTKey, Option<Arc<BucketEntry>>),
                        v2: &(DHTKey, Option<Arc<BucketEntry>>)|
          -> Ordering {
@@ -461,7 +465,10 @@ impl RouteSpecStore {
             });
             cmpout
         };
-        let transform = |rti, k: DHTKey, v: Option<Arc<BucketEntry>>| -> (DHTKey, NodeInfo) {
+        let transform = |rti: &RoutingTableInner,
+                         k: DHTKey,
+                         v: Option<Arc<BucketEntry>>|
+         -> (DHTKey, NodeInfo) {
             // Return the key and the nodeinfo for that key
             (
                 k,
@@ -479,7 +486,7 @@ impl RouteSpecStore {
             BucketEntryState::Unreliable,
         );
         let nodes =
-            rti.find_peers_with_sort_and_filter(node_count, cur_ts, filter, compare, transform);
+            rti.find_peers_with_sort_and_filter(node_count, cur_ts, filters, compare, transform);

         // If we couldn't find enough nodes, wait until we have more nodes in the routing table
         if nodes.len() < hop_count {
@@ -606,12 +613,49 @@ impl RouteSpecStore {
         Ok(Some(public_key))
     }

-    pub fn with_route_spec_detail<F, R>(&self, public_key: &DHTKey, f: F) -> Option<R>
-    where
-        F: FnOnce(&RouteSpecDetail) -> R,
-    {
-        let inner = self.inner.lock();
-        Self::detail(&*inner, &public_key).map(f)
+    pub fn validate_signatures(
+        &self,
+        public_key: &DHTKey,
+        signatures: &[DHTSignature],
+        data: &[u8],
+        last_hop_id: DHTKey,
+    ) -> EyreResult<Option<(DHTKeySecret, SafetySelection)>> {
+        let inner = &*self.inner.lock();
+        let rsd = Self::detail(inner, &public_key).ok_or_else(|| eyre!("route does not exist"))?;
+
+        // Ensure we have the right number of signatures
+        if signatures.len() != rsd.hops.len() - 1 {
+            // Wrong number of signatures
+            log_rpc!(debug "wrong number of signatures ({} should be {}) for routed operation on private route {}", signatures.len(), rsd.hops.len() - 1, public_key);
+            return Ok(None);
+        }
+        // Validate signatures to ensure the route was handled by the nodes and not messed with
+        for (hop_n, hop_public_key) in rsd.hops.iter().enumerate() {
+            // The last hop is not signed, as the whole packet is signed
+            if hop_n == signatures.len() {
+                // Verify the node we received the routed operation from is the last hop in our route
+                if *hop_public_key != last_hop_id {
+                    log_rpc!(debug "received routed operation from the wrong hop ({} should be {}) on private route {}", hop_public_key.encode(), last_hop_id.encode(), public_key);
+                    return Ok(None);
+                }
+            } else {
+                // Verify a signature for a hop node along the route
+                if let Err(e) = verify(hop_public_key, data, &signatures[hop_n]) {
+                    log_rpc!(debug "failed to verify signature for hop {} at {} on private route {}: {}", hop_n, hop_public_key, public_key, e);
+                    return Ok(None);
+                }
+            }
+        }
+        // We got the correct signatures, return a key ans
+        Ok(Some((
+            rsd.secret_key,
+            SafetySelection::Safe(SafetySpec {
+                preferred_route: Some(*public_key),
+                hop_count: rsd.hops.len(),
+                stability: rsd.stability,
+                sequencing: rsd.sequencing,
+            }),
+        )))
     }

     pub fn release_route(&self, public_key: DHTKey) {
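The new validate_signatures above checks one signature per hop over the routed data and checks the final hop by the identity of the node the operation arrived from. The following standalone sketch reproduces just that control flow; validate_hop_signatures, Key, Signature and the injected verify callback are hypothetical stand-ins, not Veilid's real DHTKey/DHTSignature/verify.

// Hypothetical stand-ins; Veilid's real DHTKey and DHTSignature are richer types.
type Key = [u8; 32];
type Signature = [u8; 64];

// Mirrors the control flow of the new validate_signatures: every hop except the
// last must have signed `data`, and the last hop is checked by identity against
// the node the operation was received from.
fn validate_hop_signatures(
    hops: &[Key],
    signatures: &[Signature],
    data: &[u8],
    last_hop_id: Key,
    verify: impl Fn(&Key, &[u8], &Signature) -> bool,
) -> bool {
    // One signature per hop, except the unsigned final hop.
    if signatures.len() + 1 != hops.len() {
        return false;
    }
    for (hop_n, hop_key) in hops.iter().enumerate() {
        if hop_n == signatures.len() {
            // Last hop: verified by who we received the operation from.
            if *hop_key != last_hop_id {
                return false;
            }
        } else if !verify(hop_key, data, &signatures[hop_n]) {
            // Intermediate hop: its signature over the routed data must check out.
            return false;
        }
    }
    true
}

fn main() {
    // Stand-in verifier that accepts everything, just to exercise the flow;
    // the real code calls Veilid's ed25519 signature verification.
    let hops = [[1u8; 32], [2u8; 32]];
    let sigs = [[0u8; 64]];
    let ok = validate_hop_signatures(&hops, &sigs, b"payload", [2u8; 32], |_, _, _| true);
    println!("ok = {}", ok);
}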
@@ -2,7 +2,7 @@ use super::*;

 /// Mechanism required to contact another node
 #[derive(Clone, Debug)]
-pub(crate) enum ContactMethod {
+pub enum ContactMethod {
     /// Node is not reachable by any means
     Unreachable,
     /// Connection should have already existed
@@ -34,7 +34,7 @@ pub struct RoutingTableInner {
 }

 impl RoutingTableInner {
-    pub fn new(unlocked_inner: Arc<RoutingTableUnlockedInner>) -> RoutingTableInner {
+    pub(super) fn new(unlocked_inner: Arc<RoutingTableUnlockedInner>) -> RoutingTableInner {
         RoutingTableInner {
             unlocked_inner,
             buckets: Vec::new(),
@@ -794,22 +794,16 @@ impl RoutingTableInner {
     // Find Nodes

     // Retrieve the fastest nodes in the routing table matching an entry filter
-    pub fn find_fast_public_nodes_filtered<F>(
+    pub fn find_fast_public_nodes_filtered(
         &self,
         outer_self: RoutingTable,
         node_count: usize,
-        mut entry_filter: F,
-    ) -> Vec<NodeRef>
-    where
-        F: FnMut(&RoutingTableInner, &BucketEntryInner) -> bool,
-    {
-        self.find_fastest_nodes(
-            // count
-            node_count,
-            // filter
-            |rti, _k: DHTKey, v: Option<Arc<BucketEntry>>| {
+        mut filters: VecDeque<RoutingTableEntryFilter>,
+    ) -> Vec<NodeRef> {
+        let public_node_filter = Box::new(
+            |rti: &RoutingTableInner, _k: DHTKey, v: Option<Arc<BucketEntry>>| {
                 let entry = v.unwrap();
-                entry.with(rti, |rti, e| {
+                entry.with(rti, |_rti, e| {
                     // skip nodes on local network
                     if e.node_info(RoutingDomain::LocalNetwork).is_some() {
                         return false;
@@ -818,12 +812,16 @@ impl RoutingTableInner {
                     if e.node_info(RoutingDomain::PublicInternet).is_none() {
                         return false;
                     }
-                    // skip nodes that dont match entry filter
-                    entry_filter(rti, e)
+                    true
                 })
             },
-            // transform
-            |_rti, k: DHTKey, v: Option<Arc<BucketEntry>>| {
+        ) as RoutingTableEntryFilter;
+        filters.push_front(public_node_filter);
+
+        self.find_fastest_nodes(
+            node_count,
+            filters,
+            |_rti: &RoutingTableInner, k: DHTKey, v: Option<Arc<BucketEntry>>| {
                 NodeRef::new(outer_self.clone(), k, v.unwrap().clone(), None)
             },
         )
@@ -858,37 +856,42 @@ impl RoutingTableInner {
         }
     }

-    pub fn find_peers_with_sort_and_filter<'a, 'b, F, C, T, O>(
+    pub fn find_peers_with_sort_and_filter<C, T, O>(
         &self,
         node_count: usize,
         cur_ts: u64,
-        mut filter: F,
-        compare: C,
+        mut filters: VecDeque<RoutingTableEntryFilter>,
+        mut compare: C,
         mut transform: T,
     ) -> Vec<O>
     where
-        F: FnMut(&'a RoutingTableInner, DHTKey, Option<Arc<BucketEntry>>) -> bool,
-        C: FnMut(
+        C: for<'a, 'b> FnMut(
             &'a RoutingTableInner,
             &'b (DHTKey, Option<Arc<BucketEntry>>),
             &'b (DHTKey, Option<Arc<BucketEntry>>),
         ) -> core::cmp::Ordering,
-        T: FnMut(&'a RoutingTableInner, DHTKey, Option<Arc<BucketEntry>>) -> O,
+        T: for<'r> FnMut(&'r RoutingTableInner, DHTKey, Option<Arc<BucketEntry>>) -> O,
     {
         // collect all the nodes for sorting
         let mut nodes =
             Vec::<(DHTKey, Option<Arc<BucketEntry>>)>::with_capacity(self.bucket_entry_count + 1);

         // add our own node (only one of there with the None entry)
-        if filter(self, self.unlocked_inner.node_id, None) {
-            nodes.push((self.unlocked_inner.node_id, None));
+        for filter in &mut filters {
+            if filter(self, self.unlocked_inner.node_id, None) {
+                nodes.push((self.unlocked_inner.node_id, None));
+                break;
+            }
         }

         // add all nodes from buckets
         self.with_entries(cur_ts, BucketEntryState::Unreliable, |rti, k, v| {
             // Apply filter
-            if filter(rti, k, Some(v.clone())) {
-                nodes.push((k, Some(v.clone())));
+            for filter in &mut filters {
+                if filter(rti, k, Some(v.clone())) {
+                    nodes.push((k, Some(v.clone())));
+                    break;
+                }
             }
             Option::<()>::None
         });
@@ -907,97 +910,99 @@ impl RoutingTableInner {
         out
     }

-    pub fn find_fastest_nodes<'a, T, F, O>(
+    pub fn find_fastest_nodes<T, O>(
         &self,
         node_count: usize,
-        mut filter: F,
+        mut filters: VecDeque<RoutingTableEntryFilter>,
         transform: T,
     ) -> Vec<O>
     where
-        F: FnMut(&'a RoutingTableInner, DHTKey, Option<Arc<BucketEntry>>) -> bool,
-        T: FnMut(&'a RoutingTableInner, DHTKey, Option<Arc<BucketEntry>>) -> O,
+        T: for<'r> FnMut(&'r RoutingTableInner, DHTKey, Option<Arc<BucketEntry>>) -> O,
     {
         let cur_ts = intf::get_timestamp();
-        let out = self.find_peers_with_sort_and_filter(
-            node_count,
-            cur_ts,
-            // filter
-            |rti, k, v| {
+
+        // Add filter to remove dead nodes always
+        let filter_dead = Box::new(
+            move |rti: &RoutingTableInner, _k: DHTKey, v: Option<Arc<BucketEntry>>| {
                 if let Some(entry) = &v {
                     // always filter out dead nodes
                     if entry.with(rti, |_rti, e| e.state(cur_ts) == BucketEntryState::Dead) {
                         false
                     } else {
-                        filter(rti, k, v)
+                        true
                     }
                 } else {
                     // always filter out self peer, as it is irrelevant to the 'fastest nodes' search
                     false
                 }
             },
-            // sort
-            |rti, (a_key, a_entry), (b_key, b_entry)| {
-                // same nodes are always the same
-                if a_key == b_key {
-                    return core::cmp::Ordering::Equal;
-                }
-                // our own node always comes last (should not happen, here for completeness)
-                if a_entry.is_none() {
-                    return core::cmp::Ordering::Greater;
-                }
-                if b_entry.is_none() {
-                    return core::cmp::Ordering::Less;
-                }
-                // reliable nodes come first
-                let ae = a_entry.as_ref().unwrap();
-                let be = b_entry.as_ref().unwrap();
-                ae.with(rti, |rti, ae| {
-                    be.with(rti, |_rti, be| {
-                        let ra = ae.check_reliable(cur_ts);
-                        let rb = be.check_reliable(cur_ts);
-                        if ra != rb {
-                            if ra {
-                                return core::cmp::Ordering::Less;
-                            } else {
-                                return core::cmp::Ordering::Greater;
-                            }
-                        }
-
-                        // latency is the next metric, closer nodes first
-                        let a_latency = match ae.peer_stats().latency.as_ref() {
-                            None => {
-                                // treat unknown latency as slow
-                                return core::cmp::Ordering::Greater;
-                            }
-                            Some(l) => l,
-                        };
-                        let b_latency = match be.peer_stats().latency.as_ref() {
-                            None => {
-                                // treat unknown latency as slow
-                                return core::cmp::Ordering::Less;
-                            }
-                            Some(l) => l,
-                        };
-                        // Sort by average latency
-                        a_latency.average.cmp(&b_latency.average)
-                    })
+        ) as RoutingTableEntryFilter;
+        filters.push_front(filter_dead);
+
+        // Fastest sort
+        let sort = |rti: &RoutingTableInner,
+                    (a_key, a_entry): &(DHTKey, Option<Arc<BucketEntry>>),
+                    (b_key, b_entry): &(DHTKey, Option<Arc<BucketEntry>>)| {
+            // same nodes are always the same
+            if a_key == b_key {
+                return core::cmp::Ordering::Equal;
+            }
+            // our own node always comes last (should not happen, here for completeness)
+            if a_entry.is_none() {
+                return core::cmp::Ordering::Greater;
+            }
+            if b_entry.is_none() {
+                return core::cmp::Ordering::Less;
+            }
+            // reliable nodes come first
+            let ae = a_entry.as_ref().unwrap();
+            let be = b_entry.as_ref().unwrap();
+            ae.with(rti, |rti, ae| {
+                be.with(rti, |_rti, be| {
+                    let ra = ae.check_reliable(cur_ts);
+                    let rb = be.check_reliable(cur_ts);
+                    if ra != rb {
+                        if ra {
+                            return core::cmp::Ordering::Less;
+                        } else {
+                            return core::cmp::Ordering::Greater;
+                        }
+                    }
+
+                    // latency is the next metric, closer nodes first
+                    let a_latency = match ae.peer_stats().latency.as_ref() {
+                        None => {
+                            // treat unknown latency as slow
+                            return core::cmp::Ordering::Greater;
+                        }
+                        Some(l) => l,
+                    };
+                    let b_latency = match be.peer_stats().latency.as_ref() {
+                        None => {
+                            // treat unknown latency as slow
+                            return core::cmp::Ordering::Less;
+                        }
+                        Some(l) => l,
+                    };
+                    // Sort by average latency
+                    a_latency.average.cmp(&b_latency.average)
                 })
-            },
-            // transform,
-            transform,
-        );
+            })
+        };
+
+        let out =
+            self.find_peers_with_sort_and_filter(node_count, cur_ts, filters, sort, transform);
         out
     }

-    pub fn find_closest_nodes<'a, F, T, O>(
+    pub fn find_closest_nodes<T, O>(
         &self,
         node_id: DHTKey,
-        filter: F,
-        mut transform: T,
+        filters: VecDeque<RoutingTableEntryFilter>,
+        transform: T,
     ) -> Vec<O>
     where
-        F: FnMut(&'a RoutingTableInner, DHTKey, Option<Arc<BucketEntry>>) -> bool,
-        T: FnMut(&'a RoutingTableInner, DHTKey, Option<Arc<BucketEntry>>) -> O,
+        T: for<'r> FnMut(&'r RoutingTableInner, DHTKey, Option<Arc<BucketEntry>>) -> O,
     {
         let cur_ts = intf::get_timestamp();
         let node_count = {
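The "Fastest sort" comparator pulled out above orders entries by reliability first and then by average latency, treating unknown latency as slow. A small self-contained sketch of that ordering follows; Peer, avg_latency_us and fastest_sort are simplified stand-ins for Veilid's BucketEntry statistics, not the actual implementation.

use std::cmp::Ordering;

// Simplified stand-in for a bucket entry's stats; the real code reads
// check_reliable() and peer_stats().latency off a BucketEntryInner.
#[derive(Debug)]
struct Peer {
    reliable: bool,
    avg_latency_us: Option<u64>, // None = unknown latency, treated as slow
}

// Mirrors the ordering of the "Fastest sort" closure above:
// reliable entries first, then lower average latency, unknown latency last.
fn fastest_sort(a: &Peer, b: &Peer) -> Ordering {
    if a.reliable != b.reliable {
        return if a.reliable { Ordering::Less } else { Ordering::Greater };
    }
    match (a.avg_latency_us, b.avg_latency_us) {
        (None, _) => Ordering::Greater,
        (_, None) => Ordering::Less,
        (Some(la), Some(lb)) => la.cmp(&lb),
    }
}

fn main() {
    let mut peers = vec![
        Peer { reliable: false, avg_latency_us: Some(10_000) },
        Peer { reliable: true, avg_latency_us: None },
        Peer { reliable: true, avg_latency_us: Some(50_000) },
    ];
    peers.sort_by(fastest_sort);
    println!("{:?}", peers);
}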
@@ -1005,41 +1010,39 @@ impl RoutingTableInner {
             let c = config.get();
             c.network.dht.max_find_node_count as usize
         };
-        let out = self.find_peers_with_sort_and_filter(
-            node_count,
-            cur_ts,
-            // filter
-            filter,
-            // sort
-            |rti, (a_key, a_entry), (b_key, b_entry)| {
-                // same nodes are always the same
-                if a_key == b_key {
-                    return core::cmp::Ordering::Equal;
-                }
-
-                // reliable nodes come first, pessimistically treating our own node as unreliable
-                let ra = a_entry
-                    .as_ref()
-                    .map_or(false, |x| x.with(rti, |_rti, x| x.check_reliable(cur_ts)));
-                let rb = b_entry
-                    .as_ref()
-                    .map_or(false, |x| x.with(rti, |_rti, x| x.check_reliable(cur_ts)));
-                if ra != rb {
-                    if ra {
-                        return core::cmp::Ordering::Less;
-                    } else {
-                        return core::cmp::Ordering::Greater;
-                    }
-                }
-
-                // distance is the next metric, closer nodes first
-                let da = distance(a_key, &node_id);
-                let db = distance(b_key, &node_id);
-                da.cmp(&db)
-            },
-            // transform,
-            &mut transform,
-        );
+        // closest sort
+        let sort = |rti: &RoutingTableInner,
+                    (a_key, a_entry): &(DHTKey, Option<Arc<BucketEntry>>),
+                    (b_key, b_entry): &(DHTKey, Option<Arc<BucketEntry>>)| {
+            // same nodes are always the same
+            if a_key == b_key {
+                return core::cmp::Ordering::Equal;
+            }
+
+            // reliable nodes come first, pessimistically treating our own node as unreliable
+            let ra = a_entry
+                .as_ref()
+                .map_or(false, |x| x.with(rti, |_rti, x| x.check_reliable(cur_ts)));
+            let rb = b_entry
+                .as_ref()
+                .map_or(false, |x| x.with(rti, |_rti, x| x.check_reliable(cur_ts)));
+            if ra != rb {
+                if ra {
+                    return core::cmp::Ordering::Less;
+                } else {
+                    return core::cmp::Ordering::Greater;
+                }
+            }
+
+            // distance is the next metric, closer nodes first
+            let da = distance(a_key, &node_id);
+            let db = distance(b_key, &node_id);
+            da.cmp(&db)
+        };
+
+        let out =
+            self.find_peers_with_sort_and_filter(node_count, cur_ts, filters, sort, transform);
         log_rtab!(">> find_closest_nodes: node count = {}", out.len());
         out
     }
@@ -171,7 +171,6 @@ pub struct RPCProcessorUnlockedInner {
     queue_size: u32,
     concurrency: u32,
     max_route_hop_count: usize,
-    default_route_hop_count: usize,
     validate_dial_info_receipt_time_ms: u32,
     update_callback: UpdateCallback,
     waiting_rpc_table: OperationWaiter<RPCMessage>,
@@ -208,7 +207,6 @@ impl RPCProcessor {
         let queue_size = c.network.rpc.queue_size;
         let timeout = ms_to_us(c.network.rpc.timeout_ms);
         let max_route_hop_count = c.network.rpc.max_route_hop_count as usize;
-        let default_route_hop_count = c.network.rpc.default_route_hop_count as usize;
         if concurrency == 0 {
             concurrency = intf::get_concurrency() / 2;
             if concurrency == 0 {
@@ -222,7 +220,6 @@ impl RPCProcessor {
                 queue_size,
                 concurrency,
                 max_route_hop_count,
-                default_route_hop_count,
                 validate_dial_info_receipt_time_ms,
                 update_callback,
                 waiting_rpc_table: OperationWaiter::new(),
@@ -418,7 +415,7 @@ impl RPCProcessor {
     }

     // Wrap an operation with a private route inside a safety route
-    pub(super) fn wrap_with_route(
+    fn wrap_with_route(
         &self,
         safety_selection: SafetySelection,
         private_route: PrivateRoute,
@@ -540,6 +537,7 @@ impl RPCProcessor {
         match safety_selection {
             SafetySelection::Unsafe(sequencing) => {
                 // Apply safety selection sequencing requirement if it is more strict than the node_ref's sequencing requirement
+                let mut node_ref = node_ref.clone();
                 if sequencing > node_ref.sequencing() {
                     node_ref.set_sequencing(sequencing)
                 }
@@ -62,22 +62,26 @@ impl RPCProcessor {

         // add node information for the requesting node to our routing table
         let routing_table = self.routing_table();
-        let network_manager = self.network_manager();
         let has_valid_own_node_info =
             routing_table.has_valid_own_node_info(RoutingDomain::PublicInternet);
         let own_peer_info = routing_table.get_own_peer_info(RoutingDomain::PublicInternet);

         // find N nodes closest to the target node in our routing table
-        let closest_nodes = routing_table.find_closest_nodes(
-            find_node_q.node_id,
-            // filter
-            |rti, _k, v| {
+        let filter = Box::new(
+            move |rti: &RoutingTableInner, _k: DHTKey, v: Option<Arc<BucketEntry>>| {
                 rti.filter_has_valid_signed_node_info(
                     RoutingDomain::PublicInternet,
                     has_valid_own_node_info,
                     v,
                 )
             },
+        ) as RoutingTableEntryFilter;
+        let filters = VecDeque::from([filter]);
+
+        let closest_nodes = routing_table.find_closest_nodes(
+            find_node_q.node_id,
+            filters,
             // transform
             |rti, k, v| {
                 rti.transform_to_peer_info(
@@ -45,7 +45,7 @@ impl RPCProcessor {
             RPCMessageHeaderDetail::PrivateRoute(detail) => {
                 network_result_value_or_log!(debug
                     network_manager
-                        .handle_private_receipt(receipt)
+                        .handle_private_receipt(receipt, detail.private_route)
                         .await => {}
                 );
             }
@@ -121,7 +121,8 @@ impl RPCProcessor {
         if next_private_route.hop_count != 0 {
             let node_id = self.routing_table.node_id();
             let node_id_secret = self.routing_table.node_id_secret();
-            let sig = sign(&node_id, &node_id_secret, &route.operation.data).map_err(RPCError::internal)?;
+            let sig = sign(&node_id, &node_id_secret, &route.operation.data)
+                .map_err(RPCError::internal)?;
             route.operation.signatures.push(sig);
         }

@@ -169,14 +170,16 @@ impl RPCProcessor {

         // If the private route public key is our node id, then this was sent via safety route to our node directly
         // so there will be no signatures to validate
-        let opt_pr_info = if private_route.public_key == self.routing_table.node_id() {
+        let (secret_key, safety_selection) = if private_route.public_key
+            == self.routing_table.node_id()
+        {
             // The private route was a stub
             // Return our secret key and an appropriate safety selection
             //
             // Note: it is important that we never respond with a safety route to questions that come
             // in without a private route. Giving away a safety route when the node id is known is
             // a privacy violation!

             // Get sequencing preference
             let sequencing = if detail
                 .connection_descriptor
@@ -187,65 +190,25 @@ impl RPCProcessor {
             } else {
                 Sequencing::NoPreference
             };
-            Some((
+            (
                 self.routing_table.node_id_secret(),
                 SafetySelection::Unsafe(sequencing),
-            ))
+            )
         } else {
             // Get sender id
             let sender_id = detail.envelope.get_sender_id();

             // Look up the private route and ensure it's one in our spec store
-            let rss= self.routing_table.route_spec_store();
-            let opt_signatures_valid = rss.with_route_spec_detail(&private_route.public_key, |rsd| {
-                // Ensure we have the right number of signatures
-                if routed_operation.signatures.len() != rsd.hops.len() - 1 {
-                    // Wrong number of signatures
-                    log_rpc!(debug "wrong number of signatures ({} should be {}) for routed operation on private route {}", routed_operation.signatures.len(), rsd.hops.len() - 1, private_route.public_key);
-                    return None;
-                }
-                // Validate signatures to ensure the route was handled by the nodes and not messed with
-                for (hop_n, hop_public_key) in rsd.hops.iter().enumerate() {
-                    // The last hop is not signed, as the whole packet is signed
-                    if hop_n == routed_operation.signatures.len() {
-                        // Verify the node we received the routed operation from is the last hop in our route
-                        if *hop_public_key != sender_id {
-                            log_rpc!(debug "received routed operation from the wrong hop ({} should be {}) on private route {}", hop_public_key.encode(), sender_id.encode(), private_route.public_key);
-                            return None;
-                        }
-                    } else {
-                        // Verify a signature for a hop node along the route
-                        if let Err(e) = verify(
-                            hop_public_key,
-                            &routed_operation.data,
-                            &routed_operation.signatures[hop_n],
-                        ) {
-                            log_rpc!(debug "failed to verify signature for hop {} at {} on private route {}", hop_n, hop_public_key, private_route.public_key);
-                            return None;
-                        }
-                    }
-                }
-                // We got the correct signatures, return a key ans
-                Some((
-                    rsd.secret_key,
-                    SafetySelection::Safe(SafetySpec {
-                        preferred_route: Some(private_route.public_key),
-                        hop_count: rsd.hops.len(),
-                        stability: rsd.stability,
-                        sequencing: rsd.sequencing,
-                    })
-                ))
-            });
-            opt_signatures_valid.ok_or_else(|| {
-                RPCError::protocol("routed operation received on unallocated private route")
-            })?
+            let rss = self.routing_table.route_spec_store();
+            rss.validate_signatures(
+                &private_route.public_key,
+                &routed_operation.signatures,
+                &routed_operation.data,
+                sender_id,
+            )
+            .map_err(RPCError::protocol)?
+            .ok_or_else(|| RPCError::protocol("signatures did not validate for private route"))?
         };
-        if opt_pr_info.is_none() {
-            return Err(RPCError::protocol(
-                "signatures did not validate for private route",
-            ));
-        }
-        let (secret_key, safety_selection) = opt_pr_info.unwrap();

         // Now that things are valid, decrypt the routed operation with DEC(nonce, DH(the SR's public key, the PR's (or node's) secret)
         // xxx: punish nodes that send messages that fail to decrypt eventually
@@ -344,8 +307,13 @@ impl RPCProcessor {
                     .await?;
             } else {
                 // Private route is empty, process routed operation
-                self.process_routed_operation(detail, route.operation, &route.safety_route, &private_route)
-                    .await?;
+                self.process_routed_operation(
+                    detail,
+                    route.operation,
+                    &route.safety_route,
+                    &private_route,
+                )
+                .await?;
             }
         } else if blob_tag == 0 {
             // RouteHop
@@ -410,8 +378,13 @@ impl RPCProcessor {
                     .await?;
             } else {
                 // No hops left, time to process the routed operation
-                self.process_routed_operation(detail, route.operation, &route.safety_route, private_route)
-                    .await?;
+                self.process_routed_operation(
+                    detail,
+                    route.operation,
+                    &route.safety_route,
+                    private_route,
+                )
+                .await?;
             }
         }
     }
@@ -34,7 +34,8 @@ impl RPCProcessor {

         // Wait for receipt
         match eventual_value.await.take_value().unwrap() {
-            ReceiptEvent::ReturnedPrivate | ReceiptEvent::ReturnedInBand { inbound_noderef: _ } => {
+            ReceiptEvent::ReturnedPrivate { private_route: _ }
+            | ReceiptEvent::ReturnedInBand { inbound_noderef: _ } => {
                 log_net!(debug "validate_dial_info receipt should be returned out-of-band".green());
                 Ok(false)
             }
@@ -94,20 +95,26 @@ impl RPCProcessor {
             routing_domain,
             dial_info.clone(),
         );
-        let will_validate_dial_info_filter = Box::new(move |_rti, e: &BucketEntryInner| {
-            if let Some(status) = &e.node_status(routing_domain) {
-                status.will_validate_dial_info()
-            } else {
-                true
-            }
-        });
-        let filter = RoutingTable::combine_entry_filters(
+        let will_validate_dial_info_filter = Box::new(
+            move |rti: &RoutingTableInner, _k: DHTKey, v: Option<Arc<BucketEntry>>| {
+                let entry = v.unwrap();
+                entry.with(rti, move |_rti, e| {
+                    if let Some(status) = &e.node_status(routing_domain) {
+                        status.will_validate_dial_info()
+                    } else {
+                        true
+                    }
+                })
+            },
+        ) as RoutingTableEntryFilter;
+
+        let filters = VecDeque::from([
             outbound_dial_info_entry_filter,
-            will_validate_dial_info_filter, fuck this shit. do it tomorrow.
-        );
+            will_validate_dial_info_filter,
+        ]);

         // Find nodes matching filter to redirect this to
-        let peers = routing_table.find_fast_public_nodes_filtered(node_count, filter);
+        let peers = routing_table.find_fast_public_nodes_filtered(node_count, filters);
         if peers.is_empty() {
             return Err(RPCError::internal(format!(
                 "no peers able to reach dialinfo '{:?}'",
@@ -647,7 +647,7 @@ impl NodeInfo {
             .as_ref()
             .map(|rpi| {
                 let relay_ni = &rpi.signed_node_info.node_info;
-                for did in relay_ni.dial_info_detail_list {
+                for did in &relay_ni.dial_info_detail_list {
                     match sequencing {
                         Sequencing::NoPreference | Sequencing::PreferOrdered => return true,
                         Sequencing::EnsureOrdered => {
|
Loading…
x
Reference in New Issue
Block a user