Mirror of https://gitlab.com/veilid/veilid.git (synced 2025-04-19 23:36:04 -04:00)

Merge branch 'dev' into 'main'

dev merge

See merge request veilid/veilid!8

Commit 5665185a66

Cargo.lock (generated)
@@ -1209,6 +1209,15 @@ dependencies = [
 "xi-unicode",
]

[[package]]
name = "cursive_table_view"
version = "0.14.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f8935dd87d19c54b7506b245bc988a7b4e65b1058e1d0d64c0ad9b3188e48060"
dependencies = [
 "cursive_core",
]

[[package]]
name = "curve25519-dalek"
version = "3.2.1"

@@ -5087,6 +5096,7 @@ dependencies = [
 "cursive",
 "cursive-flexi-logger-view",
 "cursive_buffered_backend",
 "cursive_table_view",
 "directories",
 "flexi_logger",
 "futures",

@@ -5155,6 +5165,7 @@ dependencies = [
 "nix 0.25.0",
 "no-std-net",
 "once_cell",
 "owning_ref",
 "owo-colors",
 "parking_lot 0.12.1",
 "rand 0.7.3",
@@ -3,5 +3,4 @@ core:
  network:
    dht:
      min_peer_count: 1
    enable_local_peer_scope: true
    bootstrap: []
@@ -23,9 +23,9 @@ tokio-util = { version = "^0", features = ["compat"], optional = true}
async-tungstenite = { version = "^0.8" }
cursive-flexi-logger-view = { path = "../external/cursive-flexi-logger-view" }
cursive_buffered_backend = { path = "../external/cursive_buffered_backend" }
# cursive-multiplex = "0.4.0"
# cursive-multiplex = "0.6.0"
# cursive_tree_view = "0.6.0"
# cursive_table_view = "0.12.0"
cursive_table_view = "0.14.0"
# cursive-tabs = "0.5.0"
clap = "^3"
directories = "^4"
@@ -8,7 +8,7 @@ use std::net::SocketAddr;
use std::rc::Rc;
use std::time::{Duration, SystemTime};
use veilid_core::xx::{Eventual, EventualCommon};
use veilid_core::VeilidConfigLogLevel;
use veilid_core::*;

pub fn convert_loglevel(s: &str) -> Result<VeilidConfigLogLevel, String> {
    match s.to_ascii_lowercase().as_str() {

@@ -111,7 +111,8 @@ attach - attach the server to the Veilid network
detach - detach the server from the Veilid network
debug - send a debugging command to the Veilid server
change_log_level - change the log level for a tracing layer
"#,
"#
    .to_owned(),
);
let ui = self.ui();
ui.send_callback(callback);

@@ -202,7 +203,7 @@ change_log_level - change the log level for a tracing layer
let log_level = match convert_loglevel(&rest.unwrap_or_default()) {
    Ok(v) => v,
    Err(e) => {
        error!("failed to change log level: {}", e);
        ui.add_node_event(format!("Failed to change log level: {}", e));
        ui.send_callback(callback);
        return;
    }

@@ -210,12 +211,14 @@ change_log_level - change the log level for a tracing layer

match capi.server_change_log_level(layer, log_level).await {
    Ok(()) => {
        info!("Log level changed");
        ui.send_callback(callback);
        ui.display_string_dialog("Success", "Log level changed", callback);
    }
    Err(e) => {
        error!("Server command 'change_log_level' failed: {}", e);
        ui.send_callback(callback);
        ui.display_string_dialog(
            "Server command 'change_log_level' failed",
            e.to_string(),
            callback,
        );
    }
}
});
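The hunks above only show the signature of convert_loglevel and the head of its match; a minimal sketch of the kind of mapping it performs follows. The accepted strings and the exact enum variant names are assumptions, they are not shown in this diff.

pub fn convert_loglevel(s: &str) -> Result<VeilidConfigLogLevel, String> {
    // Map a user-typed level name onto the core config enum.
    // The set of accepted strings here is an assumption, not taken from this diff.
    match s.to_ascii_lowercase().as_str() {
        "off" => Ok(VeilidConfigLogLevel::Off),
        "error" => Ok(VeilidConfigLogLevel::Error),
        "warn" => Ok(VeilidConfigLogLevel::Warn),
        "info" => Ok(VeilidConfigLogLevel::Info),
        "debug" => Ok(VeilidConfigLogLevel::Debug),
        "trace" => Ok(VeilidConfigLogLevel::Trace),
        _ => Err(format!("unknown log level: {}", s)),
    }
}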
@@ -320,14 +323,25 @@ change_log_level - change the log level for a tracing layer
    }

    pub fn update_network_status(&mut self, network: veilid_core::VeilidStateNetwork) {
        self.inner_mut()
            .ui
            .set_network_status(network.started, network.bps_down, network.bps_up);
        self.inner_mut().ui.set_network_status(
            network.started,
            network.bps_down,
            network.bps_up,
            network.peers,
        );
    }

    pub fn update_log(&mut self, log: veilid_core::VeilidStateLog) {
        let message = format!("{}: {}", log.log_level, log.message);
        self.inner().ui.add_node_event(&message);
        self.inner().ui.add_node_event(format!(
            "{}: {}{}",
            log.log_level,
            log.message,
            if let Some(bt) = log.backtrace {
                format!("\nBacktrace:\n{}", bt)
            } else {
                "".to_owned()
            }
        ));
    }

    pub fn update_shutdown(&mut self) {
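Read together, the new calls above imply that the updated state types carry a peer list and an optional backtrace. A rough sketch of the shapes as used here; the field types are inferred from usage in this diff, not copied from veilid-core:

// Shapes inferred from how the fields are used in this diff; not the actual definitions.
pub struct VeilidStateNetwork {
    pub started: bool,
    pub bps_down: u64,
    pub bps_up: u64,
    pub peers: Vec<PeerTableData>, // newly threaded through to the UI
}

pub struct VeilidStateLog {
    pub log_level: VeilidLogLevel,
    pub message: String,
    pub backtrace: Option<String>, // newly attached for error-level logs
}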
@@ -12,6 +12,7 @@ use tools::*;

mod client_api_connection;
mod command_processor;
mod peers_table_view;
mod settings;
mod tools;
mod ui;
veilid-cli/src/peers_table_view.rs (new file, 99 lines)

@@ -0,0 +1,99 @@
use super::*;
use cursive_table_view::*;
use std::cmp::Ordering;
use veilid_core::PeerTableData;

#[derive(Copy, Clone, PartialEq, Eq, Hash)]
pub enum PeerTableColumn {
    NodeId,
    Address,
    LatencyAvg,
    TransferDownAvg,
    TransferUpAvg,
}

// impl PeerTableColumn {
//     fn as_str(&self) -> &str {
//         match self {
//             PeerTableColumn::NodeId => "Node Id",
//             PeerTableColumn::Address => "Address",
//             PeerTableColumn::LatencyAvg => "Latency",
//             PeerTableColumn::TransferDownAvg => "Down",
//             PeerTableColumn::TransferUpAvg => "Up",
//         }
//     }
// }

fn format_ts(ts: u64) -> String {
    let secs = timestamp_to_secs(ts);
    if secs >= 1.0 {
        format!("{:.2}s", timestamp_to_secs(ts))
    } else {
        format!("{:.2}ms", timestamp_to_secs(ts) * 1000.0)
    }
}

fn format_bps(bps: u64) -> String {
    if bps >= 1024u64 * 1024u64 * 1024u64 {
        format!("{:.2}GB/s", (bps / (1024u64 * 1024u64)) as f64 / 1024.0)
    } else if bps >= 1024u64 * 1024u64 {
        format!("{:.2}MB/s", (bps / 1024u64) as f64 / 1024.0)
    } else if bps >= 1024u64 {
        format!("{:.2}KB/s", bps as f64 / 1024.0)
    } else {
        format!("{:.2}B/s", bps as f64)
    }
}

impl TableViewItem<PeerTableColumn> for PeerTableData {
    fn to_column(&self, column: PeerTableColumn) -> String {
        match column {
            PeerTableColumn::NodeId => self.node_id.encode(),
            PeerTableColumn::Address => format!(
                "{:?}:{}",
                self.peer_address.protocol_type(),
                self.peer_address.to_socket_addr()
            ),
            PeerTableColumn::LatencyAvg => format!(
                "{}",
                self.peer_stats
                    .latency
                    .as_ref()
                    .map(|l| format_ts(l.average))
                    .unwrap_or("---".to_owned())
            ),
            PeerTableColumn::TransferDownAvg => format_bps(self.peer_stats.transfer.down.average),
            PeerTableColumn::TransferUpAvg => format_bps(self.peer_stats.transfer.up.average),
        }
    }

    fn cmp(&self, other: &Self, column: PeerTableColumn) -> Ordering
    where
        Self: Sized,
    {
        match column {
            PeerTableColumn::NodeId => self.node_id.cmp(&other.node_id),
            PeerTableColumn::Address => self.to_column(column).cmp(&other.to_column(column)),
            PeerTableColumn::LatencyAvg => self
                .peer_stats
                .latency
                .as_ref()
                .map(|l| l.average)
                .cmp(&other.peer_stats.latency.as_ref().map(|l| l.average)),
            PeerTableColumn::TransferDownAvg => self
                .peer_stats
                .transfer
                .down
                .average
                .cmp(&other.peer_stats.transfer.down.average),
            PeerTableColumn::TransferUpAvg => self
                .peer_stats
                .transfer
                .up
                .average
                .cmp(&other.peer_stats.transfer.up.average),
        }
    }
}

pub type PeersTableView = TableView<PeerTableData, PeerTableColumn>;
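As the ui.rs hunks further down show, this table view is registered with cursive under the name "peers" and refreshed whenever the peers state is marked dirty. A condensed sketch of that wiring, taken from the ui.rs changes below and trimmed for brevity:

// Column setup and refresh path, condensed from the ui.rs hunks below.
let peers_table_view = PeersTableView::new()
    .column(PeerTableColumn::NodeId, "Node Id", |c| c.width(43))
    .column(PeerTableColumn::Address, "Address", |c| c)
    .column(PeerTableColumn::LatencyAvg, "Ping", |c| c.width(8))
    .column(PeerTableColumn::TransferDownAvg, "Down", |c| c.width(8))
    .column(PeerTableColumn::TransferUpAvg, "Up", |c| c.width(8))
    .with_name("peers")
    .full_width()
    .min_height(8);

// Called from update_cb when ui_state.peers_state.take_dirty() returns true:
fn refresh_peers(s: &mut Cursive) {
    let mut peers = UI::peers(s); // s.find_name::<PeersTableView>("peers")
    let inner = UI::inner_mut(s);
    peers.set_items_stable(inner.ui_state.peers_state.get().clone());
}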
@ -1,4 +1,5 @@
|
||||
use crate::command_processor::*;
|
||||
use crate::peers_table_view::*;
|
||||
use crate::settings::Settings;
|
||||
use crossbeam_channel::Sender;
|
||||
use cursive::align::*;
|
||||
@ -10,6 +11,7 @@ use cursive::views::*;
|
||||
use cursive::Cursive;
|
||||
use cursive::CursiveRunnable;
|
||||
use cursive_flexi_logger_view::{CursiveLogWriter, FlexiLoggerView};
|
||||
//use cursive_multiplex::*;
|
||||
use log::*;
|
||||
use std::cell::RefCell;
|
||||
use std::collections::{HashMap, VecDeque};
|
||||
@ -20,7 +22,7 @@ use veilid_core::*;
|
||||
//////////////////////////////////////////////////////////////
|
||||
///
|
||||
struct Dirty<T> {
|
||||
pub value: T,
|
||||
value: T,
|
||||
dirty: bool,
|
||||
}
|
||||
|
||||
@ -52,6 +54,7 @@ struct UIState {
|
||||
network_started: Dirty<bool>,
|
||||
network_down_up: Dirty<(f32, f32)>,
|
||||
connection_state: Dirty<ConnectionState>,
|
||||
peers_state: Dirty<Vec<PeerTableData>>,
|
||||
}
|
||||
|
||||
impl UIState {
|
||||
@ -61,6 +64,7 @@ impl UIState {
|
||||
network_started: Dirty::new(false),
|
||||
network_down_up: Dirty::new((0.0, 0.0)),
|
||||
connection_state: Dirty::new(ConnectionState::Disconnected),
|
||||
peers_state: Dirty::new(Vec::new()),
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -219,6 +223,9 @@ impl UI {
|
||||
fn status_bar(s: &mut Cursive) -> ViewRef<TextView> {
|
||||
s.find_name("status-bar").unwrap()
|
||||
}
|
||||
fn peers(s: &mut Cursive) -> ViewRef<PeersTableView> {
|
||||
s.find_name("peers").unwrap()
|
||||
}
|
||||
fn render_attachment_state<'a>(inner: &mut UIInner) -> &'a str {
|
||||
match inner.ui_state.attachment_state.get() {
|
||||
AttachmentState::Detached => " Detached [----]",
|
||||
@ -607,15 +614,23 @@ impl UI {
|
||||
statusbar.set_content(status);
|
||||
}
|
||||
|
||||
fn refresh_peers(s: &mut Cursive) {
|
||||
let mut peers = UI::peers(s);
|
||||
let inner = Self::inner_mut(s);
|
||||
peers.set_items_stable(inner.ui_state.peers_state.get().clone());
|
||||
}
|
||||
|
||||
fn update_cb(s: &mut Cursive) {
|
||||
let mut inner = Self::inner_mut(s);
|
||||
|
||||
let mut refresh_statusbar = false;
|
||||
let mut refresh_button_attach = false;
|
||||
let mut refresh_connection_dialog = false;
|
||||
let mut refresh_peers = false;
|
||||
if inner.ui_state.attachment_state.take_dirty() {
|
||||
refresh_statusbar = true;
|
||||
refresh_button_attach = true;
|
||||
refresh_peers = true;
|
||||
}
|
||||
if inner.ui_state.network_started.take_dirty() {
|
||||
refresh_statusbar = true;
|
||||
@ -627,6 +642,10 @@ impl UI {
|
||||
refresh_statusbar = true;
|
||||
refresh_button_attach = true;
|
||||
refresh_connection_dialog = true;
|
||||
refresh_peers = true;
|
||||
}
|
||||
if inner.ui_state.peers_state.take_dirty() {
|
||||
refresh_peers = true;
|
||||
}
|
||||
|
||||
drop(inner);
|
||||
@ -640,6 +659,9 @@ impl UI {
|
||||
if refresh_connection_dialog {
|
||||
Self::refresh_connection_dialog(s);
|
||||
}
|
||||
if refresh_peers {
|
||||
Self::refresh_peers(s);
|
||||
}
|
||||
}
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////
|
||||
@ -686,30 +708,48 @@ impl UI {
|
||||
siv.set_user_data(this.inner.clone());
|
||||
|
||||
// Create layouts
|
||||
let mut mainlayout = LinearLayout::vertical().with_name("main-layout");
|
||||
mainlayout.get_mut().add_child(
|
||||
Panel::new(
|
||||
FlexiLoggerView::new_scrollable()
|
||||
.with_name("node-events")
|
||||
.full_screen(),
|
||||
)
|
||||
.title_position(HAlign::Left)
|
||||
.title("Node Events"),
|
||||
);
|
||||
mainlayout.get_mut().add_child(
|
||||
Panel::new(ScrollView::new(
|
||||
TextView::new("Peer Table")
|
||||
.with_name("peers")
|
||||
.fixed_height(8)
|
||||
.scrollable(),
|
||||
))
|
||||
.title_position(HAlign::Left)
|
||||
.title("Peers"),
|
||||
);
|
||||
|
||||
let node_events_view = Panel::new(
|
||||
FlexiLoggerView::new_scrollable()
|
||||
.with_name("node-events")
|
||||
.full_screen(),
|
||||
)
|
||||
.title_position(HAlign::Left)
|
||||
.title("Node Events");
|
||||
|
||||
let peers_table_view = PeersTableView::new()
|
||||
.column(PeerTableColumn::NodeId, "Node Id", |c| c.width(43))
|
||||
.column(PeerTableColumn::Address, "Address", |c| c)
|
||||
.column(PeerTableColumn::LatencyAvg, "Ping", |c| c.width(8))
|
||||
.column(PeerTableColumn::TransferDownAvg, "Down", |c| c.width(8))
|
||||
.column(PeerTableColumn::TransferUpAvg, "Up", |c| c.width(8))
|
||||
.with_name("peers")
|
||||
.full_width()
|
||||
.min_height(8);
|
||||
|
||||
// attempt at using Mux. Mux has bugs, like resizing problems.
|
||||
// let mut mux = Mux::new();
|
||||
// let node_node_events_view = mux
|
||||
// .add_below(node_events_view, mux.root().build().unwrap())
|
||||
// .unwrap();
|
||||
// let node_peers_table_view = mux
|
||||
// .add_below(peers_table_view, node_node_events_view)
|
||||
// .unwrap();
|
||||
// mux.set_container_split_ratio(node_peers_table_view, 0.75)
|
||||
// .unwrap();
|
||||
// let mut mainlayout = LinearLayout::vertical();
|
||||
// mainlayout.add_child(mux);
|
||||
|
||||
// Back to fixed layout
|
||||
let mut mainlayout = LinearLayout::vertical();
|
||||
mainlayout.add_child(node_events_view);
|
||||
mainlayout.add_child(peers_table_view);
|
||||
// ^^^ fixed layout
|
||||
|
||||
let mut command = StyledString::new();
|
||||
command.append_styled("Command> ", ColorStyle::title_primary());
|
||||
//
|
||||
mainlayout.get_mut().add_child(
|
||||
mainlayout.add_child(
|
||||
LinearLayout::horizontal()
|
||||
.child(TextView::new(command))
|
||||
.child(
|
||||
@ -738,7 +778,7 @@ impl UI {
|
||||
ColorStyle::highlight_inactive(),
|
||||
);
|
||||
|
||||
mainlayout.get_mut().add_child(
|
||||
mainlayout.add_child(
|
||||
LinearLayout::horizontal()
|
||||
.color(Some(ColorStyle::highlight_inactive()))
|
||||
.child(
|
||||
@ -776,13 +816,20 @@ impl UI {
|
||||
inner.ui_state.attachment_state.set(state);
|
||||
let _ = inner.cb_sink.send(Box::new(UI::update_cb));
|
||||
}
|
||||
pub fn set_network_status(&mut self, started: bool, bps_down: u64, bps_up: u64) {
|
||||
pub fn set_network_status(
|
||||
&mut self,
|
||||
started: bool,
|
||||
bps_down: u64,
|
||||
bps_up: u64,
|
||||
peers: Vec<PeerTableData>,
|
||||
) {
|
||||
let mut inner = self.inner.borrow_mut();
|
||||
inner.ui_state.network_started.set(started);
|
||||
inner.ui_state.network_down_up.set((
|
||||
((bps_down as f64) / 1000.0f64) as f32,
|
||||
((bps_up as f64) / 1000.0f64) as f32,
|
||||
));
|
||||
inner.ui_state.peers_state.set(peers);
|
||||
let _ = inner.cb_sink.send(Box::new(UI::update_cb));
|
||||
}
|
||||
pub fn set_connection_state(&mut self, state: ConnectionState) {
|
||||
@ -790,7 +837,8 @@ impl UI {
|
||||
inner.ui_state.connection_state.set(state);
|
||||
let _ = inner.cb_sink.send(Box::new(UI::update_cb));
|
||||
}
|
||||
pub fn add_node_event(&self, event: &str) {
|
||||
|
||||
pub fn add_node_event(&self, event: String) {
|
||||
let inner = self.inner.borrow();
|
||||
let color = *inner.log_colors.get(&Level::Info).unwrap();
|
||||
for line in event.lines() {
|
||||
|
@@ -15,8 +15,8 @@ rt-async-std = [ "async-std", "async-std-resolver", "async_executors/async_std",
rt-tokio = [ "tokio", "tokio-util", "tokio-stream", "trust-dns-resolver/tokio-runtime", "async_executors/tokio_tp", "async_executors/tokio_io", "async_executors/tokio_timer", "rtnetlink?/tokio_socket" ]

android_tests = []
ios_tests = [ "simplelog", "backtrace" ]
tracking = [ "backtrace" ]
ios_tests = [ "simplelog" ]
tracking = []

[dependencies]
tracing = { version = "^0", features = ["log", "attributes"] }

@@ -41,9 +41,10 @@ lazy_static = "^1"
directories = "^4"
once_cell = "^1"
json = "^0"
owning_ref = "^0"
flume = { version = "^0", features = ["async"] }
enumset = { version= "^1", features = ["serde"] }
backtrace = { version = "^0", optional = true }
backtrace = { version = "^0" }
owo-colors = "^3"
stop-token = { version = "^0", default-features = false }
ed25519-dalek = { version = "^1", default_features = false, features = ["alloc", "u64_backend"] }

@@ -134,7 +135,6 @@ jni-sys = "^0"
ndk = { version = "^0", features = ["trace"] }
ndk-glue = { version = "^0", features = ["logger"] }
tracing-android = { version = "^0" }
backtrace = { version = "^0" }

# Dependenices for all Unix (Linux, Android, MacOS, iOS)
[target.'cfg(unix)'.dependencies]
@ -175,10 +175,6 @@ struct ValueData {
|
||||
# Operations
|
||||
##############################
|
||||
|
||||
struct OperationStatusQ {
|
||||
nodeStatus @0 :NodeStatus; # node status update about the statusq sender
|
||||
}
|
||||
|
||||
enum NetworkClass {
|
||||
inboundCapable @0; # I = Inbound capable without relay, may require signal
|
||||
outboundOnly @1; # O = Outbound only, inbound relay required except with reverse connect signal
|
||||
@ -199,7 +195,7 @@ struct DialInfoDetail {
|
||||
class @1 :DialInfoClass;
|
||||
}
|
||||
|
||||
struct NodeStatus {
|
||||
struct PublicInternetNodeStatus {
|
||||
willRoute @0 :Bool;
|
||||
willTunnel @1 :Bool;
|
||||
willSignal @2 :Bool;
|
||||
@ -207,6 +203,18 @@ struct NodeStatus {
|
||||
willValidateDialInfo @4 :Bool;
|
||||
}
|
||||
|
||||
struct LocalNetworkNodeStatus {
|
||||
willRelay @0 :Bool;
|
||||
willValidateDialInfo @1 :Bool;
|
||||
}
|
||||
|
||||
struct NodeStatus {
|
||||
union {
|
||||
publicInternet @0 :PublicInternetNodeStatus;
|
||||
localNetwork @1 :LocalNetworkNodeStatus;
|
||||
}
|
||||
}
|
||||
|
||||
struct ProtocolTypeSet {
|
||||
udp @0 :Bool;
|
||||
tcp @1 :Bool;
|
||||
@ -221,7 +229,7 @@ struct AddressTypeSet {
|
||||
|
||||
struct NodeInfo {
|
||||
networkClass @0 :NetworkClass; # network class of this node
|
||||
outboundProtocols @1 :ProtocolTypeSet; # protocols that can go outbound
|
||||
outboundProtocols @1 :ProtocolTypeSet; # protocols that can go outbound
|
||||
addressTypes @2 :AddressTypeSet; # address types supported
|
||||
minVersion @3 :UInt8; # minimum protocol version for rpc
|
||||
maxVersion @4 :UInt8; # maximum protocol version for rpc
|
||||
@ -239,6 +247,21 @@ struct SenderInfo {
|
||||
socketAddress @0 :SocketAddress; # socket address was available for peer
|
||||
}
|
||||
|
||||
struct PeerInfo {
|
||||
nodeId @0 :NodeID; # node id for 'closer peer'
|
||||
signedNodeInfo @1 :SignedNodeInfo; # signed node info for 'closer peer'
|
||||
}
|
||||
|
||||
struct RoutedOperation {
|
||||
signatures @0 :List(Signature); # signatures from nodes that have handled the private route
|
||||
nonce @1 :Nonce; # nonce Xmsg
|
||||
data @2 :Data; # Operation encrypted with ENC(Xmsg,DH(PKapr,SKbsr))
|
||||
}
|
||||
|
||||
struct OperationStatusQ {
|
||||
nodeStatus @0 :NodeStatus; # node status update about the statusq sender
|
||||
}
|
||||
|
||||
struct OperationStatusA {
|
||||
nodeStatus @0 :NodeStatus; # returned node status
|
||||
senderInfo @1 :SenderInfo; # info about StatusQ sender from the perspective of the replier
|
||||
@ -258,21 +281,10 @@ struct OperationFindNodeQ {
|
||||
nodeId @0 :NodeID; # node id to locate
|
||||
}
|
||||
|
||||
struct PeerInfo {
|
||||
nodeId @0 :NodeID; # node id for 'closer peer'
|
||||
signedNodeInfo @1 :SignedNodeInfo; # signed node info for 'closer peer'
|
||||
}
|
||||
|
||||
struct OperationFindNodeA {
|
||||
peers @0 :List(PeerInfo); # returned 'closer peer' information
|
||||
}
|
||||
|
||||
struct RoutedOperation {
|
||||
signatures @0 :List(Signature); # signatures from nodes that have handled the private route
|
||||
nonce @1 :Nonce; # nonce Xmsg
|
||||
data @2 :Data; # Operation encrypted with ENC(Xmsg,DH(PKapr,SKbsr))
|
||||
}
|
||||
|
||||
struct OperationRoute {
|
||||
safetyRoute @0 :SafetyRoute; # Where this should go
|
||||
operation @1 :RoutedOperation; # The operation to be routed
|
||||
@ -419,26 +431,25 @@ struct OperationCancelTunnelA {
|
||||
# Things that want an answer
|
||||
struct Question {
|
||||
respondTo :union {
|
||||
sender @0 :Void; # sender without node info
|
||||
senderWithInfo @1 :SignedNodeInfo; # some envelope-sender signed node info to be used for reply
|
||||
privateRoute @2 :PrivateRoute; # embedded private route to be used for reply
|
||||
sender @0 :Void; # sender
|
||||
privateRoute @1 :PrivateRoute; # embedded private route to be used for reply
|
||||
}
|
||||
detail :union {
|
||||
# Direct operations
|
||||
statusQ @3 :OperationStatusQ;
|
||||
findNodeQ @4 :OperationFindNodeQ;
|
||||
statusQ @2 :OperationStatusQ;
|
||||
findNodeQ @3 :OperationFindNodeQ;
|
||||
|
||||
# Routable operations
|
||||
getValueQ @5 :OperationGetValueQ;
|
||||
setValueQ @6 :OperationSetValueQ;
|
||||
watchValueQ @7 :OperationWatchValueQ;
|
||||
supplyBlockQ @8 :OperationSupplyBlockQ;
|
||||
findBlockQ @9 :OperationFindBlockQ;
|
||||
getValueQ @4 :OperationGetValueQ;
|
||||
setValueQ @5 :OperationSetValueQ;
|
||||
watchValueQ @6 :OperationWatchValueQ;
|
||||
supplyBlockQ @7 :OperationSupplyBlockQ;
|
||||
findBlockQ @8 :OperationFindBlockQ;
|
||||
|
||||
# Tunnel operations
|
||||
startTunnelQ @10 :OperationStartTunnelQ;
|
||||
completeTunnelQ @11 :OperationCompleteTunnelQ;
|
||||
cancelTunnelQ @12 :OperationCancelTunnelQ;
|
||||
startTunnelQ @9 :OperationStartTunnelQ;
|
||||
completeTunnelQ @10 :OperationCompleteTunnelQ;
|
||||
cancelTunnelQ @11 :OperationCancelTunnelQ;
|
||||
}
|
||||
}
|
||||
|
||||
@ -480,10 +491,10 @@ struct Answer {
|
||||
|
||||
struct Operation {
|
||||
opId @0 :UInt64; # Random RPC ID. Must be random to foil reply forgery attacks.
|
||||
|
||||
senderNodeInfo @1 :SignedNodeInfo; # (optional) SignedNodeInfo for the sender to be cached by the receiver.
|
||||
kind :union {
|
||||
question @1 :Question;
|
||||
statement @2 :Statement;
|
||||
answer @3 :Answer;
|
||||
question @2 :Question;
|
||||
statement @3 :Statement;
|
||||
answer @4 :Answer;
|
||||
}
|
||||
}
|
||||
|
@@ -96,7 +96,18 @@ impl<S: Subscriber + for<'a> registry::LookupSpan<'a>> Layer<S> for ApiTracingLa

            let message = format!("{} {}", origin, recorder);

            (inner.update_callback)(VeilidUpdate::Log(VeilidStateLog { log_level, message }))
            let backtrace = if log_level <= VeilidLogLevel::Error {
                let bt = backtrace::Backtrace::new();
                Some(format!("{:?}", bt))
            } else {
                None
            };

            (inner.update_callback)(VeilidUpdate::Log(VeilidStateLog {
                log_level,
                message,
                backtrace,
            }))
        }
    }
}
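The pattern above, capture an expensive backtrace only when the level is severe enough and ship it as an Option<String>, can be illustrated in isolation. This is a standalone sketch using the backtrace crate; the names here are not from veilid.

// Standalone illustration of the gated-backtrace pattern.
use backtrace::Backtrace;

#[derive(PartialEq, PartialOrd)]
enum Severity {
    Error, // most severe, declared first so it orders lowest
    Warn,
    Info,
}

fn capture_backtrace_if_severe(level: Severity) -> Option<String> {
    if level <= Severity::Error {
        // Backtrace::new() resolves symbols eagerly and is comparatively expensive,
        // which is why it is only taken for error-level events.
        Some(format!("{:?}", Backtrace::new()))
    } else {
        None
    }
}

fn main() {
    assert!(capture_backtrace_if_severe(Severity::Error).is_some());
    assert!(capture_backtrace_if_severe(Severity::Info).is_none());
}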
@@ -2,6 +2,7 @@ use super::*;

#[derive(Clone, Debug)]
pub struct ConnectionHandle {
    id: u64,
    descriptor: ConnectionDescriptor,
    channel: flume::Sender<Vec<u8>>,
}

@@ -13,13 +14,22 @@ pub enum ConnectionHandleSendResult {
}

impl ConnectionHandle {
    pub(super) fn new(descriptor: ConnectionDescriptor, channel: flume::Sender<Vec<u8>>) -> Self {
    pub(super) fn new(
        id: u64,
        descriptor: ConnectionDescriptor,
        channel: flume::Sender<Vec<u8>>,
    ) -> Self {
        Self {
            id,
            descriptor,
            channel,
        }
    }

    pub fn connection_id(&self) -> u64 {
        self.id
    }

    pub fn connection_descriptor(&self) -> ConnectionDescriptor {
        self.descriptor.clone()
    }
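Elsewhere in this merge (the network/mod.rs hunk near the end), callers use the handle's send_async and fall back when the connection's receive loop has already exited. Roughly, as sketched below; the NotSent payload is an assumption about the enum variant, which this diff truncates.

// Sketch of caller-side use of ConnectionHandle; the NotSent(Vec<u8>) variant is assumed.
async fn send_over_existing(conn: ConnectionHandle, data: Vec<u8>) -> Result<(), Vec<u8>> {
    match conn.send_async(data).await {
        ConnectionHandleSendResult::Sent => Ok(()),
        // The connection loop is gone; hand the data back so the caller can
        // open a fresh connection (see get_or_create_connection) and retry.
        ConnectionHandleSendResult::NotSent(data) => Err(data),
    }
}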
@ -11,12 +11,11 @@ use stop_token::future::FutureExt;
|
||||
enum ConnectionManagerEvent {
|
||||
Accepted(ProtocolNetworkConnection),
|
||||
Dead(NetworkConnection),
|
||||
Finished(ConnectionDescriptor),
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
struct ConnectionManagerInner {
|
||||
connection_table: ConnectionTable,
|
||||
next_id: NetworkConnectionId,
|
||||
sender: flume::Sender<ConnectionManagerEvent>,
|
||||
async_processor_jh: Option<MustJoinHandle<()>>,
|
||||
stop_source: Option<StopSource>,
|
||||
@ -24,6 +23,9 @@ struct ConnectionManagerInner {
|
||||
|
||||
struct ConnectionManagerArc {
|
||||
network_manager: NetworkManager,
|
||||
connection_initial_timeout_ms: u32,
|
||||
connection_inactivity_timeout_ms: u32,
|
||||
connection_table: ConnectionTable,
|
||||
inner: Mutex<Option<ConnectionManagerInner>>,
|
||||
}
|
||||
impl core::fmt::Debug for ConnectionManagerArc {
|
||||
@ -41,21 +43,32 @@ pub struct ConnectionManager {
|
||||
|
||||
impl ConnectionManager {
|
||||
fn new_inner(
|
||||
config: VeilidConfig,
|
||||
stop_source: StopSource,
|
||||
sender: flume::Sender<ConnectionManagerEvent>,
|
||||
async_processor_jh: MustJoinHandle<()>,
|
||||
) -> ConnectionManagerInner {
|
||||
ConnectionManagerInner {
|
||||
next_id: 0,
|
||||
stop_source: Some(stop_source),
|
||||
sender: sender,
|
||||
async_processor_jh: Some(async_processor_jh),
|
||||
connection_table: ConnectionTable::new(config),
|
||||
}
|
||||
}
|
||||
fn new_arc(network_manager: NetworkManager) -> ConnectionManagerArc {
|
||||
let config = network_manager.config();
|
||||
let (connection_initial_timeout_ms, connection_inactivity_timeout_ms) = {
|
||||
let c = config.get();
|
||||
(
|
||||
c.network.connection_initial_timeout_ms,
|
||||
c.network.connection_inactivity_timeout_ms,
|
||||
)
|
||||
};
|
||||
|
||||
ConnectionManagerArc {
|
||||
network_manager,
|
||||
connection_initial_timeout_ms,
|
||||
connection_inactivity_timeout_ms,
|
||||
connection_table: ConnectionTable::new(config),
|
||||
inner: Mutex::new(None),
|
||||
}
|
||||
}
|
||||
@ -69,6 +82,14 @@ impl ConnectionManager {
|
||||
self.arc.network_manager.clone()
|
||||
}
|
||||
|
||||
pub fn connection_initial_timeout_ms(&self) -> u32 {
|
||||
self.arc.connection_initial_timeout_ms
|
||||
}
|
||||
|
||||
pub fn connection_inactivity_timeout_ms(&self) -> u32 {
|
||||
self.arc.connection_inactivity_timeout_ms
|
||||
}
|
||||
|
||||
pub async fn startup(&self) {
|
||||
trace!("startup connection manager");
|
||||
let mut inner = self.arc.inner.lock();
|
||||
@ -86,12 +107,7 @@ impl ConnectionManager {
|
||||
let async_processor = spawn(self.clone().async_processor(stop_source.token(), receiver));
|
||||
|
||||
// Store in the inner object
|
||||
*inner = Some(Self::new_inner(
|
||||
self.network_manager().config(),
|
||||
stop_source,
|
||||
sender,
|
||||
async_processor,
|
||||
));
|
||||
*inner = Some(Self::new_inner(stop_source, sender, async_processor));
|
||||
}
|
||||
|
||||
pub async fn shutdown(&self) {
|
||||
@ -117,25 +133,10 @@ impl ConnectionManager {
|
||||
async_processor_jh.await;
|
||||
// Wait for the connections to complete
|
||||
debug!("waiting for connection handlers to complete");
|
||||
inner.connection_table.join().await;
|
||||
self.arc.connection_table.join().await;
|
||||
debug!("finished connection manager shutdown");
|
||||
}
|
||||
|
||||
// Returns a network connection if one already is established
|
||||
pub async fn get_connection(
|
||||
&self,
|
||||
descriptor: ConnectionDescriptor,
|
||||
) -> Option<ConnectionHandle> {
|
||||
let mut inner = self.arc.inner.lock();
|
||||
let inner = match &mut *inner {
|
||||
Some(v) => v,
|
||||
None => {
|
||||
panic!("not started");
|
||||
}
|
||||
};
|
||||
inner.connection_table.get_connection(descriptor)
|
||||
}
|
||||
|
||||
// Internal routine to register new connection atomically.
|
||||
// Registers connection in the connection table for later access
|
||||
// and spawns a message processing loop for the connection
|
||||
@ -144,7 +145,14 @@ impl ConnectionManager {
|
||||
inner: &mut ConnectionManagerInner,
|
||||
prot_conn: ProtocolNetworkConnection,
|
||||
) -> EyreResult<NetworkResult<ConnectionHandle>> {
|
||||
log_net!("on_new_protocol_network_connection: {:?}", prot_conn);
|
||||
// Get next connection id to use
|
||||
let id = inner.next_id;
|
||||
inner.next_id += 1;
|
||||
log_net!(
|
||||
"on_new_protocol_network_connection: id={} prot_conn={:?}",
|
||||
id,
|
||||
prot_conn
|
||||
);
|
||||
|
||||
// Wrap with NetworkConnection object to start the connection processing loop
|
||||
let stop_token = match &inner.stop_source {
|
||||
@ -152,143 +160,132 @@ impl ConnectionManager {
|
||||
None => bail!("not creating connection because we are stopping"),
|
||||
};
|
||||
|
||||
let conn = NetworkConnection::from_protocol(self.clone(), stop_token, prot_conn);
|
||||
let conn = NetworkConnection::from_protocol(self.clone(), stop_token, prot_conn, id);
|
||||
let handle = conn.get_handle();
|
||||
// Add to the connection table
|
||||
match inner.connection_table.add_connection(conn) {
|
||||
match self.arc.connection_table.add_connection(conn) {
|
||||
Ok(None) => {
|
||||
// Connection added
|
||||
}
|
||||
Ok(Some(conn)) => {
|
||||
// Connection added and a different one LRU'd out
|
||||
// Send it to be terminated
|
||||
let _ = inner.sender.send(ConnectionManagerEvent::Dead(conn));
|
||||
}
|
||||
Err(ConnectionTableAddError::AddressFilter(conn, e)) => {
|
||||
// Connection filtered
|
||||
let desc = conn.connection_descriptor();
|
||||
let _ = inner.sender.send(ConnectionManagerEvent::Dead(conn));
|
||||
return Err(eyre!("connection filtered: {:?} ({})", desc, e));
|
||||
return Ok(NetworkResult::no_connection_other(format!(
|
||||
"connection filtered: {:?} ({})",
|
||||
desc, e
|
||||
)));
|
||||
}
|
||||
Err(ConnectionTableAddError::AlreadyExists(conn)) => {
|
||||
// Connection already exists
|
||||
let desc = conn.connection_descriptor();
|
||||
let _ = inner.sender.send(ConnectionManagerEvent::Dead(conn));
|
||||
return Err(eyre!("connection already exists: {:?}", desc));
|
||||
return Ok(NetworkResult::no_connection_other(format!(
|
||||
"connection already exists: {:?}",
|
||||
desc
|
||||
)));
|
||||
}
|
||||
};
|
||||
Ok(NetworkResult::Value(handle))
|
||||
}
|
||||
|
||||
// Returns a network connection if one already is established
|
||||
pub fn get_connection(&self, descriptor: ConnectionDescriptor) -> Option<ConnectionHandle> {
|
||||
self.arc
|
||||
.connection_table
|
||||
.get_connection_by_descriptor(descriptor)
|
||||
}
|
||||
|
||||
// Terminate any connections that would collide with a new connection
|
||||
// using different protocols to the same remote address and port. Used to ensure
|
||||
// that we can switch quickly between TCP and WS if necessary to the same node
|
||||
// Returns true if we killed off colliding connections
|
||||
async fn kill_off_colliding_connections(&self, dial_info: &DialInfo) -> bool {
|
||||
let protocol_type = dial_info.protocol_type();
|
||||
let socket_address = dial_info.socket_address();
|
||||
|
||||
let killed = self.arc.connection_table.drain_filter(|prior_descriptor| {
|
||||
// If the protocol types aren't the same, then this is a candidate to be killed off
|
||||
// If they are the same, then we would just return the exact same connection from get_or_create_connection()
|
||||
if prior_descriptor.protocol_type() == protocol_type {
|
||||
return false;
|
||||
}
|
||||
// If the prior remote is not the same address, then we're not going to collide
|
||||
if *prior_descriptor.remote().socket_address() != socket_address {
|
||||
return false;
|
||||
}
|
||||
|
||||
log_net!(debug
|
||||
">< Terminating connection prior_descriptor={:?}",
|
||||
prior_descriptor
|
||||
);
|
||||
true
|
||||
});
|
||||
// Wait for the killed connections to end their recv loops
|
||||
let did_kill = !killed.is_empty();
|
||||
for k in killed {
|
||||
k.await;
|
||||
}
|
||||
did_kill
|
||||
}
|
||||
|
||||
// Called when we want to create a new connection or get the current one that already exists
|
||||
// This will kill off any connections that are in conflict with the new connection to be made
|
||||
// in order to make room for the new connection in the system's connection table
|
||||
// This routine needs to be atomic, or connections may exist in the table that are not established
|
||||
pub async fn get_or_create_connection(
|
||||
&self,
|
||||
local_addr: Option<SocketAddr>,
|
||||
dial_info: DialInfo,
|
||||
) -> EyreResult<NetworkResult<ConnectionHandle>> {
|
||||
let killed = {
|
||||
let mut inner = self.arc.inner.lock();
|
||||
let inner = match &mut *inner {
|
||||
Some(v) => v,
|
||||
None => {
|
||||
panic!("not started");
|
||||
}
|
||||
};
|
||||
log_net!(
|
||||
"== get_or_create_connection local_addr={:?} dial_info={:?}",
|
||||
local_addr.green(),
|
||||
dial_info.green()
|
||||
);
|
||||
|
||||
// Kill off any possibly conflicting connections
|
||||
let did_kill = self.kill_off_colliding_connections(&dial_info).await;
|
||||
let mut retry_count = if did_kill { 2 } else { 0 };
|
||||
|
||||
// Make a connection descriptor for this dialinfo
|
||||
let peer_address = dial_info.to_peer_address();
|
||||
let descriptor = match local_addr {
|
||||
Some(la) => {
|
||||
ConnectionDescriptor::new(peer_address, SocketAddress::from_socket_addr(la))
|
||||
}
|
||||
None => ConnectionDescriptor::new_no_local(peer_address),
|
||||
};
|
||||
|
||||
// If any connection to this remote exists that has the same protocol, return it
|
||||
// Any connection will do, we don't have to match the local address
|
||||
if let Some(conn) = self
|
||||
.arc
|
||||
.connection_table
|
||||
.get_last_connection_by_remote(descriptor.remote())
|
||||
{
|
||||
log_net!(
|
||||
"== get_or_create_connection local_addr={:?} dial_info={:?}",
|
||||
"== Returning existing connection local_addr={:?} peer_address={:?}",
|
||||
local_addr.green(),
|
||||
dial_info.green()
|
||||
peer_address.green()
|
||||
);
|
||||
|
||||
let peer_address = dial_info.to_peer_address();
|
||||
|
||||
// Make a connection to the address
|
||||
// reject connections to addresses with an unknown or unsupported peer scope
|
||||
let descriptor = match local_addr {
|
||||
Some(la) => {
|
||||
ConnectionDescriptor::new(peer_address, SocketAddress::from_socket_addr(la))
|
||||
}
|
||||
None => ConnectionDescriptor::new_no_local(peer_address),
|
||||
}?;
|
||||
|
||||
// If any connection to this remote exists that has the same protocol, return it
|
||||
// Any connection will do, we don't have to match the local address
|
||||
|
||||
if let Some(conn) = inner
|
||||
.connection_table
|
||||
.get_last_connection_by_remote(descriptor.remote())
|
||||
{
|
||||
log_net!(
|
||||
"== Returning existing connection local_addr={:?} peer_address={:?}",
|
||||
local_addr.green(),
|
||||
peer_address.green()
|
||||
);
|
||||
|
||||
return Ok(NetworkResult::Value(conn));
|
||||
}
|
||||
|
||||
// Drop any other protocols connections to this remote that have the same local addr
|
||||
// otherwise this connection won't succeed due to binding
|
||||
let mut killed = Vec::<NetworkConnection>::new();
|
||||
if let Some(local_addr) = local_addr {
|
||||
if local_addr.port() != 0 {
|
||||
for pt in [ProtocolType::TCP, ProtocolType::WS, ProtocolType::WSS] {
|
||||
let pa = PeerAddress::new(descriptor.remote_address().clone(), pt);
|
||||
for prior_descriptor in inner
|
||||
.connection_table
|
||||
.get_connection_descriptors_by_remote(pa)
|
||||
{
|
||||
let mut kill = false;
|
||||
// See if the local address would collide
|
||||
if let Some(prior_local) = prior_descriptor.local() {
|
||||
if (local_addr.ip().is_unspecified()
|
||||
|| prior_local.to_ip_addr().is_unspecified()
|
||||
|| (local_addr.ip() == prior_local.to_ip_addr()))
|
||||
&& prior_local.port() == local_addr.port()
|
||||
{
|
||||
kill = true;
|
||||
}
|
||||
}
|
||||
if kill {
|
||||
log_net!(debug
|
||||
">< Terminating connection prior_descriptor={:?}",
|
||||
prior_descriptor
|
||||
);
|
||||
let mut conn = inner
|
||||
.connection_table
|
||||
.remove_connection(prior_descriptor)
|
||||
.expect("connection not in table");
|
||||
|
||||
conn.close();
|
||||
|
||||
killed.push(conn);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
killed
|
||||
};
|
||||
|
||||
// Wait for the killed connections to end their recv loops
|
||||
let mut retry_count = if !killed.is_empty() { 2 } else { 0 };
|
||||
for k in killed {
|
||||
k.await;
|
||||
return Ok(NetworkResult::Value(conn));
|
||||
}
|
||||
|
||||
// Get connection timeout
|
||||
let timeout_ms = {
|
||||
let config = self.network_manager().config();
|
||||
let c = config.get();
|
||||
c.network.connection_initial_timeout_ms
|
||||
};
|
||||
|
||||
// Attempt new connection
|
||||
let conn = network_result_try!(loop {
|
||||
let result_net_res =
|
||||
ProtocolNetworkConnection::connect(local_addr, &dial_info, timeout_ms).await;
|
||||
let prot_conn = network_result_try!(loop {
|
||||
let result_net_res = ProtocolNetworkConnection::connect(
|
||||
local_addr,
|
||||
&dial_info,
|
||||
self.arc.connection_initial_timeout_ms,
|
||||
)
|
||||
.await;
|
||||
match result_net_res {
|
||||
Ok(net_res) => {
|
||||
if net_res.is_value() || retry_count == 0 {
|
||||
@ -314,7 +311,8 @@ impl ConnectionManager {
|
||||
bail!("shutting down");
|
||||
}
|
||||
};
|
||||
self.on_new_protocol_network_connection(inner, conn)
|
||||
|
||||
self.on_new_protocol_network_connection(inner, prot_conn)
|
||||
}
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////////////////////////////////
|
||||
@ -347,27 +345,6 @@ impl ConnectionManager {
|
||||
conn.close();
|
||||
conn.await;
|
||||
}
|
||||
ConnectionManagerEvent::Finished(desc) => {
|
||||
let conn = {
|
||||
let mut inner_lock = self.arc.inner.lock();
|
||||
match &mut *inner_lock {
|
||||
Some(inner) => {
|
||||
// Remove the connection and wait for the connection loop to terminate
|
||||
if let Ok(conn) = inner.connection_table.remove_connection(desc) {
|
||||
// Must close and wait to ensure things join
|
||||
Some(conn)
|
||||
} else {
|
||||
None
|
||||
}
|
||||
}
|
||||
None => None,
|
||||
}
|
||||
};
|
||||
if let Some(mut conn) = conn {
|
||||
conn.close();
|
||||
conn.await;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -377,7 +354,7 @@ impl ConnectionManager {
|
||||
#[cfg_attr(target_os = "wasm32", allow(dead_code))]
|
||||
pub(super) async fn on_accepted_protocol_network_connection(
|
||||
&self,
|
||||
conn: ProtocolNetworkConnection,
|
||||
protocol_connection: ProtocolNetworkConnection,
|
||||
) -> EyreResult<()> {
|
||||
// Get channel sender
|
||||
let sender = {
|
||||
@ -394,14 +371,14 @@ impl ConnectionManager {
|
||||
|
||||
// Inform the processor of the event
|
||||
let _ = sender
|
||||
.send_async(ConnectionManagerEvent::Accepted(conn))
|
||||
.send_async(ConnectionManagerEvent::Accepted(protocol_connection))
|
||||
.await;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
// Callback from network connection receive loop when it exits
|
||||
// cleans up the entry in the connection table
|
||||
pub(super) async fn report_connection_finished(&self, descriptor: ConnectionDescriptor) {
|
||||
pub(super) async fn report_connection_finished(&self, connection_id: u64) {
|
||||
// Get channel sender
|
||||
let sender = {
|
||||
let mut inner = self.arc.inner.lock();
|
||||
@ -415,9 +392,15 @@ impl ConnectionManager {
|
||||
inner.sender.clone()
|
||||
};
|
||||
|
||||
// Remove the connection
|
||||
let conn = self
|
||||
.arc
|
||||
.connection_table
|
||||
.remove_connection_by_id(connection_id);
|
||||
|
||||
// Inform the processor of the event
|
||||
let _ = sender
|
||||
.send_async(ConnectionManagerEvent::Finished(descriptor))
|
||||
.await;
|
||||
if let Some(conn) = conn {
|
||||
let _ = sender.send_async(ConnectionManagerEvent::Dead(conn)).await;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -1,5 +1,4 @@
|
||||
use super::*;
|
||||
use alloc::collections::btree_map::Entry;
|
||||
use futures_util::StreamExt;
|
||||
use hashlink::LruCache;
|
||||
|
||||
@ -21,36 +20,21 @@ impl ConnectionTableAddError {
|
||||
}
|
||||
}
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////////
|
||||
#[derive(ThisError, Debug)]
|
||||
pub enum ConnectionTableRemoveError {
|
||||
#[error("Connection not in table")]
|
||||
NotInTable,
|
||||
}
|
||||
|
||||
impl ConnectionTableRemoveError {
|
||||
pub fn not_in_table() -> Self {
|
||||
ConnectionTableRemoveError::NotInTable
|
||||
}
|
||||
}
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
#[derive(Debug)]
|
||||
pub struct ConnectionTable {
|
||||
pub struct ConnectionTableInner {
|
||||
max_connections: Vec<usize>,
|
||||
conn_by_descriptor: Vec<LruCache<ConnectionDescriptor, NetworkConnection>>,
|
||||
descriptors_by_remote: BTreeMap<PeerAddress, Vec<ConnectionDescriptor>>,
|
||||
conn_by_id: Vec<LruCache<NetworkConnectionId, NetworkConnection>>,
|
||||
protocol_index_by_id: BTreeMap<NetworkConnectionId, usize>,
|
||||
id_by_descriptor: BTreeMap<ConnectionDescriptor, NetworkConnectionId>,
|
||||
ids_by_remote: BTreeMap<PeerAddress, Vec<NetworkConnectionId>>,
|
||||
address_filter: ConnectionLimits,
|
||||
}
|
||||
|
||||
fn protocol_to_index(protocol: ProtocolType) -> usize {
|
||||
match protocol {
|
||||
ProtocolType::TCP => 0,
|
||||
ProtocolType::WS => 1,
|
||||
ProtocolType::WSS => 2,
|
||||
ProtocolType::UDP => panic!("not a connection-oriented protocol"),
|
||||
}
|
||||
#[derive(Debug)]
|
||||
pub struct ConnectionTable {
|
||||
inner: Arc<Mutex<ConnectionTableInner>>,
|
||||
}
|
||||
|
||||
impl ConnectionTable {
|
||||
@ -64,154 +48,217 @@ impl ConnectionTable {
|
||||
]
|
||||
};
|
||||
Self {
|
||||
max_connections,
|
||||
conn_by_descriptor: vec![
|
||||
LruCache::new_unbounded(),
|
||||
LruCache::new_unbounded(),
|
||||
LruCache::new_unbounded(),
|
||||
],
|
||||
descriptors_by_remote: BTreeMap::new(),
|
||||
address_filter: ConnectionLimits::new(config),
|
||||
inner: Arc::new(Mutex::new(ConnectionTableInner {
|
||||
max_connections,
|
||||
conn_by_id: vec![
|
||||
LruCache::new_unbounded(),
|
||||
LruCache::new_unbounded(),
|
||||
LruCache::new_unbounded(),
|
||||
],
|
||||
protocol_index_by_id: BTreeMap::new(),
|
||||
id_by_descriptor: BTreeMap::new(),
|
||||
ids_by_remote: BTreeMap::new(),
|
||||
address_filter: ConnectionLimits::new(config),
|
||||
})),
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn join(&mut self) {
|
||||
let mut unord = FuturesUnordered::new();
|
||||
for table in &mut self.conn_by_descriptor {
|
||||
for (_, v) in table.drain() {
|
||||
trace!("connection table join: {:?}", v);
|
||||
unord.push(v);
|
||||
}
|
||||
fn protocol_to_index(protocol: ProtocolType) -> usize {
|
||||
match protocol {
|
||||
ProtocolType::TCP => 0,
|
||||
ProtocolType::WS => 1,
|
||||
ProtocolType::WSS => 2,
|
||||
ProtocolType::UDP => panic!("not a connection-oriented protocol"),
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn join(&self) {
|
||||
let mut unord = {
|
||||
let mut inner = self.inner.lock();
|
||||
let unord = FuturesUnordered::new();
|
||||
for table in &mut inner.conn_by_id {
|
||||
for (_, v) in table.drain() {
|
||||
trace!("connection table join: {:?}", v);
|
||||
unord.push(v);
|
||||
}
|
||||
}
|
||||
inner.id_by_descriptor.clear();
|
||||
inner.ids_by_remote.clear();
|
||||
unord
|
||||
};
|
||||
|
||||
while unord.next().await.is_some() {}
|
||||
}
|
||||
|
||||
pub fn add_connection(
|
||||
&mut self,
|
||||
conn: NetworkConnection,
|
||||
&self,
|
||||
network_connection: NetworkConnection,
|
||||
) -> Result<Option<NetworkConnection>, ConnectionTableAddError> {
|
||||
let descriptor = conn.connection_descriptor();
|
||||
let ip_addr = descriptor.remote_address().to_ip_addr();
|
||||
// Get indices for network connection table
|
||||
let id = network_connection.connection_id();
|
||||
let descriptor = network_connection.connection_descriptor();
|
||||
let protocol_index = Self::protocol_to_index(descriptor.protocol_type());
|
||||
let remote = descriptor.remote();
|
||||
|
||||
let index = protocol_to_index(descriptor.protocol_type());
|
||||
if self.conn_by_descriptor[index].contains_key(&descriptor) {
|
||||
return Err(ConnectionTableAddError::already_exists(conn));
|
||||
let mut inner = self.inner.lock();
|
||||
|
||||
// Two connections to the same descriptor should be rejected (soft rejection)
|
||||
if inner.id_by_descriptor.contains_key(&descriptor) {
|
||||
return Err(ConnectionTableAddError::already_exists(network_connection));
|
||||
}
|
||||
|
||||
// Sanity checking this implementation (hard fails that would invalidate the representation)
|
||||
if inner.conn_by_id[protocol_index].contains_key(&id) {
|
||||
panic!("duplicate connection id: {:#?}", network_connection);
|
||||
}
|
||||
if inner.protocol_index_by_id.get(&id).is_some() {
|
||||
panic!("duplicate id to protocol index: {:#?}", network_connection);
|
||||
}
|
||||
if let Some(ids) = inner.ids_by_remote.get(&descriptor.remote()) {
|
||||
if ids.contains(&id) {
|
||||
panic!("duplicate id by remote: {:#?}", network_connection);
|
||||
}
|
||||
}
|
||||
|
||||
// Filter by ip for connection limits
|
||||
match self.address_filter.add(ip_addr) {
|
||||
let ip_addr = descriptor.remote_address().to_ip_addr();
|
||||
match inner.address_filter.add(ip_addr) {
|
||||
Ok(()) => {}
|
||||
Err(e) => {
|
||||
// send connection to get cleaned up cleanly
|
||||
return Err(ConnectionTableAddError::address_filter(conn, e));
|
||||
// Return the connection in the error to be disposed of
|
||||
return Err(ConnectionTableAddError::address_filter(
|
||||
network_connection,
|
||||
e,
|
||||
));
|
||||
}
|
||||
};
|
||||
|
||||
// Add the connection to the table
|
||||
let res = self.conn_by_descriptor[index].insert(descriptor.clone(), conn);
|
||||
let res = inner.conn_by_id[protocol_index].insert(id, network_connection);
|
||||
assert!(res.is_none());
|
||||
|
||||
// if we have reached the maximum number of connections per protocol type
|
||||
// then drop the least recently used connection
|
||||
let mut out_conn = None;
|
||||
if self.conn_by_descriptor[index].len() > self.max_connections[index] {
|
||||
if let Some((lruk, lru_conn)) = self.conn_by_descriptor[index].remove_lru() {
|
||||
debug!("connection lru out: {:?}", lruk);
|
||||
if inner.conn_by_id[protocol_index].len() > inner.max_connections[protocol_index] {
|
||||
if let Some((lruk, lru_conn)) = inner.conn_by_id[protocol_index].remove_lru() {
|
||||
debug!("connection lru out: {:?}", lru_conn);
|
||||
out_conn = Some(lru_conn);
|
||||
self.remove_connection_records(lruk);
|
||||
Self::remove_connection_records(&mut *inner, lruk);
|
||||
}
|
||||
}
|
||||
|
||||
// add connection records
|
||||
let descriptors = self
|
||||
.descriptors_by_remote
|
||||
.entry(descriptor.remote())
|
||||
.or_default();
|
||||
|
||||
descriptors.push(descriptor);
|
||||
inner.protocol_index_by_id.insert(id, protocol_index);
|
||||
inner.id_by_descriptor.insert(descriptor, id);
|
||||
inner.ids_by_remote.entry(remote).or_default().push(id);
|
||||
|
||||
Ok(out_conn)
|
||||
}
|
||||
|
||||
pub fn get_connection(&mut self, descriptor: ConnectionDescriptor) -> Option<ConnectionHandle> {
|
||||
let index = protocol_to_index(descriptor.protocol_type());
|
||||
let out = self.conn_by_descriptor[index].get(&descriptor);
|
||||
out.map(|c| c.get_handle())
|
||||
pub fn get_connection_by_id(&self, id: NetworkConnectionId) -> Option<ConnectionHandle> {
|
||||
let mut inner = self.inner.lock();
|
||||
let protocol_index = *inner.protocol_index_by_id.get(&id)?;
|
||||
let out = inner.conn_by_id[protocol_index].get(&id).unwrap();
|
||||
Some(out.get_handle())
|
||||
}
|
||||
|
||||
pub fn get_last_connection_by_remote(
|
||||
&mut self,
|
||||
remote: PeerAddress,
|
||||
pub fn get_connection_by_descriptor(
|
||||
&self,
|
||||
descriptor: ConnectionDescriptor,
|
||||
) -> Option<ConnectionHandle> {
|
||||
let descriptor = self
|
||||
.descriptors_by_remote
|
||||
.get(&remote)
|
||||
.map(|v| v[(v.len() - 1)].clone());
|
||||
if let Some(descriptor) = descriptor {
|
||||
// lru bump
|
||||
let index = protocol_to_index(descriptor.protocol_type());
|
||||
let handle = self.conn_by_descriptor[index]
|
||||
.get(&descriptor)
|
||||
.map(|c| c.get_handle());
|
||||
handle
|
||||
} else {
|
||||
None
|
||||
}
|
||||
let mut inner = self.inner.lock();
|
||||
|
||||
let id = *inner.id_by_descriptor.get(&descriptor)?;
|
||||
let protocol_index = Self::protocol_to_index(descriptor.protocol_type());
|
||||
let out = inner.conn_by_id[protocol_index].get(&id).unwrap();
|
||||
Some(out.get_handle())
|
||||
}
|
||||
|
||||
pub fn get_connection_descriptors_by_remote(
|
||||
&mut self,
|
||||
remote: PeerAddress,
|
||||
) -> Vec<ConnectionDescriptor> {
|
||||
self.descriptors_by_remote
|
||||
pub fn get_last_connection_by_remote(&self, remote: PeerAddress) -> Option<ConnectionHandle> {
|
||||
let mut inner = self.inner.lock();
|
||||
|
||||
let id = inner.ids_by_remote.get(&remote).map(|v| v[(v.len() - 1)])?;
|
||||
let protocol_index = Self::protocol_to_index(remote.protocol_type());
|
||||
let out = inner.conn_by_id[protocol_index].get(&id).unwrap();
|
||||
Some(out.get_handle())
|
||||
}
|
||||
|
||||
pub fn _get_connection_ids_by_remote(&self, remote: PeerAddress) -> Vec<NetworkConnectionId> {
|
||||
let inner = self.inner.lock();
|
||||
inner
|
||||
.ids_by_remote
|
||||
.get(&remote)
|
||||
.cloned()
|
||||
.unwrap_or_default()
|
||||
}
|
||||
|
||||
pub fn connection_count(&self) -> usize {
|
||||
self.conn_by_descriptor.iter().fold(0, |b, c| b + c.len())
|
||||
}
|
||||
|
||||
fn remove_connection_records(&mut self, descriptor: ConnectionDescriptor) {
|
||||
let ip_addr = descriptor.remote_address().to_ip_addr();
|
||||
|
||||
// conns_by_remote
|
||||
match self.descriptors_by_remote.entry(descriptor.remote()) {
|
||||
Entry::Vacant(_) => {
|
||||
panic!("inconsistency in connection table")
|
||||
}
|
||||
Entry::Occupied(mut o) => {
|
||||
let v = o.get_mut();
|
||||
|
||||
// Remove one matching connection from the list
|
||||
for (n, elem) in v.iter().enumerate() {
|
||||
if *elem == descriptor {
|
||||
v.remove(n);
|
||||
break;
|
||||
}
|
||||
}
|
||||
// No connections left for this remote, remove the entry from conns_by_remote
|
||||
if v.is_empty() {
|
||||
o.remove_entry();
|
||||
pub fn drain_filter<F>(&self, mut filter: F) -> Vec<NetworkConnection>
|
||||
where
|
||||
F: FnMut(ConnectionDescriptor) -> bool,
|
||||
{
|
||||
let mut inner = self.inner.lock();
|
||||
let mut filtered_ids = Vec::new();
|
||||
for cbi in &mut inner.conn_by_id {
|
||||
for (id, conn) in cbi {
|
||||
if filter(conn.connection_descriptor()) {
|
||||
filtered_ids.push(*id);
|
||||
}
|
||||
}
|
||||
}
|
||||
self.address_filter
|
||||
let mut filtered_connections = Vec::new();
|
||||
for id in filtered_ids {
|
||||
let conn = Self::remove_connection_records(&mut *inner, id);
|
||||
filtered_connections.push(conn)
|
||||
}
|
||||
filtered_connections
|
||||
}
|
||||
|
||||
pub fn connection_count(&self) -> usize {
|
||||
let inner = self.inner.lock();
|
||||
inner.conn_by_id.iter().fold(0, |acc, c| acc + c.len())
|
||||
}
|
||||
|
||||
fn remove_connection_records(
|
||||
inner: &mut ConnectionTableInner,
|
||||
id: NetworkConnectionId,
|
||||
) -> NetworkConnection {
|
||||
// protocol_index_by_id
|
||||
let protocol_index = inner.protocol_index_by_id.remove(&id).unwrap();
|
||||
// conn_by_id
|
||||
let conn = inner.conn_by_id[protocol_index].remove(&id).unwrap();
|
||||
// id_by_descriptor
|
||||
let descriptor = conn.connection_descriptor();
|
||||
inner.id_by_descriptor.remove(&descriptor).unwrap();
|
||||
// ids_by_remote
|
||||
let remote = descriptor.remote();
|
||||
let ids = inner.ids_by_remote.get_mut(&remote).unwrap();
|
||||
for (n, elem) in ids.iter().enumerate() {
|
||||
if *elem == id {
|
||||
ids.remove(n);
|
||||
if ids.is_empty() {
|
||||
inner.ids_by_remote.remove(&remote).unwrap();
|
||||
}
|
||||
break;
|
||||
}
|
||||
}
|
||||
// address_filter
|
||||
let ip_addr = remote.to_socket_addr().ip();
|
||||
inner
|
||||
.address_filter
|
||||
.remove(ip_addr)
|
||||
.expect("Inconsistency in connection table");
|
||||
conn
|
||||
}
|
||||
|
||||
pub fn remove_connection(
|
||||
&mut self,
|
||||
descriptor: ConnectionDescriptor,
|
||||
) -> Result<NetworkConnection, ConnectionTableRemoveError> {
|
||||
let index = protocol_to_index(descriptor.protocol_type());
|
||||
let conn = self.conn_by_descriptor[index]
|
||||
.remove(&descriptor)
|
||||
.ok_or_else(|| ConnectionTableRemoveError::not_in_table())?;
|
||||
pub fn remove_connection_by_id(&self, id: NetworkConnectionId) -> Option<NetworkConnection> {
|
||||
let mut inner = self.inner.lock();
|
||||
|
||||
self.remove_connection_records(descriptor);
|
||||
Ok(conn)
|
||||
let protocol_index = *inner.protocol_index_by_id.get(&id)?;
|
||||
if !inner.conn_by_id[protocol_index].contains_key(&id) {
|
||||
return None;
|
||||
}
|
||||
let conn = Self::remove_connection_records(&mut *inner, id);
|
||||
Some(conn)
|
||||
}
|
||||
}
|
||||
|
(File diff suppressed because it is too large)
@ -34,32 +34,56 @@ pub const PEEK_DETECT_LEN: usize = 64;
|
||||
/////////////////////////////////////////////////////////////////
|
||||
|
||||
struct NetworkInner {
|
||||
/// true if the low-level network is running
|
||||
network_started: bool,
|
||||
/// set if the network needs to be restarted due to a low level configuration change
|
||||
/// such as dhcp release or change of address or interfaces being added or removed
|
||||
network_needs_restart: bool,
|
||||
/// the calculated protocol configuration for inbound/outbound protocols
|
||||
protocol_config: Option<ProtocolConfig>,
|
||||
/// set of statically configured protocols with public dialinfo
|
||||
static_public_dialinfo: ProtocolTypeSet,
|
||||
network_class: Option<NetworkClass>,
|
||||
/// network class per routing domain
|
||||
network_class: [Option<NetworkClass>; RoutingDomain::count()],
|
||||
/// join handles for all the low level network background tasks
|
||||
join_handles: Vec<MustJoinHandle<()>>,
|
||||
/// stop source for shutting down the low level network background tasks
|
||||
stop_source: Option<StopSource>,
|
||||
/// port we are binding raw udp listen to
|
||||
udp_port: u16,
|
||||
/// port we are binding raw tcp listen to
|
||||
tcp_port: u16,
|
||||
/// port we are binding websocket listen to
|
||||
ws_port: u16,
|
||||
/// port we are binding secure websocket listen to
|
||||
wss_port: u16,
|
||||
/// does our network have ipv4 on any network?
|
||||
enable_ipv4: bool,
|
||||
/// does our network have ipv6 on the global internet?
|
||||
enable_ipv6_global: bool,
|
||||
/// does our network have ipv6 on the local network?
|
||||
enable_ipv6_local: bool,
|
||||
// public dial info check
|
||||
/// set if we need to calculate our public dial info again
|
||||
needs_public_dial_info_check: bool,
|
||||
/// set during the actual execution of the public dial info check to ensure we don't do it more than once
|
||||
doing_public_dial_info_check: bool,
|
||||
/// the punishment closure to enax
|
||||
public_dial_info_check_punishment: Option<Box<dyn FnOnce() + Send + 'static>>,
|
||||
// udp
|
||||
/// udp socket record for bound-first sockets, which are used to guarantee a port is available before
|
||||
/// creating a 'reuseport' socket there. we don't want to pick ports that other programs are using
|
||||
bound_first_udp: BTreeMap<u16, Option<(socket2::Socket, socket2::Socket)>>,
|
||||
/// mapping of protocol handlers to accept messages from a set of bound socket addresses
|
||||
inbound_udp_protocol_handlers: BTreeMap<SocketAddr, RawUdpProtocolHandler>,
|
||||
/// outbound udp protocol handler for udpv4
|
||||
outbound_udpv4_protocol_handler: Option<RawUdpProtocolHandler>,
|
||||
/// outbound udp protocol handler for udpv6
|
||||
outbound_udpv6_protocol_handler: Option<RawUdpProtocolHandler>,
|
||||
//tcp
|
||||
/// tcp socket record for bound-first sockets, which are used to guarantee a port is available before
|
||||
/// creating a 'reuseport' socket there. we don't want to pick ports that other programs are using
|
||||
bound_first_tcp: BTreeMap<u16, Option<(socket2::Socket, socket2::Socket)>>,
|
||||
/// TLS handling socket controller
|
||||
tls_acceptor: Option<TlsAcceptor>,
|
||||
/// Multiplexer record for protocols on low level TCP sockets
|
||||
listener_states: BTreeMap<SocketAddr, Arc<RwLock<ListenerState>>>,
|
||||
}
|
||||
|
||||
@ -98,7 +122,7 @@ impl Network {
|
||||
public_dial_info_check_punishment: None,
|
||||
protocol_config: None,
|
||||
static_public_dialinfo: ProtocolTypeSet::empty(),
|
||||
network_class: None,
|
||||
network_class: [None, None],
|
||||
join_handles: Vec::new(),
|
||||
stop_source: None,
|
||||
udp_port: 0u16,
|
||||
@ -521,7 +545,7 @@ impl Network {
|
||||
// Handle connection-oriented protocols
|
||||
|
||||
// Try to send to the exact existing connection if one exists
|
||||
if let Some(conn) = self.connection_manager().get_connection(descriptor).await {
|
||||
if let Some(conn) = self.connection_manager().get_connection(descriptor) {
|
||||
// connection exists, send over it
|
||||
match conn.send_async(data).await {
|
||||
ConnectionHandleSendResult::Sent => {
|
||||
@ -603,6 +627,31 @@ impl Network {
|
||||
// initialize interfaces
|
||||
self.unlocked_inner.interfaces.refresh().await?;
|
||||
|
||||
// build the set of networks we should consider for the 'LocalNetwork' routing domain
|
||||
let mut local_networks: HashSet<(IpAddr, IpAddr)> = HashSet::new();
|
||||
self.unlocked_inner
|
||||
.interfaces
|
||||
.with_interfaces(|interfaces| {
|
||||
trace!("interfaces: {:#?}", interfaces);
|
||||
|
||||
for (_name, intf) in interfaces {
|
||||
// Skip networks that we should never encounter
|
||||
if intf.is_loopback() || !intf.is_running() {
|
||||
continue;
|
||||
}
|
||||
// Add network to local networks table
|
||||
for addr in &intf.addrs {
|
||||
let netmask = addr.if_addr().netmask();
|
||||
let network_ip = ipaddr_apply_netmask(addr.if_addr().ip(), netmask);
|
||||
local_networks.insert((network_ip, netmask));
|
||||
}
|
||||
}
|
||||
});
|
||||
let local_networks: Vec<(IpAddr, IpAddr)> = local_networks.into_iter().collect();
|
||||
self.unlocked_inner
|
||||
.routing_table
|
||||
.configure_local_network_routing_domain(local_networks);
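Editor's note: the loop above reduces each running, non-loopback interface address to its (network address, netmask) pair so duplicate subnets collapse in the HashSet before the LocalNetwork routing domain is configured. The masking step looks roughly like the sketch below; this illustrates what a helper such as the ipaddr_apply_netmask call might do and is not the project's implementation.

// Illustrative netmask application: AND the address with the mask to get the network address.
use std::collections::HashSet;
use std::net::{IpAddr, Ipv4Addr, Ipv6Addr};

fn apply_netmask_sketch(addr: IpAddr, netmask: IpAddr) -> IpAddr {
    match (addr, netmask) {
        (IpAddr::V4(a), IpAddr::V4(m)) => IpAddr::V4(Ipv4Addr::from(u32::from(a) & u32::from(m))),
        (IpAddr::V6(a), IpAddr::V6(m)) => IpAddr::V6(Ipv6Addr::from(u128::from(a) & u128::from(m))),
        // Mismatched address families: leave the address untouched
        _ => addr,
    }
}

fn main() {
    let mut local_networks: HashSet<(IpAddr, IpAddr)> = HashSet::new();
    let netmask = IpAddr::V4(Ipv4Addr::new(255, 255, 255, 0));
    // Two addresses on the same /24 collapse to one local network entry
    for ip in [
        IpAddr::V4(Ipv4Addr::new(192, 168, 1, 10)),
        IpAddr::V4(Ipv4Addr::new(192, 168, 1, 42)),
    ] {
        local_networks.insert((apply_netmask_sketch(ip, netmask), netmask));
    }
    assert_eq!(local_networks.len(), 1);
}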
// determine if we have ipv4/ipv6 addresses
|
||||
{
|
||||
let mut inner = self.inner.lock();
|
||||
@ -687,18 +736,32 @@ impl Network {
|
||||
protocol_config
|
||||
};
|
||||
|
||||
// Start editing routing table
|
||||
let mut editor_public_internet = self
|
||||
.unlocked_inner
|
||||
.routing_table
|
||||
.edit_routing_domain(RoutingDomain::PublicInternet);
|
||||
let mut editor_local_network = self
|
||||
.unlocked_inner
|
||||
.routing_table
|
||||
.edit_routing_domain(RoutingDomain::LocalNetwork);
|
||||
|
||||
// start listeners
|
||||
if protocol_config.inbound.contains(ProtocolType::UDP) {
|
||||
self.start_udp_listeners().await?;
|
||||
self.start_udp_listeners(&mut editor_public_internet, &mut editor_local_network)
|
||||
.await?;
|
||||
}
|
||||
if protocol_config.inbound.contains(ProtocolType::WS) {
|
||||
self.start_ws_listeners().await?;
|
||||
self.start_ws_listeners(&mut editor_public_internet, &mut editor_local_network)
|
||||
.await?;
|
||||
}
|
||||
if protocol_config.inbound.contains(ProtocolType::WSS) {
|
||||
self.start_wss_listeners().await?;
|
||||
self.start_wss_listeners(&mut editor_public_internet, &mut editor_local_network)
|
||||
.await?;
|
||||
}
|
||||
if protocol_config.inbound.contains(ProtocolType::TCP) {
|
||||
self.start_tcp_listeners().await?;
|
||||
self.start_tcp_listeners(&mut editor_public_internet, &mut editor_local_network)
|
||||
.await?;
|
||||
}
|
||||
|
||||
// release caches of available listener ports
|
||||
@ -715,13 +778,18 @@ impl Network {
|
||||
if !detect_address_changes {
|
||||
let mut inner = self.inner.lock();
|
||||
if !inner.static_public_dialinfo.is_empty() {
|
||||
inner.network_class = Some(NetworkClass::InboundCapable);
|
||||
inner.network_class[RoutingDomain::PublicInternet as usize] =
|
||||
Some(NetworkClass::InboundCapable);
|
||||
}
|
||||
}
|
||||
|
||||
info!("network started");
|
||||
self.inner.lock().network_started = true;
|
||||
|
||||
// commit routing table edits
|
||||
editor_public_internet.commit().await;
|
||||
editor_local_network.commit().await;
|
||||
|
||||
Ok(())
|
||||
}
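Editor's note: startup now routes all dial info registration through per-domain RoutingDomainEditor instances and commits them once the listeners are up. The pattern is essentially "queue edits, then apply them in one commit". A stripped-down synchronous sketch is shown below with invented *Sketch types; the real editor is async and operates on the routing table.

// Minimal edit-then-commit sketch in the style of the RoutingDomainEditor usage above.
#[derive(Debug, Clone)]
struct DialInfoSketch(String);

#[derive(Default, Debug)]
struct RoutingDomainStateSketch {
    dial_info: Vec<DialInfoSketch>,
}

#[derive(Default)]
struct RoutingDomainEditorSketch {
    queued: Vec<DialInfoSketch>,
    clear_first: bool,
}

impl RoutingDomainEditorSketch {
    fn clear_dial_info_details(&mut self) -> &mut Self {
        self.clear_first = true;
        self
    }
    fn register_dial_info(&mut self, di: DialInfoSketch) -> &mut Self {
        // Changes are only queued here; nothing is visible until commit()
        self.queued.push(di);
        self
    }
    fn commit(self, state: &mut RoutingDomainStateSketch) {
        if self.clear_first {
            state.dial_info.clear();
        }
        state.dial_info.extend(self.queued);
    }
}

fn main() {
    let mut public_internet = RoutingDomainStateSketch::default();
    let mut editor = RoutingDomainEditorSketch::default();
    editor
        .clear_dial_info_details()
        .register_dial_info(DialInfoSketch("udp/203.0.113.5:5150".to_string()));
    editor.commit(&mut public_internet);
    assert_eq!(public_internet.dial_info.len(), 1);
}

The apparent intent of batching edits this way is that dial info changes become visible (and node info updates go out) once per commit rather than once per registration.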
@ -766,9 +834,16 @@ impl Network {
|
||||
while unord.next().await.is_some() {}
|
||||
|
||||
debug!("clearing dial info");
|
||||
// Drop all dial info
|
||||
routing_table.clear_dial_info_details(RoutingDomain::PublicInternet);
|
||||
routing_table.clear_dial_info_details(RoutingDomain::LocalNetwork);
|
||||
|
||||
let mut editor = routing_table.edit_routing_domain(RoutingDomain::PublicInternet);
|
||||
editor.disable_node_info_updates();
|
||||
editor.clear_dial_info_details();
|
||||
editor.commit().await;
|
||||
|
||||
let mut editor = routing_table.edit_routing_domain(RoutingDomain::LocalNetwork);
|
||||
editor.disable_node_info_updates();
|
||||
editor.clear_dial_info_details();
|
||||
editor.commit().await;
|
||||
|
||||
// Reset state including network class
|
||||
*self.inner.lock() = Self::new_inner();
|
||||
@ -796,9 +871,9 @@ impl Network {
|
||||
inner.doing_public_dial_info_check
|
||||
}
|
||||
|
||||
pub fn get_network_class(&self) -> Option<NetworkClass> {
|
||||
pub fn get_network_class(&self, routing_domain: RoutingDomain) -> Option<NetworkClass> {
|
||||
let inner = self.inner.lock();
|
||||
inner.network_class
|
||||
inner.network_class[routing_domain as usize]
|
||||
}
|
||||
|
||||
//////////////////////////////////////////
|
||||
@ -861,9 +936,13 @@ impl Network {
|
||||
|
||||
// If we need to figure out our network class, tick the task for it
|
||||
if detect_address_changes {
|
||||
let network_class = self.get_network_class().unwrap_or(NetworkClass::Invalid);
|
||||
let public_internet_network_class = self
|
||||
.get_network_class(RoutingDomain::PublicInternet)
|
||||
.unwrap_or(NetworkClass::Invalid);
|
||||
let needs_public_dial_info_check = self.needs_public_dial_info_check();
|
||||
if network_class == NetworkClass::Invalid || needs_public_dial_info_check {
|
||||
if public_internet_network_class == NetworkClass::Invalid
|
||||
|| needs_public_dial_info_check
|
||||
{
|
||||
let routing_table = self.routing_table();
|
||||
let rth = routing_table.get_routing_table_health();
|
||||
|
||||
|
@ -118,20 +118,24 @@ impl DiscoveryContext {
|
||||
|
||||
// Build a filter that matches our protocol and address type
|
||||
// and excludes relays so we can get an accurate external address
|
||||
let dial_info_filter = DialInfoFilter::global()
|
||||
let dial_info_filter = DialInfoFilter::all()
|
||||
.with_protocol_type(protocol_type)
|
||||
.with_address_type(address_type);
|
||||
let inbound_dial_info_entry_filter =
|
||||
RoutingTable::make_inbound_dial_info_entry_filter(dial_info_filter.clone());
|
||||
let inbound_dial_info_entry_filter = RoutingTable::make_inbound_dial_info_entry_filter(
|
||||
RoutingDomain::PublicInternet,
|
||||
dial_info_filter.clone(),
|
||||
);
|
||||
let disallow_relays_filter = move |e: &BucketEntryInner| {
|
||||
if let Some(n) = e.node_info() {
|
||||
if let Some(n) = e.node_info(RoutingDomain::PublicInternet) {
|
||||
n.relay_peer_info.is_none()
|
||||
} else {
|
||||
false
|
||||
}
|
||||
};
|
||||
let filter =
|
||||
RoutingTable::combine_filters(inbound_dial_info_entry_filter, disallow_relays_filter);
|
||||
let filter = RoutingTable::combine_entry_filters(
|
||||
inbound_dial_info_entry_filter,
|
||||
disallow_relays_filter,
|
||||
);
|
||||
|
||||
// Find public nodes matching this filter
|
||||
let peers = self
|
||||
@ -153,7 +157,11 @@ impl DiscoveryContext {
|
||||
continue;
|
||||
}
|
||||
}
|
||||
peer.set_filter(Some(dial_info_filter.clone()));
|
||||
peer.set_filter(Some(
|
||||
NodeRefFilter::new()
|
||||
.with_routing_domain(RoutingDomain::PublicInternet)
|
||||
.with_dial_info_filter(dial_info_filter.clone()),
|
||||
));
|
||||
if let Some(sa) = self.request_public_address(peer.clone()).await {
|
||||
return Some((sa, peer));
|
||||
}
|
||||
@ -169,7 +177,7 @@ impl DiscoveryContext {
|
||||
protocol_type: ProtocolType,
|
||||
address_type: AddressType,
|
||||
) -> Vec<SocketAddress> {
|
||||
let filter = DialInfoFilter::local()
|
||||
let filter = DialInfoFilter::all()
|
||||
.with_protocol_type(protocol_type)
|
||||
.with_address_type(address_type);
|
||||
self.routing_table
|
||||
@ -340,20 +348,22 @@ impl DiscoveryContext {
|
||||
)
|
||||
};
|
||||
|
||||
// Attempt a port mapping via all available and enabled mechanisms
|
||||
// Try this before the direct mapping in the event that we are restarting
|
||||
// and may not have recorded a mapping created the last time
|
||||
if let Some(external_mapped_dial_info) = self.try_port_mapping().await {
|
||||
// Got a port mapping, let's use it
|
||||
self.set_detected_public_dial_info(external_mapped_dial_info, DialInfoClass::Mapped);
|
||||
self.set_detected_network_class(NetworkClass::InboundCapable);
|
||||
}
|
||||
// Do a validate_dial_info on the external address from a redirected node
|
||||
if self
|
||||
else if self
|
||||
.validate_dial_info(node_1.clone(), external_1_dial_info.clone(), true)
|
||||
.await
|
||||
{
|
||||
// Add public dial info with Direct dialinfo class
|
||||
self.set_detected_public_dial_info(external_1_dial_info, DialInfoClass::Direct);
|
||||
self.set_detected_network_class(NetworkClass::InboundCapable);
|
||||
}
|
||||
// Attempt a port mapping via all available and enabled mechanisms
|
||||
else if let Some(external_mapped_dial_info) = self.try_port_mapping().await {
|
||||
// Got a port mapping, let's use it
|
||||
self.set_detected_public_dial_info(external_mapped_dial_info, DialInfoClass::Mapped);
|
||||
self.set_detected_network_class(NetworkClass::InboundCapable);
|
||||
} else {
|
||||
// Add public dial info with Blocked dialinfo class
|
||||
self.set_detected_public_dial_info(external_1_dial_info, DialInfoClass::Blocked);
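Editor's note: the reordering in this hunk makes the detection sequence: try a port mapping first (one may already exist from a previous run), then attempt direct validation of the observed external address, and only then fall back to Blocked. A tiny sketch of that precedence, using stand-in types rather than the real DialInfoClass:

// Illustrative fallback order for classifying detected public dial info.
#[derive(Debug, PartialEq, Eq)]
enum DialInfoClassSketch {
    Mapped,  // reachable via an explicit port mapping (UPnP/NAT-PMP/etc.)
    Direct,  // reachable directly at the observed external address
    Blocked, // not reachable inbound at all
}

fn classify(port_mapping_available: bool, direct_validation_ok: bool) -> DialInfoClassSketch {
    if port_mapping_available {
        // Prefer an existing or creatable mapping, e.g. one left over from a previous run
        DialInfoClassSketch::Mapped
    } else if direct_validation_ok {
        DialInfoClassSketch::Direct
    } else {
        DialInfoClassSketch::Blocked
    }
}

fn main() {
    assert_eq!(classify(true, true), DialInfoClassSketch::Mapped);
    assert_eq!(classify(false, true), DialInfoClassSketch::Direct);
    assert_eq!(classify(false, false), DialInfoClassSketch::Blocked);
}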
@ -376,8 +386,19 @@ impl DiscoveryContext {
|
||||
)
|
||||
};
|
||||
|
||||
// Attempt a port mapping via all available and enabled mechanisms
|
||||
// Try this before the direct mapping in the event that we are restarting
|
||||
// and may not have recorded a mapping created the last time
|
||||
if let Some(external_mapped_dial_info) = self.try_port_mapping().await {
|
||||
// Got a port mapping, let's use it
|
||||
self.set_detected_public_dial_info(external_mapped_dial_info, DialInfoClass::Mapped);
|
||||
self.set_detected_network_class(NetworkClass::InboundCapable);
|
||||
|
||||
// No more retries
|
||||
return Ok(true);
|
||||
}
|
||||
// Do a validate_dial_info on the external address from a redirected node
|
||||
if self
|
||||
else if self
|
||||
.validate_dial_info(node_1.clone(), external_1_dial_info.clone(), true)
|
||||
.await
|
||||
{
|
||||
@ -386,17 +407,9 @@ impl DiscoveryContext {
|
||||
self.set_detected_network_class(NetworkClass::InboundCapable);
|
||||
return Ok(true);
|
||||
}
|
||||
// Attempt a port mapping via all available and enabled mechanisms
|
||||
else if let Some(external_mapped_dial_info) = self.try_port_mapping().await {
|
||||
// Got a port mapping, let's use it
|
||||
self.set_detected_public_dial_info(external_mapped_dial_info, DialInfoClass::Mapped);
|
||||
self.set_detected_network_class(NetworkClass::InboundCapable);
|
||||
|
||||
// No more retries
|
||||
return Ok(true);
|
||||
}
|
||||
|
||||
// Port mapping was not possible, let's see what kind of NAT we have
|
||||
// Port mapping was not possible, and things aren't accessible directly.
|
||||
// Let's see what kind of NAT we have
|
||||
|
||||
// Does a redirected dial info validation from a different address and a random port find us?
|
||||
if self
|
||||
@ -583,7 +596,8 @@ impl Network {
|
||||
let (protocol_config, existing_network_class, tcp_same_port) = {
|
||||
let inner = self.inner.lock();
|
||||
let protocol_config = inner.protocol_config.unwrap_or_default();
|
||||
let existing_network_class = inner.network_class;
|
||||
let existing_network_class =
|
||||
inner.network_class[RoutingDomain::PublicInternet as usize];
|
||||
let tcp_same_port = if protocol_config.inbound.contains(ProtocolType::TCP)
|
||||
&& protocol_config.inbound.contains(ProtocolType::WS)
|
||||
{
|
||||
@ -594,7 +608,6 @@ impl Network {
|
||||
(protocol_config, existing_network_class, tcp_same_port)
|
||||
};
|
||||
let routing_table = self.routing_table();
|
||||
let network_manager = self.network_manager();
|
||||
|
||||
// Process all protocol and address combinations
|
||||
let mut futures = FuturesUnordered::new();
|
||||
@ -757,11 +770,12 @@ impl Network {
|
||||
// If a network class could be determined
|
||||
// see about updating our public dial info
|
||||
let mut changed = false;
|
||||
let mut editor = routing_table.edit_routing_domain(RoutingDomain::PublicInternet);
|
||||
if new_network_class.is_some() {
|
||||
// Get existing public dial info
|
||||
let existing_public_dial_info: HashSet<DialInfoDetail> = routing_table
|
||||
.all_filtered_dial_info_details(
|
||||
Some(RoutingDomain::PublicInternet),
|
||||
RoutingDomain::PublicInternet.into(),
|
||||
&DialInfoFilter::all(),
|
||||
)
|
||||
.into_iter()
|
||||
@ -800,13 +814,9 @@ impl Network {
|
||||
// Is the public dial info different?
|
||||
if existing_public_dial_info != new_public_dial_info {
|
||||
// If so, clear existing public dial info and re-register the new public dial info
|
||||
routing_table.clear_dial_info_details(RoutingDomain::PublicInternet);
|
||||
editor.clear_dial_info_details();
|
||||
for did in new_public_dial_info {
|
||||
if let Err(e) = routing_table.register_dial_info(
|
||||
RoutingDomain::PublicInternet,
|
||||
did.dial_info,
|
||||
did.class,
|
||||
) {
|
||||
if let Err(e) = editor.register_dial_info(did.dial_info, did.class) {
|
||||
log_net!(error "Failed to register detected public dial info: {}", e);
|
||||
}
|
||||
}
|
||||
@ -815,14 +825,15 @@ impl Network {
|
||||
|
||||
// Is the network class different?
|
||||
if existing_network_class != new_network_class {
|
||||
self.inner.lock().network_class = new_network_class;
|
||||
self.inner.lock().network_class[RoutingDomain::PublicInternet as usize] =
|
||||
new_network_class;
|
||||
changed = true;
|
||||
log_net!(debug "network class changed to {:?}", new_network_class);
|
||||
log_net!(debug "PublicInternet network class changed to {:?}", new_network_class);
|
||||
}
|
||||
} else if existing_network_class.is_some() {
|
||||
// Network class could not be determined
|
||||
routing_table.clear_dial_info_details(RoutingDomain::PublicInternet);
|
||||
self.inner.lock().network_class = None;
|
||||
editor.clear_dial_info_details();
|
||||
self.inner.lock().network_class[RoutingDomain::PublicInternet as usize] = None;
|
||||
changed = true;
|
||||
log_net!(debug "network class cleared");
|
||||
}
|
||||
@ -834,7 +845,7 @@ impl Network {
|
||||
}
|
||||
} else {
|
||||
// Send updates to everyone
|
||||
network_manager.send_node_info_updates(true).await;
|
||||
editor.commit().await;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
|
@ -149,8 +149,7 @@ impl RawTcpProtocolHandler {
|
||||
);
|
||||
let local_address = self.inner.lock().local_address;
|
||||
let conn = ProtocolNetworkConnection::RawTcp(RawTcpNetworkConnection::new(
|
||||
ConnectionDescriptor::new(peer_addr, SocketAddress::from_socket_addr(local_address))
|
||||
.map_err(|e| io::Error::new(io::ErrorKind::AddrNotAvailable, e))?,
|
||||
ConnectionDescriptor::new(peer_addr, SocketAddress::from_socket_addr(local_address)),
|
||||
ps,
|
||||
));
|
||||
|
||||
@ -190,8 +189,7 @@ impl RawTcpProtocolHandler {
|
||||
ProtocolType::TCP,
|
||||
),
|
||||
SocketAddress::from_socket_addr(actual_local_address),
|
||||
)
|
||||
.map_err(|e| io::Error::new(io::ErrorKind::AddrNotAvailable, e))?,
|
||||
),
|
||||
ps,
|
||||
));
|
||||
|
||||
|
@ -25,16 +25,10 @@ impl RawUdpProtocolHandler {
|
||||
ProtocolType::UDP,
|
||||
);
|
||||
let local_socket_addr = self.socket.local_addr()?;
|
||||
let descriptor = match ConnectionDescriptor::new(
|
||||
let descriptor = ConnectionDescriptor::new(
|
||||
peer_addr,
|
||||
SocketAddress::from_socket_addr(local_socket_addr),
|
||||
) {
|
||||
Ok(d) => d,
|
||||
Err(_) => {
|
||||
log_net!(debug "{}({}) at {}@{}:{}: {:?}", "Invalid peer scope".green(), "received message from invalid peer scope", file!(), line!(), column!(), peer_addr);
|
||||
continue;
|
||||
}
|
||||
};
|
||||
);
|
||||
|
||||
break (size, descriptor);
|
||||
};
|
||||
@ -62,8 +56,7 @@ impl RawUdpProtocolHandler {
|
||||
let descriptor = ConnectionDescriptor::new(
|
||||
peer_addr,
|
||||
SocketAddress::from_socket_addr(local_socket_addr),
|
||||
)
|
||||
.map_err(|e| io::Error::new(io::ErrorKind::AddrNotAvailable, e))?;
|
||||
);
|
||||
|
||||
let len = network_result_try!(self
|
||||
.socket
|
||||
|
@ -212,8 +212,7 @@ impl WebsocketProtocolHandler {
|
||||
ConnectionDescriptor::new(
|
||||
peer_addr,
|
||||
SocketAddress::from_socket_addr(self.arc.local_address),
|
||||
)
|
||||
.map_err(|e| io::Error::new(io::ErrorKind::AddrNotAvailable, e))?,
|
||||
),
|
||||
ws_stream,
|
||||
));
|
||||
|
||||
@ -268,8 +267,7 @@ impl WebsocketProtocolHandler {
|
||||
let descriptor = ConnectionDescriptor::new(
|
||||
dial_info.to_peer_address(),
|
||||
SocketAddress::from_socket_addr(actual_local_addr),
|
||||
)
|
||||
.map_err(|e| io::Error::new(io::ErrorKind::AddrNotAvailable, e))?;
|
||||
);
|
||||
|
||||
// Negotiate TLS if this is WSS
|
||||
if tls {
|
||||
|
@ -250,15 +250,18 @@ impl Network {
|
||||
|
||||
/////////////////////////////////////////////////////
|
||||
|
||||
pub(super) async fn start_udp_listeners(&self) -> EyreResult<()> {
|
||||
pub(super) async fn start_udp_listeners(
|
||||
&self,
|
||||
editor_public_internet: &mut RoutingDomainEditor,
|
||||
editor_local_network: &mut RoutingDomainEditor,
|
||||
) -> EyreResult<()> {
|
||||
trace!("starting udp listeners");
|
||||
let routing_table = self.routing_table();
|
||||
let (listen_address, public_address, enable_local_peer_scope, detect_address_changes) = {
|
||||
let (listen_address, public_address, detect_address_changes) = {
|
||||
let c = self.config.get();
|
||||
(
|
||||
c.network.protocol.udp.listen_address.clone(),
|
||||
c.network.protocol.udp.public_address.clone(),
|
||||
c.network.enable_local_peer_scope,
|
||||
c.network.detect_address_changes,
|
||||
)
|
||||
};
|
||||
@ -288,26 +291,18 @@ impl Network {
|
||||
|
||||
// Register local dial info
|
||||
for di in &local_dial_info_list {
|
||||
// If the local interface address is global, or we are enabling local peer scope
|
||||
// register global dial info if no public address is specified
|
||||
// If the local interface address is global, then register global dial info
|
||||
// if no other public address is specified
|
||||
if !detect_address_changes
|
||||
&& public_address.is_none()
|
||||
&& (di.is_global() || enable_local_peer_scope)
|
||||
&& routing_table.ensure_dial_info_is_valid(RoutingDomain::PublicInternet, &di)
|
||||
{
|
||||
routing_table.register_dial_info(
|
||||
RoutingDomain::PublicInternet,
|
||||
di.clone(),
|
||||
DialInfoClass::Direct,
|
||||
)?;
|
||||
editor_public_internet.register_dial_info(di.clone(), DialInfoClass::Direct)?;
|
||||
static_public = true;
|
||||
}
|
||||
|
||||
// Register interface dial info as well since the address is on the local interface
|
||||
routing_table.register_dial_info(
|
||||
RoutingDomain::LocalNetwork,
|
||||
di.clone(),
|
||||
DialInfoClass::Direct,
|
||||
)?;
|
||||
editor_local_network.register_dial_info(di.clone(), DialInfoClass::Direct)?;
|
||||
}
|
||||
|
||||
// Add static public dialinfo if it's configured
|
||||
@ -323,11 +318,8 @@ impl Network {
|
||||
|
||||
// Register the public address
|
||||
if !detect_address_changes {
|
||||
routing_table.register_dial_info(
|
||||
RoutingDomain::PublicInternet,
|
||||
pdi.clone(),
|
||||
DialInfoClass::Direct,
|
||||
)?;
|
||||
editor_public_internet
|
||||
.register_dial_info(pdi.clone(), DialInfoClass::Direct)?;
|
||||
static_public = true;
|
||||
}
|
||||
|
||||
@ -342,8 +334,7 @@ impl Network {
|
||||
})();
|
||||
|
||||
if !local_dial_info_list.contains(&pdi) && is_interface_address {
|
||||
routing_table.register_dial_info(
|
||||
RoutingDomain::LocalNetwork,
|
||||
editor_local_network.register_dial_info(
|
||||
DialInfo::udp_from_socketaddr(pdi_addr),
|
||||
DialInfoClass::Direct,
|
||||
)?;
|
||||
@ -362,16 +353,19 @@ impl Network {
|
||||
self.create_udp_listener_tasks().await
|
||||
}
|
||||
|
||||
pub(super) async fn start_ws_listeners(&self) -> EyreResult<()> {
|
||||
pub(super) async fn start_ws_listeners(
|
||||
&self,
|
||||
editor_public_internet: &mut RoutingDomainEditor,
|
||||
editor_local_network: &mut RoutingDomainEditor,
|
||||
) -> EyreResult<()> {
|
||||
trace!("starting ws listeners");
|
||||
let routing_table = self.routing_table();
|
||||
let (listen_address, url, path, enable_local_peer_scope, detect_address_changes) = {
|
||||
let (listen_address, url, path, detect_address_changes) = {
|
||||
let c = self.config.get();
|
||||
(
|
||||
c.network.protocol.ws.listen_address.clone(),
|
||||
c.network.protocol.ws.url.clone(),
|
||||
c.network.protocol.ws.path.clone(),
|
||||
c.network.enable_local_peer_scope,
|
||||
c.network.detect_address_changes,
|
||||
)
|
||||
};
|
||||
@ -420,11 +414,8 @@ impl Network {
|
||||
.wrap_err("try_ws failed")?;
|
||||
|
||||
if !detect_address_changes {
|
||||
routing_table.register_dial_info(
|
||||
RoutingDomain::PublicInternet,
|
||||
pdi.clone(),
|
||||
DialInfoClass::Direct,
|
||||
)?;
|
||||
editor_public_internet
|
||||
.register_dial_info(pdi.clone(), DialInfoClass::Direct)?;
|
||||
static_public = true;
|
||||
}
|
||||
|
||||
@ -432,11 +423,7 @@ impl Network {
|
||||
if !registered_addresses.contains(&gsa.ip())
|
||||
&& self.is_usable_interface_address(gsa.ip())
|
||||
{
|
||||
routing_table.register_dial_info(
|
||||
RoutingDomain::LocalNetwork,
|
||||
pdi,
|
||||
DialInfoClass::Direct,
|
||||
)?;
|
||||
editor_local_network.register_dial_info(pdi, DialInfoClass::Direct)?;
|
||||
}
|
||||
|
||||
registered_addresses.insert(gsa.ip());
|
||||
@ -454,23 +441,16 @@ impl Network {
|
||||
|
||||
if !detect_address_changes
|
||||
&& url.is_none()
|
||||
&& (socket_address.address().is_global() || enable_local_peer_scope)
|
||||
&& routing_table.ensure_dial_info_is_valid(RoutingDomain::PublicInternet, &local_di)
|
||||
{
|
||||
// Register public dial info
|
||||
routing_table.register_dial_info(
|
||||
RoutingDomain::PublicInternet,
|
||||
local_di.clone(),
|
||||
DialInfoClass::Direct,
|
||||
)?;
|
||||
editor_public_internet
|
||||
.register_dial_info(local_di.clone(), DialInfoClass::Direct)?;
|
||||
static_public = true;
|
||||
}
|
||||
|
||||
// Register local dial info
|
||||
routing_table.register_dial_info(
|
||||
RoutingDomain::LocalNetwork,
|
||||
local_di,
|
||||
DialInfoClass::Direct,
|
||||
)?;
|
||||
editor_local_network.register_dial_info(local_di, DialInfoClass::Direct)?;
|
||||
}
|
||||
|
||||
if static_public {
|
||||
@ -483,10 +463,13 @@ impl Network {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub(super) async fn start_wss_listeners(&self) -> EyreResult<()> {
|
||||
pub(super) async fn start_wss_listeners(
|
||||
&self,
|
||||
editor_public_internet: &mut RoutingDomainEditor,
|
||||
editor_local_network: &mut RoutingDomainEditor,
|
||||
) -> EyreResult<()> {
|
||||
trace!("starting wss listeners");
|
||||
|
||||
let routing_table = self.routing_table();
|
||||
let (listen_address, url, detect_address_changes) = {
|
||||
let c = self.config.get();
|
||||
(
|
||||
@ -545,11 +528,8 @@ impl Network {
|
||||
.wrap_err("try_wss failed")?;
|
||||
|
||||
if !detect_address_changes {
|
||||
routing_table.register_dial_info(
|
||||
RoutingDomain::PublicInternet,
|
||||
pdi.clone(),
|
||||
DialInfoClass::Direct,
|
||||
)?;
|
||||
editor_public_internet
|
||||
.register_dial_info(pdi.clone(), DialInfoClass::Direct)?;
|
||||
static_public = true;
|
||||
}
|
||||
|
||||
@ -557,11 +537,7 @@ impl Network {
|
||||
if !registered_addresses.contains(&gsa.ip())
|
||||
&& self.is_usable_interface_address(gsa.ip())
|
||||
{
|
||||
routing_table.register_dial_info(
|
||||
RoutingDomain::LocalNetwork,
|
||||
pdi,
|
||||
DialInfoClass::Direct,
|
||||
)?;
|
||||
editor_local_network.register_dial_info(pdi, DialInfoClass::Direct)?;
|
||||
}
|
||||
|
||||
registered_addresses.insert(gsa.ip());
|
||||
@ -580,16 +556,19 @@ impl Network {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub(super) async fn start_tcp_listeners(&self) -> EyreResult<()> {
|
||||
pub(super) async fn start_tcp_listeners(
|
||||
&self,
|
||||
editor_public_internet: &mut RoutingDomainEditor,
|
||||
editor_local_network: &mut RoutingDomainEditor,
|
||||
) -> EyreResult<()> {
|
||||
trace!("starting tcp listeners");
|
||||
|
||||
let routing_table = self.routing_table();
|
||||
let (listen_address, public_address, enable_local_peer_scope, detect_address_changes) = {
|
||||
let (listen_address, public_address, detect_address_changes) = {
|
||||
let c = self.config.get();
|
||||
(
|
||||
c.network.protocol.tcp.listen_address.clone(),
|
||||
c.network.protocol.tcp.public_address.clone(),
|
||||
c.network.enable_local_peer_scope,
|
||||
c.network.detect_address_changes,
|
||||
)
|
||||
};
|
||||
@ -625,21 +604,13 @@ impl Network {
|
||||
// Register global dial info if no public address is specified
|
||||
if !detect_address_changes
|
||||
&& public_address.is_none()
|
||||
&& (di.is_global() || enable_local_peer_scope)
|
||||
&& routing_table.ensure_dial_info_is_valid(RoutingDomain::PublicInternet, &di)
|
||||
{
|
||||
routing_table.register_dial_info(
|
||||
RoutingDomain::PublicInternet,
|
||||
di.clone(),
|
||||
DialInfoClass::Direct,
|
||||
)?;
|
||||
editor_public_internet.register_dial_info(di.clone(), DialInfoClass::Direct)?;
|
||||
static_public = true;
|
||||
}
|
||||
// Register interface dial info
|
||||
routing_table.register_dial_info(
|
||||
RoutingDomain::LocalNetwork,
|
||||
di.clone(),
|
||||
DialInfoClass::Direct,
|
||||
)?;
|
||||
editor_local_network.register_dial_info(di.clone(), DialInfoClass::Direct)?;
|
||||
registered_addresses.insert(socket_address.to_ip_addr());
|
||||
}
|
||||
|
||||
@ -659,21 +630,14 @@ impl Network {
|
||||
let pdi = DialInfo::tcp_from_socketaddr(pdi_addr);
|
||||
|
||||
if !detect_address_changes {
|
||||
routing_table.register_dial_info(
|
||||
RoutingDomain::PublicInternet,
|
||||
pdi.clone(),
|
||||
DialInfoClass::Direct,
|
||||
)?;
|
||||
editor_public_internet
|
||||
.register_dial_info(pdi.clone(), DialInfoClass::Direct)?;
|
||||
static_public = true;
|
||||
}
|
||||
|
||||
// See if this public address is also a local interface address
|
||||
if self.is_usable_interface_address(pdi_addr.ip()) {
|
||||
routing_table.register_dial_info(
|
||||
RoutingDomain::LocalNetwork,
|
||||
pdi,
|
||||
DialInfoClass::Direct,
|
||||
)?;
|
||||
editor_local_network.register_dial_info(pdi, DialInfoClass::Direct)?;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -1,6 +1,6 @@
|
||||
use super::*;
|
||||
use futures_util::{FutureExt, StreamExt};
|
||||
use std::io;
|
||||
use std::{io, sync::Arc};
|
||||
use stop_token::prelude::*;
|
||||
|
||||
cfg_if::cfg_if! {
|
||||
@ -81,8 +81,12 @@ pub struct NetworkConnectionStats {
|
||||
last_message_recv_time: Option<u64>,
|
||||
}
|
||||
|
||||
|
||||
pub type NetworkConnectionId = u64;
|
||||
|
||||
#[derive(Debug)]
|
||||
pub struct NetworkConnection {
|
||||
connection_id: NetworkConnectionId,
|
||||
descriptor: ConnectionDescriptor,
|
||||
processor: Option<MustJoinHandle<()>>,
|
||||
established_time: u64,
|
||||
@ -92,11 +96,12 @@ pub struct NetworkConnection {
|
||||
}
|
||||
|
||||
impl NetworkConnection {
|
||||
pub(super) fn dummy(descriptor: ConnectionDescriptor) -> Self {
|
||||
pub(super) fn dummy(id: NetworkConnectionId, descriptor: ConnectionDescriptor) -> Self {
|
||||
// Create handle for sending (dummy is immediately disconnected)
|
||||
let (sender, _receiver) = flume::bounded(intf::get_concurrency() as usize);
|
||||
|
||||
Self {
|
||||
connection_id: id,
|
||||
descriptor,
|
||||
processor: None,
|
||||
established_time: intf::get_timestamp(),
|
||||
@ -113,14 +118,10 @@ impl NetworkConnection {
|
||||
connection_manager: ConnectionManager,
|
||||
manager_stop_token: StopToken,
|
||||
protocol_connection: ProtocolNetworkConnection,
|
||||
connection_id: NetworkConnectionId,
|
||||
) -> Self {
|
||||
// Get timeout
|
||||
let network_manager = connection_manager.network_manager();
|
||||
let inactivity_timeout = network_manager
|
||||
.config()
|
||||
.get()
|
||||
.network
|
||||
.connection_inactivity_timeout_ms;
|
||||
|
||||
// Get descriptor
|
||||
let descriptor = protocol_connection.descriptor();
|
||||
@ -142,15 +143,16 @@ impl NetworkConnection {
|
||||
connection_manager,
|
||||
local_stop_token,
|
||||
manager_stop_token,
|
||||
connection_id,
|
||||
descriptor.clone(),
|
||||
receiver,
|
||||
protocol_connection,
|
||||
inactivity_timeout,
|
||||
stats.clone(),
|
||||
));
|
||||
|
||||
// Return the connection
|
||||
Self {
|
||||
connection_id,
|
||||
descriptor,
|
||||
processor: Some(processor),
|
||||
established_time: intf::get_timestamp(),
|
||||
@ -160,12 +162,16 @@ impl NetworkConnection {
|
||||
}
|
||||
}
|
||||
|
||||
pub fn connection_id(&self) -> NetworkConnectionId {
|
||||
self.connection_id
|
||||
}
|
||||
|
||||
pub fn connection_descriptor(&self) -> ConnectionDescriptor {
|
||||
self.descriptor.clone()
|
||||
}
|
||||
|
||||
pub fn get_handle(&self) -> ConnectionHandle {
|
||||
ConnectionHandle::new(self.descriptor.clone(), self.sender.clone())
|
||||
ConnectionHandle::new(self.connection_id, self.descriptor.clone(), self.sender.clone())
|
||||
}
|
||||
|
||||
pub fn close(&mut self) {
|
||||
@ -215,15 +221,15 @@ impl NetworkConnection {
|
||||
connection_manager: ConnectionManager,
|
||||
local_stop_token: StopToken,
|
||||
manager_stop_token: StopToken,
|
||||
connection_id: NetworkConnectionId,
|
||||
descriptor: ConnectionDescriptor,
|
||||
receiver: flume::Receiver<Vec<u8>>,
|
||||
protocol_connection: ProtocolNetworkConnection,
|
||||
connection_inactivity_timeout_ms: u32,
|
||||
stats: Arc<Mutex<NetworkConnectionStats>>,
|
||||
) -> SendPinBoxFuture<()> {
|
||||
Box::pin(async move {
|
||||
log_net!(
|
||||
"== Starting process_connection loop for {:?}",
|
||||
"== Starting process_connection loop for id={}, {:?}", connection_id,
|
||||
descriptor.green()
|
||||
);
|
||||
|
||||
@ -235,7 +241,7 @@ impl NetworkConnection {
|
||||
// Push mutable timer so we can reset it
|
||||
// Normally we would use an io::timeout here, but WASM won't support that, so we use a mutable sleep future
|
||||
let new_timer = || {
|
||||
intf::sleep(connection_inactivity_timeout_ms).then(|_| async {
|
||||
intf::sleep(connection_manager.connection_inactivity_timeout_ms()).then(|_| async {
|
||||
// timeout
|
||||
log_net!("== Connection timeout on {:?}", descriptor.green());
|
||||
RecvLoopAction::Timeout
|
||||
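Editor's note: the receive loop above recreates its sleep future whenever there is activity, which is what makes the inactivity timeout resettable without an io::timeout (unavailable on WASM). A rough equivalent of that shape is sketched below against tokio purely for illustration; the real code uses its own intf::sleep and flume channels, so the runtime, channel type, and LoopAction names here are assumptions.

// Sketch: a resettable inactivity timeout built by recreating the sleep each iteration.
use std::time::Duration;
use tokio::sync::mpsc;
use tokio::time::sleep;

#[derive(Debug, PartialEq)]
enum LoopAction {
    Timeout,
    Finished,
}

async fn recv_loop(mut rx: mpsc::Receiver<u8>, inactivity: Duration) -> LoopAction {
    loop {
        // Recreating the sleep on every pass is what "resets" the inactivity timer
        tokio::select! {
            _ = sleep(inactivity) => return LoopAction::Timeout,
            msg = rx.recv() => match msg {
                Some(_) => { /* activity: loop again with a fresh timer */ }
                None => return LoopAction::Finished,
            },
        }
    }
}

#[tokio::main]
async fn main() {
    let (tx, rx) = mpsc::channel(1);
    tx.send(1u8).await.unwrap();
    drop(tx);
    assert_eq!(recv_loop(rx, Duration::from_millis(50)).await, LoopAction::Finished);
}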
@ -317,7 +323,7 @@ impl NetworkConnection {
|
||||
.timeout_at(local_stop_token.clone())
|
||||
.timeout_at(manager_stop_token.clone())
|
||||
.await
|
||||
.and_then(std::convert::identity) // flatten
|
||||
.and_then(std::convert::identity) // flatten stoptoken timeouts
|
||||
{
|
||||
Ok(Some(RecvLoopAction::Send)) => {
|
||||
// Don't reset inactivity timer if we're only sending
|
||||
@ -350,7 +356,7 @@ impl NetworkConnection {
|
||||
|
||||
// Let the connection manager know the receive loop exited
|
||||
connection_manager
|
||||
.report_connection_finished(descriptor)
|
||||
.report_connection_finished(connection_id)
|
||||
.await;
|
||||
})
|
||||
}
|
||||
|
@ -181,15 +181,22 @@ impl NetworkManager {
|
||||
let routing_table = self.routing_table();
|
||||
|
||||
for bootstrap_di in bootstrap_dialinfos {
|
||||
log_net!(debug "direct bootstrap with: {}", bootstrap_di);
|
||||
|
||||
let peer_info = self.boot_request(bootstrap_di).await?;
|
||||
|
||||
log_net!(debug " direct bootstrap peerinfo: {:?}", peer_info);
|
||||
|
||||
// Got peer info, let's add it to the routing table
|
||||
for pi in peer_info {
|
||||
let k = pi.node_id.key;
|
||||
// Register the node
|
||||
if let Some(nr) =
|
||||
routing_table.register_node_with_signed_node_info(k, pi.signed_node_info, false)
|
||||
{
|
||||
if let Some(nr) = routing_table.register_node_with_signed_node_info(
|
||||
RoutingDomain::PublicInternet,
|
||||
k,
|
||||
pi.signed_node_info,
|
||||
false,
|
||||
) {
|
||||
// Add this to our futures to process in parallel
|
||||
let routing_table = routing_table.clone();
|
||||
unord.push(
|
||||
@ -278,6 +285,7 @@ impl NetworkManager {
|
||||
|
||||
// Make invalid signed node info (no signature)
|
||||
if let Some(nr) = routing_table.register_node_with_signed_node_info(
|
||||
RoutingDomain::PublicInternet,
|
||||
k,
|
||||
SignedNodeInfo::with_no_signature(NodeInfo {
|
||||
network_class: NetworkClass::InboundCapable, // Bootstraps are always inbound capable
|
||||
@ -298,7 +306,7 @@ impl NetworkManager {
|
||||
let _ = routing_table.find_target(nr.clone()).await;
|
||||
|
||||
// Ensure we got the signed peer info
|
||||
if !nr.operate(|e| e.has_valid_signed_node_info()) {
|
||||
if !nr.signed_node_info_has_valid_signature(RoutingDomain::PublicInternet) {
|
||||
log_net!(warn
|
||||
"bootstrap at {:?} did not return valid signed node info",
|
||||
nr
|
||||
@ -320,28 +328,37 @@ impl NetworkManager {
|
||||
// Ping each node in the routing table if they need to be pinged
|
||||
// to determine their reliability
|
||||
#[instrument(level = "trace", skip(self), err)]
|
||||
pub(super) async fn ping_validator_task_routine(
|
||||
self,
|
||||
stop_token: StopToken,
|
||||
_last_ts: u64,
|
||||
fn ping_validator_public_internet(
|
||||
&self,
|
||||
cur_ts: u64,
|
||||
unord: &mut FuturesUnordered<
|
||||
SendPinBoxFuture<Result<NetworkResult<Answer<SenderInfo>>, RPCError>>,
|
||||
>,
|
||||
) -> EyreResult<()> {
|
||||
let rpc = self.rpc_processor();
|
||||
let routing_table = self.routing_table();
|
||||
|
||||
let relay_node_id = self.relay_node().map(|nr| nr.node_id());
|
||||
let dids = routing_table.all_filtered_dial_info_details(
|
||||
Some(RoutingDomain::PublicInternet),
|
||||
&DialInfoFilter::global(),
|
||||
);
|
||||
let mut unord = FuturesUnordered::new();
|
||||
// Get all nodes needing pings in the PublicInternet routing domain
|
||||
let node_refs = routing_table.get_nodes_needing_ping(RoutingDomain::PublicInternet, cur_ts);
|
||||
|
||||
let node_refs = routing_table.get_nodes_needing_ping(cur_ts, relay_node_id);
|
||||
// Look up any NAT mappings we may need to try to preserve with keepalives
|
||||
let mut mapped_port_info = routing_table.get_mapped_port_info();
|
||||
|
||||
// Get the PublicInternet relay if we are using one
|
||||
let opt_relay_nr = routing_table.relay_node(RoutingDomain::PublicInternet);
|
||||
let opt_relay_id = opt_relay_nr.map(|nr| nr.node_id());
|
||||
|
||||
// Get our publicinternet dial info
|
||||
let dids = routing_table.all_filtered_dial_info_details(
|
||||
RoutingDomain::PublicInternet.into(),
|
||||
&DialInfoFilter::all(),
|
||||
);
|
||||
|
||||
// For all nodes needing pings, figure out how many and over what protocols
|
||||
for nr in node_refs {
|
||||
let rpc = rpc.clone();
|
||||
if Some(nr.node_id()) == relay_node_id {
|
||||
// If this is a relay, let's check for NAT keepalives
|
||||
let mut did_pings = false;
|
||||
if Some(nr.node_id()) == opt_relay_id {
|
||||
// Relay nodes get pinged over all protocols we have inbound dialinfo for
|
||||
// This is so we can preserve the inbound NAT mappings at our router
|
||||
for did in &dids {
|
||||
@ -361,19 +378,72 @@ impl NetworkManager {
|
||||
};
|
||||
if needs_ping {
|
||||
let rpc = rpc.clone();
|
||||
let dif = did.dial_info.make_filter(true);
|
||||
let nr_filtered = nr.filtered_clone(dif);
|
||||
let dif = did.dial_info.make_filter();
|
||||
let nr_filtered =
|
||||
nr.filtered_clone(NodeRefFilter::new().with_dial_info_filter(dif));
|
||||
log_net!("--> Keepalive ping to {:?}", nr_filtered);
|
||||
unord.push(async move { rpc.rpc_call_status(nr_filtered).await }.boxed());
|
||||
did_pings = true;
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// Just do a single ping with the best protocol for all the other nodes
|
||||
}
|
||||
// Just do a single ping with the best protocol for all the other nodes,
|
||||
// ensuring that we at least ping a relay with -something- even if we didn't have
|
||||
// any mapped ports to preserve
|
||||
if !did_pings {
|
||||
let rpc = rpc.clone();
|
||||
unord.push(async move { rpc.rpc_call_status(nr).await }.boxed());
|
||||
}
|
||||
}
|
||||
|
||||
// Wait for futures to complete
|
||||
Ok(())
|
||||
}
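Editor's note: ping_validator_public_internet fans keepalives out to the relay over every protocol we advertise inbound dial info for, so each NAT mapping at the router stays warm, while every other peer (and a relay with no mapped ports) gets a single best-protocol ping. The selection reduces to something like the sketch below; the Protocol and Peer types are stand-ins, not the veilid types.

// Sketch of the keepalive fanout: one ping per inbound protocol for a relay, one ping otherwise.
use std::collections::BTreeSet;

#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
enum Protocol {
    Udp,
    Tcp,
    Ws,
}

struct Peer {
    name: &'static str,
    is_relay: bool,
}

fn pings_for_peer(
    peer: &Peer,
    inbound_protocols: &BTreeSet<Protocol>,
) -> Vec<(String, Option<Protocol>)> {
    let mut pings = Vec::new();
    if peer.is_relay {
        // One ping per protocol we accept inbound connections on, to keep each NAT mapping alive
        for proto in inbound_protocols {
            pings.push((peer.name.to_string(), Some(*proto)));
        }
    }
    if pings.is_empty() {
        // Everyone else, and a relay with no mapped ports, gets a single best-protocol ping
        pings.push((peer.name.to_string(), None));
    }
    pings
}

fn main() {
    let inbound: BTreeSet<Protocol> = [Protocol::Udp, Protocol::Tcp].into_iter().collect();
    let relay = Peer { name: "relay", is_relay: true };
    let peer = Peer { name: "peer", is_relay: false };
    assert_eq!(pings_for_peer(&relay, &inbound).len(), 2);
    assert_eq!(pings_for_peer(&peer, &inbound).len(), 1);
}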
// Ping each node in the LocalNetwork routing domain if they
|
||||
// need to be pinged to determine their reliability
|
||||
#[instrument(level = "trace", skip(self), err)]
|
||||
fn ping_validator_local_network(
|
||||
&self,
|
||||
cur_ts: u64,
|
||||
unord: &mut FuturesUnordered<
|
||||
SendPinBoxFuture<Result<NetworkResult<Answer<SenderInfo>>, RPCError>>,
|
||||
>,
|
||||
) -> EyreResult<()> {
|
||||
let rpc = self.rpc_processor();
|
||||
let routing_table = self.routing_table();
|
||||
|
||||
// Get all nodes needing pings in the LocalNetwork routing domain
|
||||
let node_refs = routing_table.get_nodes_needing_ping(RoutingDomain::LocalNetwork, cur_ts);
|
||||
|
||||
// For all nodes needing pings, figure out how many and over what protocols
|
||||
for nr in node_refs {
|
||||
let rpc = rpc.clone();
|
||||
|
||||
// Just do a single ping with the best protocol for all the nodes
|
||||
unord.push(async move { rpc.rpc_call_status(nr).await }.boxed());
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
// Ping each node in the routing table if they need to be pinged
|
||||
// to determine their reliability
|
||||
#[instrument(level = "trace", skip(self), err)]
|
||||
pub(super) async fn ping_validator_task_routine(
|
||||
self,
|
||||
stop_token: StopToken,
|
||||
_last_ts: u64,
|
||||
cur_ts: u64,
|
||||
) -> EyreResult<()> {
|
||||
let mut unord = FuturesUnordered::new();
|
||||
|
||||
// PublicInternet
|
||||
self.ping_validator_public_internet(cur_ts, &mut unord)?;
|
||||
|
||||
// LocalNetwork
|
||||
self.ping_validator_local_network(cur_ts, &mut unord)?;
|
||||
|
||||
// Wait for ping futures to complete in parallel
|
||||
while let Ok(Some(_)) = unord.next().timeout_at(stop_token.clone()).await {}
|
||||
|
||||
Ok(())
|
||||
@ -381,25 +451,39 @@ impl NetworkManager {
|
||||
|
||||
// Ask our remaining peers to give us more peers before we go
|
||||
// back to the bootstrap servers to keep us from bothering them too much
|
||||
// This only adds PublicInternet routing domain peers. The discovery
|
||||
// mechanism for LocalNetwork suffices for locating all the local network
|
||||
// peers that are available. This, however, may query other LocalNetwork
|
||||
// nodes for their PublicInternet peers, which is a very fast way to get
|
||||
// a new node online.
|
||||
#[instrument(level = "trace", skip(self), err)]
|
||||
pub(super) async fn peer_minimum_refresh_task_routine(
|
||||
self,
|
||||
stop_token: StopToken,
|
||||
) -> EyreResult<()> {
|
||||
let routing_table = self.routing_table();
|
||||
let cur_ts = intf::get_timestamp();
|
||||
let mut ord = FuturesOrdered::new();
|
||||
let min_peer_count = {
|
||||
let c = self.config.get();
|
||||
c.network.dht.min_peer_count as usize
|
||||
};
|
||||
|
||||
// get list of all peers we know about, even the unreliable ones, and ask them to find nodes close to our node too
|
||||
let noderefs = routing_table.get_all_nodes(cur_ts);
|
||||
|
||||
// do peer minimum search concurrently
|
||||
let mut unord = FuturesUnordered::new();
|
||||
// For the PublicInternet routing domain, get list of all peers we know about
|
||||
// even the unreliable ones, and ask them to find nodes close to our node too
|
||||
let noderefs = routing_table.find_fastest_nodes(
|
||||
min_peer_count,
|
||||
|_k, _v| true,
|
||||
|k: DHTKey, v: Option<Arc<BucketEntry>>| {
|
||||
NodeRef::new(routing_table.clone(), k, v.unwrap().clone(), None)
|
||||
},
|
||||
);
|
||||
for nr in noderefs {
|
||||
log_net!("--- peer minimum search with {:?}", nr);
|
||||
let routing_table = routing_table.clone();
|
||||
unord.push(async move { routing_table.reverse_find_node(nr, false).await });
|
||||
ord.push_back(async move { routing_table.reverse_find_node(nr, false).await });
|
||||
}
|
||||
while let Ok(Some(_)) = unord.next().timeout_at(stop_token.clone()).await {}
|
||||
|
||||
// do peer minimum search in order from fastest to slowest
|
||||
while let Ok(Some(_)) = ord.next().timeout_at(stop_token.clone()).await {}
|
||||
|
||||
Ok(())
|
||||
}
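Editor's note: switching from FuturesUnordered to FuturesOrdered means the reverse_find_node results are consumed in the order the peers were pushed, i.e. fastest node first, even if a slower query happens to finish earlier. A self-contained illustration follows; it assumes only the futures crate (whose push_back the routine above also uses), and the per-peer work is a stand-in.

// Sketch: FuturesOrdered yields results in push order, regardless of completion order.
use futures::stream::{FuturesOrdered, StreamExt};

async fn query_peer(name: &'static str) -> &'static str {
    // A real implementation would issue a reverse_find_node-style RPC here
    name
}

fn main() {
    futures::executor::block_on(async {
        let mut ord = FuturesOrdered::new();
        // Push in priority order (e.g. fastest nodes first)
        for peer in ["fast-peer", "medium-peer", "slow-peer"] {
            ord.push_back(query_peer(peer));
        }
        // Results come back in push order
        while let Some(done) = ord.next().await {
            println!("refreshed via {}", done);
        }
    });
}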
@ -414,30 +498,29 @@ impl NetworkManager {
|
||||
) -> EyreResult<()> {
|
||||
// Get our node's current node info and network class and do the right thing
|
||||
let routing_table = self.routing_table();
|
||||
let node_info = routing_table.get_own_node_info();
|
||||
let network_class = self.get_network_class();
|
||||
let mut node_info_changed = false;
|
||||
let node_info = routing_table.get_own_node_info(RoutingDomain::PublicInternet);
|
||||
let network_class = self.get_network_class(RoutingDomain::PublicInternet);
|
||||
|
||||
// Get routing domain editor
|
||||
let mut editor = routing_table.edit_routing_domain(RoutingDomain::PublicInternet);
|
||||
|
||||
// Do we know our network class yet?
|
||||
if let Some(network_class) = network_class {
|
||||
// If we already have a relay, see if it is dead, or if we don't need it any more
|
||||
let has_relay = {
|
||||
let mut inner = self.inner.lock();
|
||||
if let Some(relay_node) = inner.relay_node.clone() {
|
||||
let state = relay_node.operate(|e| e.state(cur_ts));
|
||||
if let Some(relay_node) = routing_table.relay_node(RoutingDomain::PublicInternet) {
|
||||
let state = relay_node.state(cur_ts);
|
||||
// Relay node is dead or no longer needed
|
||||
if matches!(state, BucketEntryState::Dead) {
|
||||
info!("Relay node died, dropping relay {}", relay_node);
|
||||
inner.relay_node = None;
|
||||
node_info_changed = true;
|
||||
editor.clear_relay_node();
|
||||
false
|
||||
} else if !node_info.requires_relay() {
|
||||
info!(
|
||||
"Relay node no longer required, dropping relay {}",
|
||||
relay_node
|
||||
);
|
||||
inner.relay_node = None;
|
||||
node_info_changed = true;
|
||||
editor.clear_relay_node();
|
||||
false
|
||||
} else {
|
||||
true
|
||||
@ -453,36 +536,32 @@ impl NetworkManager {
|
||||
if network_class.outbound_wants_relay() {
|
||||
// The outbound relay is the host of the PWA
|
||||
if let Some(outbound_relay_peerinfo) = intf::get_outbound_relay_peer().await {
|
||||
let mut inner = self.inner.lock();
|
||||
|
||||
// Register new outbound relay
|
||||
if let Some(nr) = routing_table.register_node_with_signed_node_info(
|
||||
RoutingDomain::PublicInternet,
|
||||
outbound_relay_peerinfo.node_id.key,
|
||||
outbound_relay_peerinfo.signed_node_info,
|
||||
false,
|
||||
) {
|
||||
info!("Outbound relay node selected: {}", nr);
|
||||
inner.relay_node = Some(nr);
|
||||
node_info_changed = true;
|
||||
editor.set_relay_node(nr);
|
||||
}
|
||||
}
|
||||
// Otherwise we must need an inbound relay
|
||||
} else {
|
||||
// Find a node in our routing table that is an acceptable inbound relay
|
||||
if let Some(nr) = routing_table.find_inbound_relay(cur_ts) {
|
||||
let mut inner = self.inner.lock();
|
||||
if let Some(nr) =
|
||||
routing_table.find_inbound_relay(RoutingDomain::PublicInternet, cur_ts)
|
||||
{
|
||||
info!("Inbound relay node selected: {}", nr);
|
||||
inner.relay_node = Some(nr);
|
||||
node_info_changed = true;
|
||||
editor.set_relay_node(nr);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Re-send our node info if we selected a relay
|
||||
if node_info_changed {
|
||||
self.send_node_info_updates(true).await;
|
||||
}
|
||||
// Commit the changes
|
||||
editor.commit().await;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
@ -7,13 +7,12 @@ use crate::*;
|
||||
pub async fn test_add_get_remove() {
|
||||
let config = get_config();
|
||||
|
||||
let mut table = ConnectionTable::new(config);
|
||||
let table = ConnectionTable::new(config);
|
||||
|
||||
let a1 = ConnectionDescriptor::new_no_local(PeerAddress::new(
|
||||
SocketAddress::new(Address::IPV4(Ipv4Addr::new(192, 168, 0, 1)), 8080),
|
||||
ProtocolType::TCP,
|
||||
))
|
||||
.unwrap();
|
||||
));
|
||||
let a2 = a1;
|
||||
let a3 = ConnectionDescriptor::new(
|
||||
PeerAddress::new(
|
||||
@ -26,8 +25,7 @@ pub async fn test_add_get_remove() {
|
||||
0,
|
||||
0,
|
||||
))),
|
||||
)
|
||||
.unwrap();
|
||||
);
|
||||
let a4 = ConnectionDescriptor::new(
|
||||
PeerAddress::new(
|
||||
SocketAddress::new(Address::IPV6(Ipv6Addr::new(192, 0, 0, 0, 0, 0, 0, 1)), 8090),
|
||||
@ -39,8 +37,7 @@ pub async fn test_add_get_remove() {
|
||||
0,
|
||||
0,
|
||||
))),
|
||||
)
|
||||
.unwrap();
|
||||
);
|
||||
let a5 = ConnectionDescriptor::new(
|
||||
PeerAddress::new(
|
||||
SocketAddress::new(Address::IPV6(Ipv6Addr::new(192, 0, 0, 0, 0, 0, 0, 1)), 8090),
|
||||
@ -52,79 +49,72 @@ pub async fn test_add_get_remove() {
|
||||
0,
|
||||
0,
|
||||
))),
|
||||
)
|
||||
.unwrap();
|
||||
);
|
||||
|
||||
let c1 = NetworkConnection::dummy(a1);
|
||||
let c1 = NetworkConnection::dummy(1, a1);
|
||||
let c1h = c1.get_handle();
|
||||
let c2 = NetworkConnection::dummy(a2);
|
||||
//let c2h = c2.get_handle();
|
||||
let c3 = NetworkConnection::dummy(a3);
|
||||
//let c3h = c3.get_handle();
|
||||
let c4 = NetworkConnection::dummy(a4);
|
||||
//let c4h = c4.get_handle();
|
||||
let c5 = NetworkConnection::dummy(a5);
|
||||
//let c5h = c5.get_handle();
|
||||
let c2 = NetworkConnection::dummy(2, a2);
|
||||
let c3 = NetworkConnection::dummy(3, a3);
|
||||
let c4 = NetworkConnection::dummy(4, a4);
|
||||
let c5 = NetworkConnection::dummy(5, a5);
|
||||
|
||||
assert_eq!(a1, c2.connection_descriptor());
|
||||
assert_ne!(a3, c4.connection_descriptor());
|
||||
assert_ne!(a4, c5.connection_descriptor());
|
||||
|
||||
assert_eq!(table.connection_count(), 0);
|
||||
assert_eq!(table.get_connection(a1), None);
|
||||
assert_eq!(table.get_connection_by_descriptor(a1), None);
|
||||
table.add_connection(c1).unwrap();
|
||||
|
||||
assert_eq!(table.connection_count(), 1);
|
||||
assert_err!(table.remove_connection(a3));
|
||||
assert_err!(table.remove_connection(a4));
|
||||
assert!(table.remove_connection_by_id(4).is_none());
|
||||
assert!(table.remove_connection_by_id(5).is_none());
|
||||
assert_eq!(table.connection_count(), 1);
|
||||
assert_eq!(table.get_connection(a1), Some(c1h.clone()));
|
||||
assert_eq!(table.get_connection(a1), Some(c1h.clone()));
|
||||
assert_eq!(table.get_connection_by_descriptor(a1), Some(c1h.clone()));
|
||||
assert_eq!(table.get_connection_by_descriptor(a1), Some(c1h.clone()));
|
||||
assert_eq!(table.connection_count(), 1);
|
||||
assert_err!(table.add_connection(c2));
|
||||
assert_eq!(table.connection_count(), 1);
|
||||
assert_eq!(table.get_connection(a1), Some(c1h.clone()));
|
||||
assert_eq!(table.get_connection(a1), Some(c1h.clone()));
|
||||
assert_eq!(table.get_connection_by_descriptor(a1), Some(c1h.clone()));
|
||||
assert_eq!(table.get_connection_by_descriptor(a1), Some(c1h.clone()));
|
||||
assert_eq!(table.connection_count(), 1);
|
||||
assert_eq!(
|
||||
table
|
||||
.remove_connection(a2)
|
||||
.remove_connection_by_id(1)
|
||||
.map(|c| c.connection_descriptor())
|
||||
.unwrap(),
|
||||
a1
|
||||
);
|
||||
assert_eq!(table.connection_count(), 0);
|
||||
assert_err!(table.remove_connection(a2));
|
||||
assert!(table.remove_connection_by_id(2).is_none());
|
||||
assert_eq!(table.connection_count(), 0);
|
||||
assert_eq!(table.get_connection(a2), None);
|
||||
assert_eq!(table.get_connection(a1), None);
|
||||
assert_eq!(table.get_connection_by_descriptor(a2), None);
|
||||
assert_eq!(table.get_connection_by_descriptor(a1), None);
|
||||
assert_eq!(table.connection_count(), 0);
|
||||
let c1 = NetworkConnection::dummy(a1);
|
||||
//let c1h = c1.get_handle();
|
||||
let c1 = NetworkConnection::dummy(6, a1);
|
||||
table.add_connection(c1).unwrap();
|
||||
let c2 = NetworkConnection::dummy(a2);
|
||||
//let c2h = c2.get_handle();
|
||||
let c2 = NetworkConnection::dummy(7, a2);
|
||||
assert_err!(table.add_connection(c2));
|
||||
table.add_connection(c3).unwrap();
|
||||
table.add_connection(c4).unwrap();
|
||||
assert_eq!(table.connection_count(), 3);
|
||||
assert_eq!(
|
||||
table
|
||||
.remove_connection(a2)
|
||||
.remove_connection_by_id(6)
|
||||
.map(|c| c.connection_descriptor())
|
||||
.unwrap(),
|
||||
a2
|
||||
);
|
||||
assert_eq!(
|
||||
table
|
||||
.remove_connection(a3)
|
||||
.remove_connection_by_id(3)
|
||||
.map(|c| c.connection_descriptor())
|
||||
.unwrap(),
|
||||
a3
|
||||
);
|
||||
assert_eq!(
|
||||
table
|
||||
.remove_connection(a4)
|
||||
.remove_connection_by_id(4)
|
||||
.map(|c| c.connection_descriptor())
|
||||
.unwrap(),
|
||||
a4
|
||||
|
@ -160,7 +160,7 @@ impl Network {
|
||||
// Handle connection-oriented protocols
|
||||
|
||||
// Try to send to the exact existing connection if one exists
|
||||
if let Some(conn) = self.connection_manager().get_connection(descriptor).await {
|
||||
if let Some(conn) = self.connection_manager().get_connection(descriptor) {
|
||||
// connection exists, send over it
|
||||
match conn.send_async(data).await {
|
||||
ConnectionHandleSendResult::Sent => {
|
||||
@ -292,11 +292,15 @@ impl Network {
|
||||
|
||||
//////////////////////////////////////////
|
||||
|
||||
pub fn set_needs_public_dial_info_check(&self) {
|
||||
pub fn set_needs_public_dial_info_check(&self, _punishment: Option<Box<dyn FnOnce() + Send + 'static>>) {
|
||||
//
|
||||
}
|
||||
|
||||
pub fn get_network_class(&self) -> Option<NetworkClass> {
|
||||
pub fn doing_public_dial_info_check(&self) -> bool {
|
||||
false
|
||||
}
|
||||
|
||||
pub fn get_network_class(&self, _routing_domain: RoutingDomain) -> Option<NetworkClass> {
|
||||
// xxx eventually detect tor browser?
|
||||
return if self.inner.lock().network_started {
|
||||
Some(NetworkClass::WebApp)
|
||||
|
@ -134,8 +134,7 @@ impl WebsocketProtocolHandler {
|
||||
|
||||
// Make our connection descriptor
|
||||
let wnc = WebsocketNetworkConnection::new(
|
||||
ConnectionDescriptor::new_no_local(dial_info.to_peer_address())
|
||||
.map_err(|e| io::Error::new(io::ErrorKind::AddrNotAvailable, e))?,
|
||||
ConnectionDescriptor::new_no_local(dial_info.to_peer_address()),
|
||||
wsmeta,
|
||||
wsio,
|
||||
);
|
||||
|
@ -39,16 +39,37 @@ pub enum BucketEntryState {
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Eq, PartialEq, PartialOrd, Ord, Hash)]
|
||||
struct LastConnectionKey(PeerScope, ProtocolType, AddressType);
|
||||
struct LastConnectionKey(ProtocolType, AddressType);
|
||||
|
||||
/// Bucket entry information specific to the PublicInternet RoutingDomain
|
||||
#[derive(Debug)]
|
||||
pub struct BucketEntryPublicInternet {
|
||||
/// The PublicInternet node info
|
||||
signed_node_info: Option<Box<SignedNodeInfo>>,
|
||||
/// If this node has seen our publicinternet node info
|
||||
seen_our_node_info: bool,
|
||||
/// Last known node status
|
||||
node_status: Option<PublicInternetNodeStatus>,
|
||||
}
|
||||
|
||||
/// Bucket entry information specific to the LocalNetwork RoutingDomain
|
||||
#[derive(Debug)]
|
||||
pub struct BucketEntryLocalNetwork {
|
||||
/// The LocalNetwork node info
|
||||
signed_node_info: Option<Box<SignedNodeInfo>>,
|
||||
/// If this node has seen our localnetwork node info
|
||||
seen_our_node_info: bool,
|
||||
/// Last known node status
|
||||
node_status: Option<LocalNetworkNodeStatus>,
|
||||
}
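Editor's note: BucketEntryInner now keeps one record per routing domain, and each accessor starts by matching the routing domain to pick the right record, as the methods further down do with opt_current_sni. A compact stand-alone sketch of that access pattern follows; the *Sketch types and the String stand-in for SignedNodeInfo are invented for illustration.

// Illustrative per-routing-domain record selection via a single match helper.
#[derive(Copy, Clone, Debug)]
enum RoutingDomainSketch {
    PublicInternet,
    LocalNetwork,
}

#[derive(Default, Debug)]
struct DomainRecordSketch {
    signed_node_info: Option<String>, // stand-in for Option<Box<SignedNodeInfo>>
}

#[derive(Default, Debug)]
struct BucketEntryInnerSketch {
    public_internet: DomainRecordSketch,
    local_network: DomainRecordSketch,
}

impl BucketEntryInnerSketch {
    fn record(&self, domain: RoutingDomainSketch) -> &DomainRecordSketch {
        match domain {
            RoutingDomainSketch::PublicInternet => &self.public_internet,
            RoutingDomainSketch::LocalNetwork => &self.local_network,
        }
    }
    fn record_mut(&mut self, domain: RoutingDomainSketch) -> &mut DomainRecordSketch {
        match domain {
            RoutingDomainSketch::PublicInternet => &mut self.public_internet,
            RoutingDomainSketch::LocalNetwork => &mut self.local_network,
        }
    }
    fn has_node_info(&self, domain: RoutingDomainSketch) -> bool {
        self.record(domain).signed_node_info.is_some()
    }
}

fn main() {
    let mut entry = BucketEntryInnerSketch::default();
    entry
        .record_mut(RoutingDomainSketch::PublicInternet)
        .signed_node_info = Some("sni".to_string());
    assert!(entry.has_node_info(RoutingDomainSketch::PublicInternet));
    assert!(!entry.has_node_info(RoutingDomainSketch::LocalNetwork));
}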
#[derive(Debug)]
|
||||
pub struct BucketEntryInner {
|
||||
min_max_version: Option<(u8, u8)>,
|
||||
seen_our_node_info: bool,
|
||||
updated_since_last_network_change: bool,
|
||||
last_connections: BTreeMap<LastConnectionKey, (ConnectionDescriptor, u64)>,
|
||||
opt_signed_node_info: Option<SignedNodeInfo>,
|
||||
opt_local_node_info: Option<LocalNodeInfo>,
|
||||
public_internet: BucketEntryPublicInternet,
|
||||
local_network: BucketEntryLocalNetwork,
|
||||
peer_stats: PeerStats,
|
||||
latency_stats_accounting: LatencyStatsAccounting,
|
||||
transfer_stats_accounting: TransferStatsAccounting,
|
||||
@ -115,29 +136,39 @@ impl BucketEntryInner {
|
||||
move |e1, e2| Self::cmp_fastest_reliable(cur_ts, e1, e2)
|
||||
}
|
||||
|
||||
pub fn clear_signed_node_info(&mut self, routing_domain: RoutingDomain) {
|
||||
// Get the correct signed_node_info for the chosen routing domain
|
||||
let opt_current_sni = match routing_domain {
|
||||
RoutingDomain::LocalNetwork => &mut self.local_network.signed_node_info,
|
||||
RoutingDomain::PublicInternet => &mut self.public_internet.signed_node_info,
|
||||
};
|
||||
*opt_current_sni = None;
|
||||
}
|
||||
|
||||
// Returns true if the node info changed
|
||||
pub fn update_signed_node_info(
|
||||
&mut self,
|
||||
routing_domain: RoutingDomain,
|
||||
signed_node_info: SignedNodeInfo,
|
||||
allow_invalid_signature: bool,
|
||||
) {
|
||||
// Don't allow invalid signatures unless we are explicitly allowing it
|
||||
if !allow_invalid_signature && !signed_node_info.signature.valid {
|
||||
log_rtab!(debug "Invalid signature on signed node info: {:?}", signed_node_info);
|
||||
return;
|
||||
}
|
||||
// Get the correct signed_node_info for the chosen routing domain
|
||||
let opt_current_sni = match routing_domain {
|
||||
RoutingDomain::LocalNetwork => &mut self.local_network.signed_node_info,
|
||||
RoutingDomain::PublicInternet => &mut self.public_internet.signed_node_info,
|
||||
};
|
||||
|
||||
// See if we have an existing signed_node_info to update or not
|
||||
if let Some(current_sni) = &self.opt_signed_node_info {
|
||||
if let Some(current_sni) = opt_current_sni {
|
||||
// If the timestamp hasn't changed or is less, ignore this update
|
||||
if signed_node_info.timestamp <= current_sni.timestamp {
|
||||
// If we received a node update with the same timestamp
// we can make this node live again, but only if our network hasn't changed
// we can make this node live again, but only if our network has recently changed
// which may make nodes that were unreachable now reachable with the same dialinfo
if !self.updated_since_last_network_change
&& signed_node_info.timestamp == current_sni.timestamp
{
// No need to update the signednodeinfo though since the timestamp is the same
// Just return true so we can make the node not dead
// Touch the node and let it try to live again
self.updated_since_last_network_change = true;
self.touch_last_seen(intf::get_timestamp());
}
@ -152,48 +183,70 @@ impl BucketEntryInner {
));

// Update the signed node info
self.opt_signed_node_info = Some(signed_node_info);
*opt_current_sni = Some(Box::new(signed_node_info));
self.updated_since_last_network_change = true;
self.touch_last_seen(intf::get_timestamp());
}
pub fn update_local_node_info(&mut self, local_node_info: LocalNodeInfo) {
self.opt_local_node_info = Some(local_node_info)
}

pub fn has_node_info(&self) -> bool {
self.opt_signed_node_info.is_some()
}

pub fn has_valid_signed_node_info(&self) -> bool {
if let Some(sni) = &self.opt_signed_node_info {
sni.is_valid()
} else {
false
pub fn has_node_info(&self, routing_domain_set: RoutingDomainSet) -> bool {
for routing_domain in routing_domain_set {
// Get the correct signed_node_info for the chosen routing domain
let opt_current_sni = match routing_domain {
RoutingDomain::LocalNetwork => &self.local_network.signed_node_info,
RoutingDomain::PublicInternet => &self.public_internet.signed_node_info,
};
if opt_current_sni.is_some() {
return true;
}
}
false
}

pub fn has_local_node_info(&self) -> bool {
self.opt_local_node_info.is_some()
pub fn node_info(&self, routing_domain: RoutingDomain) -> Option<&NodeInfo> {
let opt_current_sni = match routing_domain {
RoutingDomain::LocalNetwork => &self.local_network.signed_node_info,
RoutingDomain::PublicInternet => &self.public_internet.signed_node_info,
};
opt_current_sni.as_ref().map(|s| &s.node_info)
}

pub fn node_info(&self) -> Option<NodeInfo> {
self.opt_signed_node_info
.as_ref()
.map(|s| s.node_info.clone())
pub fn signed_node_info(&self, routing_domain: RoutingDomain) -> Option<&SignedNodeInfo> {
let opt_current_sni = match routing_domain {
RoutingDomain::LocalNetwork => &self.local_network.signed_node_info,
RoutingDomain::PublicInternet => &self.public_internet.signed_node_info,
};
opt_current_sni.as_ref().map(|s| s.as_ref())
}
pub fn local_node_info(&self) -> Option<LocalNodeInfo> {
self.opt_local_node_info.clone()
}
pub fn peer_info(&self, key: DHTKey) -> Option<PeerInfo> {
self.opt_signed_node_info.as_ref().map(|s| PeerInfo {

pub fn make_peer_info(&self, key: DHTKey, routing_domain: RoutingDomain) -> Option<PeerInfo> {
let opt_current_sni = match routing_domain {
RoutingDomain::LocalNetwork => &self.local_network.signed_node_info,
RoutingDomain::PublicInternet => &self.public_internet.signed_node_info,
};
opt_current_sni.as_ref().map(|s| PeerInfo {
node_id: NodeId::new(key),
signed_node_info: s.clone(),
signed_node_info: *s.clone(),
})
}

fn descriptor_to_key(last_connection: ConnectionDescriptor) -> LastConnectionKey {
pub fn best_routing_domain(
&self,
routing_domain_set: RoutingDomainSet,
) -> Option<RoutingDomain> {
for routing_domain in routing_domain_set {
let opt_current_sni = match routing_domain {
RoutingDomain::LocalNetwork => &self.local_network.signed_node_info,
RoutingDomain::PublicInternet => &self.public_internet.signed_node_info,
};
if opt_current_sni.is_some() {
return Some(routing_domain);
}
}
None
}
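
has_node_info and best_routing_domain both walk a caller-supplied RoutingDomainSet and pick the first domain that actually has signed node info. Below is a minimal, self-contained sketch of that lookup pattern using stand-in types rather than the real veilid-core ones (a slice plays the role of RoutingDomainSet here):

#[derive(Debug, Clone, Copy, PartialEq, Eq)]
enum Domain {
    LocalNetwork,
    PublicInternet,
}

#[derive(Default)]
struct EntrySketch {
    local_network_sni: Option<String>, // stand-in for per-domain SignedNodeInfo
    public_internet_sni: Option<String>,
}

impl EntrySketch {
    fn sni(&self, domain: Domain) -> Option<&String> {
        match domain {
            Domain::LocalNetwork => self.local_network_sni.as_ref(),
            Domain::PublicInternet => self.public_internet_sni.as_ref(),
        }
    }
    // Same shape as best_routing_domain(): first domain in the set with info wins
    fn best_domain(&self, domain_set: &[Domain]) -> Option<Domain> {
        domain_set.iter().copied().find(|d| self.sni(*d).is_some())
    }
}

fn main() {
    let mut e = EntrySketch::default();
    e.public_internet_sni = Some("signed node info".to_string());
    // LocalNetwork is listed first but has no info, so PublicInternet wins
    assert_eq!(
        e.best_domain(&[Domain::LocalNetwork, Domain::PublicInternet]),
        Some(Domain::PublicInternet)
    );
}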

fn descriptor_to_key(&self, last_connection: ConnectionDescriptor) -> LastConnectionKey {
LastConnectionKey(
last_connection.peer_scope(),
last_connection.protocol_type(),
last_connection.address_type(),
)
@ -201,7 +254,7 @@ impl BucketEntryInner {

// Stores a connection descriptor in this entry's table of last connections
pub fn set_last_connection(&mut self, last_connection: ConnectionDescriptor, timestamp: u64) {
let key = Self::descriptor_to_key(last_connection);
let key = self.descriptor_to_key(last_connection);
self.last_connections
.insert(key, (last_connection, timestamp));
}
@ -211,19 +264,26 @@ impl BucketEntryInner {
self.last_connections.clear();
}

// Gets the best 'last connection' that matches a set of protocol types and address types
pub fn last_connection(
// Gets the best 'last connection' that matches a set of routing domain, protocol types and address types
pub(super) fn last_connection(
&self,
dial_info_filter: Option<DialInfoFilter>,
routing_table_inner: &RoutingTableInner,
node_ref_filter: Option<NodeRefFilter>,
) -> Option<(ConnectionDescriptor, u64)> {
// Iterate peer scopes and protocol types and address type in order to ensure we pick the preferred protocols if all else is the same
let dif = dial_info_filter.unwrap_or_default();
for ps in dif.peer_scope_set {
for pt in dif.protocol_type_set {
for at in dif.address_type_set {
let key = LastConnectionKey(ps, pt, at);
if let Some(v) = self.last_connections.get(&key) {
return Some(*v);
let nrf = node_ref_filter.unwrap_or_default();
for pt in nrf.dial_info_filter.protocol_type_set {
for at in nrf.dial_info_filter.address_type_set {
let key = LastConnectionKey(pt, at);
if let Some(v) = self.last_connections.get(&key) {
// Verify this connection could be in the filtered routing domain
let address = v.0.remote_address().address();
if let Some(rd) =
RoutingTable::routing_domain_for_address_inner(routing_table_inner, address)
{
if nrf.routing_domain_set.contains(rd) {
return Some(*v);
}
}
}
}
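
The last-connection bookkeeping above keys descriptors by protocol and address type and then walks the allowed combinations in preference order. A simplified, self-contained sketch of that scheme follows; the names are stand-ins, not the real ConnectionDescriptor or DialInfoFilter types, and the routing-domain check is omitted:

use std::collections::BTreeMap;

#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
enum Proto { Udp, Tcp }

#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
enum Addr { V4, V6 }

type Timestamp = u64;
type Descriptor = &'static str; // stand-in for ConnectionDescriptor

#[derive(Default)]
struct LastConnections {
    map: BTreeMap<(Proto, Addr), (Descriptor, Timestamp)>,
}

impl LastConnections {
    fn set(&mut self, proto: Proto, addr: Addr, desc: Descriptor, ts: Timestamp) {
        self.map.insert((proto, addr), (desc, ts));
    }
    // Walk protocols then address types in order, like last_connection() above
    fn best(&self, protos: &[Proto], addrs: &[Addr]) -> Option<(Descriptor, Timestamp)> {
        for pt in protos {
            for at in addrs {
                if let Some(v) = self.map.get(&(*pt, *at)) {
                    return Some(*v);
                }
            }
        }
        None
    }
}

fn main() {
    let mut lc = LastConnections::default();
    lc.set(Proto::Tcp, Addr::V4, "tcp4 connection", 1000);
    // UDP is preferred but absent, so the TCP/IPv4 entry is returned
    assert_eq!(
        lc.best(&[Proto::Udp, Proto::Tcp], &[Addr::V4, Addr::V6]),
        Some(("tcp4 connection", 1000))
    );
}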
|
||||
@ -253,15 +313,46 @@ impl BucketEntryInner {
|
||||
}
|
||||
|
||||
pub fn update_node_status(&mut self, status: NodeStatus) {
|
||||
self.peer_stats.status = Some(status);
|
||||
match status {
|
||||
NodeStatus::LocalNetwork(ln) => {
|
||||
self.local_network.node_status = Some(ln);
|
||||
}
|
||||
NodeStatus::PublicInternet(pi) => {
|
||||
self.public_internet.node_status = Some(pi);
|
||||
}
|
||||
}
|
||||
}
|
||||
pub fn node_status(&self, routing_domain: RoutingDomain) -> Option<NodeStatus> {
|
||||
match routing_domain {
|
||||
RoutingDomain::LocalNetwork => self
|
||||
.local_network
|
||||
.node_status
|
||||
.as_ref()
|
||||
.map(|ln| NodeStatus::LocalNetwork(ln.clone())),
|
||||
RoutingDomain::PublicInternet => self
|
||||
.public_internet
|
||||
.node_status
|
||||
.as_ref()
|
||||
.map(|pi| NodeStatus::PublicInternet(pi.clone())),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn set_seen_our_node_info(&mut self, seen: bool) {
|
||||
self.seen_our_node_info = seen;
|
||||
pub fn set_seen_our_node_info(&mut self, routing_domain: RoutingDomain, seen: bool) {
|
||||
match routing_domain {
|
||||
RoutingDomain::LocalNetwork => {
|
||||
self.local_network.seen_our_node_info = seen;
|
||||
}
|
||||
RoutingDomain::PublicInternet => {
|
||||
self.public_internet.seen_our_node_info = seen;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub fn has_seen_our_node_info(&self) -> bool {
|
||||
self.seen_our_node_info
|
||||
pub fn has_seen_our_node_info(&self, routing_domain: RoutingDomain) -> bool {
|
||||
match routing_domain {
|
||||
RoutingDomain::LocalNetwork => self.local_network.seen_our_node_info,
|
||||
RoutingDomain::PublicInternet => self.public_internet.seen_our_node_info,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn set_updated_since_last_network_change(&mut self, updated: bool) {
|
||||
@ -337,20 +428,14 @@ impl BucketEntryInner {
}

// Check if this node needs a ping right now to validate it is still reachable
pub(super) fn needs_ping(
&self,
node_id: &DHTKey,
cur_ts: u64,
relay_node_id: Option<DHTKey>,
) -> bool {
pub(super) fn needs_ping(&self, cur_ts: u64, needs_keepalive: bool) -> bool {
// See which ping pattern we are to use
let state = self.state(cur_ts);

// If this entry is our relay node, then we should ping it regularly to keep our association alive
if let Some(relay_node_id) = relay_node_id {
if relay_node_id == *node_id {
return self.needs_constant_ping(cur_ts, KEEPALIVE_PING_INTERVAL_SECS as u64);
}
// If this entry needs a keepalive (like a relay node),
// then we should ping it regularly to keep our association alive
if needs_keepalive {
return self.needs_constant_ping(cur_ts, KEEPALIVE_PING_INTERVAL_SECS as u64);
}
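
needs_constant_ping is not shown in this hunk; the keepalive decision it implements is assumed to boil down to "ping again once the last ping is older than the interval". A rough sketch under that assumption, with timestamps treated as microseconds and an illustrative interval value:

const KEEPALIVE_PING_INTERVAL_SECS: u64 = 10; // assumed value, for illustration only

fn needs_constant_ping(last_ping_ts_us: Option<u64>, cur_ts_us: u64, interval_secs: u64) -> bool {
    match last_ping_ts_us {
        None => true, // never pinged: ping now
        Some(ts) => cur_ts_us.saturating_sub(ts) >= interval_secs * 1_000_000,
    }
}

fn main() {
    let now = 25_000_000u64; // 25 seconds, expressed in microseconds
    assert!(needs_constant_ping(None, now, KEEPALIVE_PING_INTERVAL_SECS));
    assert!(!needs_constant_ping(Some(now - 5_000_000), now, KEEPALIVE_PING_INTERVAL_SECS));
    assert!(needs_constant_ping(Some(now - 15_000_000), now, KEEPALIVE_PING_INTERVAL_SECS));
}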
|
||||
|
||||
match state {
|
||||
@ -494,17 +579,23 @@ impl BucketEntry {
|
||||
ref_count: AtomicU32::new(0),
|
||||
inner: RwLock::new(BucketEntryInner {
|
||||
min_max_version: None,
|
||||
seen_our_node_info: false,
|
||||
updated_since_last_network_change: false,
|
||||
last_connections: BTreeMap::new(),
|
||||
opt_signed_node_info: None,
|
||||
opt_local_node_info: None,
|
||||
local_network: BucketEntryLocalNetwork {
|
||||
seen_our_node_info: false,
|
||||
signed_node_info: None,
|
||||
node_status: None,
|
||||
},
|
||||
public_internet: BucketEntryPublicInternet {
|
||||
seen_our_node_info: false,
|
||||
signed_node_info: None,
|
||||
node_status: None,
|
||||
},
|
||||
peer_stats: PeerStats {
|
||||
time_added: now,
|
||||
rpc_stats: RPCStats::default(),
|
||||
latency: None,
|
||||
transfer: TransferStatsDownUp::default(),
|
||||
status: None,
|
||||
},
|
||||
latency_stats_accounting: LatencyStatsAccounting::new(),
|
||||
transfer_stats_accounting: TransferStatsAccounting::new(),
|
||||
@ -516,7 +607,7 @@ impl BucketEntry {
|
||||
}
|
||||
}
|
||||
|
||||
pub fn with<F, R>(&self, f: F) -> R
|
||||
pub(super) fn with<F, R>(&self, f: F) -> R
|
||||
where
|
||||
F: FnOnce(&BucketEntryInner) -> R,
|
||||
{
|
||||
@ -524,7 +615,7 @@ impl BucketEntry {
|
||||
f(&*inner)
|
||||
}
|
||||
|
||||
pub fn with_mut<F, R>(&self, f: F) -> R
|
||||
pub(super) fn with_mut<F, R>(&self, f: F) -> R
|
||||
where
|
||||
F: FnOnce(&mut BucketEntryInner) -> R,
|
||||
{
|
||||
@ -547,7 +638,7 @@ impl Drop for BucketEntry {
|
||||
|
||||
panic!(
|
||||
"bucket entry dropped with non-zero refcount: {:#?}",
|
||||
self.inner.read().node_info()
|
||||
&*self.inner.read()
|
||||
)
|
||||
}
|
||||
}
|
||||
|
@ -6,7 +6,7 @@ impl RoutingTable {
|
||||
let inner = self.inner.read();
|
||||
out += "Routing Table Info:\n";
|
||||
|
||||
out += &format!(" Node Id: {}\n", inner.node_id.encode());
|
||||
out += &format!(" Node Id: {}\n", self.unlocked_inner.node_id.encode());
|
||||
out += &format!(
|
||||
" Self Latency Stats Accounting: {:#?}\n\n",
|
||||
inner.self_latency_stats_accounting
|
||||
@ -85,8 +85,17 @@ impl RoutingTable {
|
||||
out += &format!(" {:>2}: {:?}\n", n, gdi);
|
||||
}
|
||||
|
||||
out += "Own PeerInfo:\n";
|
||||
out += &format!(" {:#?}\n", self.get_own_peer_info());
|
||||
out += "LocalNetwork PeerInfo:\n";
|
||||
out += &format!(
|
||||
" {:#?}\n",
|
||||
self.get_own_peer_info(RoutingDomain::LocalNetwork)
|
||||
);
|
||||
|
||||
out += "PublicInternet PeerInfo:\n";
|
||||
out += &format!(
|
||||
" {:#?}\n",
|
||||
self.get_own_peer_info(RoutingDomain::PublicInternet)
|
||||
);
|
||||
|
||||
out
|
||||
}
|
||||
@ -142,7 +151,7 @@ impl RoutingTable {
|
||||
let mut out = String::new();
|
||||
out += &format!("Entry {:?}:\n", node_id);
|
||||
if let Some(nr) = self.lookup_node_ref(node_id) {
|
||||
out += &nr.operate(|e| format!("{:#?}\n", e));
|
||||
out += &nr.operate(|_rt, e| format!("{:#?}\n", e));
|
||||
} else {
|
||||
out += "Entry not found\n";
|
||||
}
|
||||
|
@ -15,38 +15,47 @@ pub struct MappedPortInfo {
impl RoutingTable {
// Makes a filter that finds nodes with a matching inbound dialinfo
pub fn make_inbound_dial_info_entry_filter(
routing_domain: RoutingDomain,
dial_info_filter: DialInfoFilter,
) -> impl FnMut(&BucketEntryInner) -> bool {
// does it have matching public dial info?
move |e| {
e.node_info()
.map(|n| {
n.first_filtered_dial_info_detail(|did| did.matches_filter(&dial_info_filter))
.is_some()
})
.unwrap_or(false)
if let Some(ni) = e.node_info(routing_domain) {
if ni
.first_filtered_dial_info_detail(|did| did.matches_filter(&dial_info_filter))
.is_some()
{
return true;
}
}
false
}
}
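
These entry filters are plain FnMut closures over BucketEntryInner, which is what lets combine_entry_filters (further down in this file) merge them with a short-circuit &&. An illustrative sketch of the same pattern with a stand-in entry type:

struct Entry {
    has_public_dial_info: bool,
    is_relay_capable: bool,
}

fn make_public_filter() -> impl FnMut(&Entry) -> bool {
    move |e| e.has_public_dial_info
}

// Same shape as combine_entry_filters: both filters must accept the entry
fn combine<F, G>(mut f1: F, mut f2: G) -> impl FnMut(&Entry) -> bool
where
    F: FnMut(&Entry) -> bool,
    G: FnMut(&Entry) -> bool,
{
    move |e| f1(e) && f2(e)
}

fn main() {
    let mut filter = combine(make_public_filter(), |e: &Entry| e.is_relay_capable);
    assert!(filter(&Entry { has_public_dial_info: true, is_relay_capable: true }));
    assert!(!filter(&Entry { has_public_dial_info: true, is_relay_capable: false }));
}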
|
||||
|
||||
// Makes a filter that finds nodes capable of dialing a particular outbound dialinfo
|
||||
pub fn make_outbound_dial_info_entry_filter(
|
||||
routing_domain: RoutingDomain,
|
||||
dial_info: DialInfo,
|
||||
) -> impl FnMut(&BucketEntryInner) -> bool {
|
||||
// does the node's outbound capabilities match the dialinfo?
|
||||
move |e| {
|
||||
e.node_info()
|
||||
.map(|n| {
|
||||
let mut dif = DialInfoFilter::all();
|
||||
dif = dif.with_protocol_type_set(n.outbound_protocols);
|
||||
dif = dif.with_address_type_set(n.address_types);
|
||||
dial_info.matches_filter(&dif)
|
||||
})
|
||||
.unwrap_or(false)
|
||||
if let Some(ni) = e.node_info(routing_domain) {
|
||||
let dif = DialInfoFilter::all()
|
||||
.with_protocol_type_set(ni.outbound_protocols)
|
||||
.with_address_type_set(ni.address_types);
|
||||
if dial_info.matches_filter(&dif) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
false
|
||||
}
|
||||
}
|
||||
|
||||
// Make a filter that wraps another filter
|
||||
pub fn combine_filters<F, G>(mut f1: F, mut f2: G) -> impl FnMut(&BucketEntryInner) -> bool
|
||||
pub fn combine_entry_filters<F, G>(
|
||||
mut f1: F,
|
||||
mut f2: G,
|
||||
) -> impl FnMut(&BucketEntryInner) -> bool
|
||||
where
|
||||
F: FnMut(&BucketEntryInner) -> bool,
|
||||
G: FnMut(&BucketEntryInner) -> bool,
|
||||
@ -75,18 +84,21 @@ impl RoutingTable {
|
||||
// count
|
||||
node_count,
|
||||
// filter
|
||||
Some(move |_k: DHTKey, v: Option<Arc<BucketEntry>>| {
|
||||
|_k: DHTKey, v: Option<Arc<BucketEntry>>| {
|
||||
let entry = v.unwrap();
|
||||
entry.with(|e| {
|
||||
// skip nodes on our local network here
|
||||
if e.local_node_info().is_some() {
|
||||
// skip nodes on local network
|
||||
if e.node_info(RoutingDomain::LocalNetwork).is_some() {
|
||||
return false;
|
||||
}
|
||||
// skip nodes not on public internet
|
||||
if e.node_info(RoutingDomain::PublicInternet).is_none() {
|
||||
return false;
|
||||
}
|
||||
|
||||
// skip nodes that dont match entry filter
|
||||
entry_filter(e)
|
||||
})
|
||||
}),
|
||||
},
|
||||
// transform
|
||||
|k: DHTKey, v: Option<Arc<BucketEntry>>| {
|
||||
NodeRef::new(self.clone(), k, v.unwrap().clone(), None)
|
||||
@ -109,37 +121,34 @@ impl RoutingTable {
|
||||
// count
|
||||
protocol_types.len() * 2 * max_per_type,
|
||||
// filter
|
||||
Some(move |_k: DHTKey, v: Option<Arc<BucketEntry>>| {
|
||||
move |_k: DHTKey, v: Option<Arc<BucketEntry>>| {
|
||||
let entry = v.unwrap();
|
||||
entry.with(|e| {
|
||||
// skip nodes on our local network here
|
||||
if e.local_node_info().is_some() {
|
||||
if e.has_node_info(RoutingDomain::LocalNetwork.into()) {
|
||||
return false;
|
||||
}
|
||||
|
||||
// does it have some dial info we need?
|
||||
let filter = |n: NodeInfo| {
|
||||
let filter = |n: &NodeInfo| {
|
||||
let mut keep = false;
|
||||
for did in n.dial_info_detail_list {
|
||||
if did.dial_info.is_global() {
|
||||
if matches!(did.dial_info.address_type(), AddressType::IPV4) {
|
||||
for (n, protocol_type) in protocol_types.iter().enumerate() {
|
||||
if nodes_proto_v4[n] < max_per_type
|
||||
&& did.dial_info.protocol_type() == *protocol_type
|
||||
{
|
||||
nodes_proto_v4[n] += 1;
|
||||
keep = true;
|
||||
}
|
||||
for did in &n.dial_info_detail_list {
|
||||
if matches!(did.dial_info.address_type(), AddressType::IPV4) {
|
||||
for (n, protocol_type) in protocol_types.iter().enumerate() {
|
||||
if nodes_proto_v4[n] < max_per_type
|
||||
&& did.dial_info.protocol_type() == *protocol_type
|
||||
{
|
||||
nodes_proto_v4[n] += 1;
|
||||
keep = true;
|
||||
}
|
||||
} else if matches!(did.dial_info.address_type(), AddressType::IPV6)
|
||||
{
|
||||
for (n, protocol_type) in protocol_types.iter().enumerate() {
|
||||
if nodes_proto_v6[n] < max_per_type
|
||||
&& did.dial_info.protocol_type() == *protocol_type
|
||||
{
|
||||
nodes_proto_v6[n] += 1;
|
||||
keep = true;
|
||||
}
|
||||
}
|
||||
} else if matches!(did.dial_info.address_type(), AddressType::IPV6) {
|
||||
for (n, protocol_type) in protocol_types.iter().enumerate() {
|
||||
if nodes_proto_v6[n] < max_per_type
|
||||
&& did.dial_info.protocol_type() == *protocol_type
|
||||
{
|
||||
nodes_proto_v6[n] += 1;
|
||||
keep = true;
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -147,9 +156,11 @@ impl RoutingTable {
|
||||
keep
|
||||
};
|
||||
|
||||
e.node_info().map(filter).unwrap_or(false)
|
||||
e.node_info(RoutingDomain::PublicInternet)
|
||||
.map(filter)
|
||||
.unwrap_or(false)
|
||||
})
|
||||
}),
|
||||
},
|
||||
// transform
|
||||
|k: DHTKey, v: Option<Arc<BucketEntry>>| {
|
||||
NodeRef::new(self.clone(), k, v.unwrap().clone(), None)
|
||||
@ -157,50 +168,30 @@ impl RoutingTable {
|
||||
)
|
||||
}
|
||||
|
||||
// Get our own node's peer info (public node info) so we can share it with other nodes
|
||||
pub fn get_own_peer_info(&self) -> PeerInfo {
|
||||
PeerInfo::new(NodeId::new(self.node_id()), self.get_own_signed_node_info())
|
||||
}
|
||||
|
||||
pub fn get_own_signed_node_info(&self) -> SignedNodeInfo {
|
||||
let node_id = NodeId::new(self.node_id());
|
||||
let secret = self.node_id_secret();
|
||||
SignedNodeInfo::with_secret(self.get_own_node_info(), node_id, &secret).unwrap()
|
||||
}
|
||||
|
||||
pub fn get_own_node_info(&self) -> NodeInfo {
|
||||
let netman = self.network_manager();
|
||||
let relay_node = netman.relay_node();
|
||||
let pc = netman.get_protocol_config();
|
||||
NodeInfo {
|
||||
network_class: netman.get_network_class().unwrap_or(NetworkClass::Invalid),
|
||||
outbound_protocols: pc.outbound,
|
||||
address_types: pc.family_global,
|
||||
min_version: MIN_VERSION,
|
||||
max_version: MAX_VERSION,
|
||||
dial_info_detail_list: self.dial_info_details(RoutingDomain::PublicInternet),
|
||||
relay_peer_info: relay_node.and_then(|rn| rn.peer_info().map(Box::new)),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn filter_has_valid_signed_node_info(
|
||||
&self,
|
||||
routing_domain: RoutingDomain,
|
||||
v: Option<Arc<BucketEntry>>,
|
||||
own_peer_info_is_valid: bool,
|
||||
) -> bool {
|
||||
match v {
|
||||
None => own_peer_info_is_valid,
|
||||
Some(entry) => entry.with(|e| e.has_valid_signed_node_info()),
|
||||
None => self.has_valid_own_node_info(routing_domain),
|
||||
Some(entry) => entry.with(|e| {
|
||||
e.signed_node_info(routing_domain.into())
|
||||
.map(|sni| sni.has_valid_signature())
|
||||
.unwrap_or(false)
|
||||
}),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn transform_to_peer_info(
|
||||
&self,
|
||||
routing_domain: RoutingDomain,
|
||||
k: DHTKey,
|
||||
v: Option<Arc<BucketEntry>>,
|
||||
own_peer_info: &PeerInfo,
|
||||
) -> PeerInfo {
|
||||
match v {
|
||||
None => own_peer_info.clone(),
|
||||
Some(entry) => entry.with(|e| e.peer_info(k).unwrap()),
|
||||
None => self.get_own_peer_info(routing_domain),
|
||||
Some(entry) => entry.with(|e| e.make_peer_info(k, routing_domain).unwrap()),
|
||||
}
|
||||
}
|
||||
|
||||
@ -221,7 +212,7 @@ impl RoutingTable {
|
||||
T: FnMut(DHTKey, Option<Arc<BucketEntry>>) -> O,
|
||||
{
|
||||
let inner = self.inner.read();
|
||||
let self_node_id = inner.node_id;
|
||||
let self_node_id = self.unlocked_inner.node_id;
|
||||
|
||||
// collect all the nodes for sorting
|
||||
let mut nodes =
|
||||
@ -258,7 +249,7 @@ impl RoutingTable {
|
||||
pub fn find_fastest_nodes<T, F, O>(
|
||||
&self,
|
||||
node_count: usize,
|
||||
mut filter: Option<F>,
|
||||
mut filter: F,
|
||||
transform: T,
|
||||
) -> Vec<O>
|
||||
where
|
||||
@ -276,7 +267,7 @@ impl RoutingTable {
|
||||
if entry.with(|e| e.state(cur_ts) == BucketEntryState::Dead) {
|
||||
false
|
||||
} else {
|
||||
filter.as_mut().map(|f| f(k, v)).unwrap_or(true)
|
||||
filter(k, v)
|
||||
}
|
||||
} else {
|
||||
// always filter out self peer, as it is irrelevant to the 'fastest nodes' search
|
||||
@ -340,23 +331,23 @@ impl RoutingTable {
|
||||
pub fn find_closest_nodes<F, T, O>(
|
||||
&self,
|
||||
node_id: DHTKey,
|
||||
mut filter: Option<F>,
|
||||
filter: F,
|
||||
mut transform: T,
|
||||
) -> Vec<O>
|
||||
where
|
||||
T: FnMut(DHTKey, Option<Arc<BucketEntry>>) -> O,
|
||||
F: FnMut(DHTKey, Option<Arc<BucketEntry>>) -> bool,
|
||||
T: FnMut(DHTKey, Option<Arc<BucketEntry>>) -> O,
|
||||
{
|
||||
let cur_ts = intf::get_timestamp();
|
||||
let node_count = {
|
||||
let c = self.config.get();
|
||||
let c = self.unlocked_inner.config.get();
|
||||
c.network.dht.max_find_node_count as usize
|
||||
};
|
||||
let out = self.find_peers_with_sort_and_filter(
|
||||
node_count,
|
||||
cur_ts,
|
||||
// filter
|
||||
|k, v| filter.as_mut().map(|f| f(k, v)).unwrap_or(true),
|
||||
filter,
|
||||
// sort
|
||||
|(a_key, a_entry), (b_key, b_entry)| {
|
||||
// same nodes are always the same
|
||||
@ -402,7 +393,7 @@ impl RoutingTable {
|
||||
let mut protocol_to_port =
|
||||
BTreeMap::<(ProtocolType, AddressType), (LowLevelProtocolType, u16)>::new();
|
||||
let our_dids = self.all_filtered_dial_info_details(
|
||||
Some(RoutingDomain::PublicInternet),
|
||||
RoutingDomain::PublicInternet.into(),
|
||||
&DialInfoFilter::all(),
|
||||
);
|
||||
for did in our_dids {
|
||||
@ -425,7 +416,7 @@ impl RoutingTable {
|
||||
}
|
||||
}
|
||||
|
||||
fn make_relay_node_filter(&self) -> impl Fn(&BucketEntryInner) -> bool {
|
||||
fn make_public_internet_relay_node_filter(&self) -> impl Fn(&BucketEntryInner) -> bool {
|
||||
// Get all our outbound protocol/address types
|
||||
let outbound_dif = self
|
||||
.network_manager()
|
||||
@ -433,12 +424,8 @@ impl RoutingTable {
|
||||
let mapped_port_info = self.get_mapped_port_info();
|
||||
|
||||
move |e: &BucketEntryInner| {
|
||||
// Ensure this node is not on our local network
|
||||
let has_local_dial_info = e
|
||||
.local_node_info()
|
||||
.map(|l| l.has_dial_info())
|
||||
.unwrap_or(false);
|
||||
if has_local_dial_info {
|
||||
// Ensure this node is not on the local network
|
||||
if e.has_node_info(RoutingDomain::LocalNetwork.into()) {
|
||||
return false;
|
||||
}
|
||||
|
||||
@ -447,7 +434,7 @@ impl RoutingTable {
|
||||
let mut low_level_protocol_ports = mapped_port_info.low_level_protocol_ports.clone();
|
||||
|
||||
let can_serve_as_relay = e
|
||||
.node_info()
|
||||
.node_info(RoutingDomain::PublicInternet)
|
||||
.map(|n| {
|
||||
let dids =
|
||||
n.all_filtered_dial_info_details(|did| did.matches_filter(&outbound_dif));
|
||||
@ -471,9 +458,18 @@ impl RoutingTable {
|
||||
}
|
||||
|
||||
#[instrument(level = "trace", skip(self), ret)]
|
||||
pub fn find_inbound_relay(&self, cur_ts: u64) -> Option<NodeRef> {
|
||||
pub fn find_inbound_relay(
|
||||
&self,
|
||||
routing_domain: RoutingDomain,
|
||||
cur_ts: u64,
|
||||
) -> Option<NodeRef> {
|
||||
// Get relay filter function
|
||||
let relay_node_filter = self.make_relay_node_filter();
|
||||
let relay_node_filter = match routing_domain {
|
||||
RoutingDomain::PublicInternet => self.make_public_internet_relay_node_filter(),
|
||||
RoutingDomain::LocalNetwork => {
|
||||
unimplemented!();
|
||||
}
|
||||
};
|
||||
|
||||
// Go through all entries and find fastest entry that matches filter function
|
||||
let inner = self.inner.read();
|
||||
@ -485,9 +481,9 @@ impl RoutingTable {
|
||||
let v2 = v.clone();
|
||||
v.with(|e| {
|
||||
// Ensure we have the node's status
|
||||
if let Some(node_status) = e.peer_stats().status.clone() {
|
||||
if let Some(node_status) = e.node_status(routing_domain) {
|
||||
// Ensure the node will relay
|
||||
if node_status.will_relay {
|
||||
if node_status.will_relay() {
|
||||
// Compare against previous candidate
|
||||
if let Some(best_inbound_relay) = best_inbound_relay.as_mut() {
|
||||
// Less is faster
|
||||
@ -534,6 +530,7 @@ impl RoutingTable {
|
||||
|
||||
// register the node if it's new
|
||||
if let Some(nr) = self.register_node_with_signed_node_info(
|
||||
RoutingDomain::PublicInternet,
|
||||
p.node_id.key,
|
||||
p.signed_node_info.clone(),
|
||||
false,
|
||||
@ -555,12 +552,7 @@ impl RoutingTable {
|
||||
let res = network_result_try!(
|
||||
rpc_processor
|
||||
.clone()
|
||||
.rpc_call_find_node(
|
||||
Destination::Direct(node_ref.clone()),
|
||||
node_id,
|
||||
None,
|
||||
rpc_processor.make_respond_to_sender(node_ref.clone()),
|
||||
)
|
||||
.rpc_call_find_node(Destination::direct(node_ref), node_id)
|
||||
.await?
|
||||
);
|
||||
|
||||
|
@ -3,6 +3,8 @@ mod bucket_entry;
|
||||
mod debug;
|
||||
mod find_nodes;
|
||||
mod node_ref;
|
||||
mod routing_domain_editor;
|
||||
mod routing_domains;
|
||||
mod stats_accounting;
|
||||
mod tasks;
|
||||
|
||||
@ -15,78 +17,98 @@ use bucket::*;
|
||||
pub use bucket_entry::*;
|
||||
pub use debug::*;
|
||||
pub use find_nodes::*;
|
||||
use hashlink::LruCache;
|
||||
pub use node_ref::*;
|
||||
pub use routing_domain_editor::*;
|
||||
pub use routing_domains::*;
|
||||
pub use stats_accounting::*;
|
||||
|
||||
const RECENT_PEERS_TABLE_SIZE: usize = 64;

//////////////////////////////////////////////////////////////////////////

#[derive(Debug, Copy, Clone, PartialEq, PartialOrd, Ord, Eq)]
pub enum RoutingDomain {
PublicInternet,
LocalNetwork,
}

#[derive(Debug, Default)]
pub struct RoutingDomainDetail {
dial_info_details: Vec<DialInfoDetail>,
#[derive(Debug, Clone, Copy)]
pub struct RecentPeersEntry {
pub last_connection: ConnectionDescriptor,
}

/// RoutingTable rwlock-internal data
struct RoutingTableInner {
network_manager: NetworkManager,
node_id: DHTKey, // The current node's public DHT key
node_id_secret: DHTKeySecret, // The current node's DHT key secret

buckets: Vec<Bucket>, // Routing table buckets that hold entries
kick_queue: BTreeSet<usize>, // Buckets to kick on our next kick task
bucket_entry_count: usize, // A fast counter for the number of entries in the table, total

public_internet_routing_domain: RoutingDomainDetail, // The dial info we use on the public internet
local_network_routing_domain: RoutingDomainDetail, // The dial info we use on the local network

self_latency_stats_accounting: LatencyStatsAccounting, // Interim accounting mechanism for this node's RPC latency to any other node
self_transfer_stats_accounting: TransferStatsAccounting, // Interim accounting mechanism for the total bandwidth to/from this node
self_transfer_stats: TransferStatsDownUp, // Statistics about the total bandwidth to/from this node
/// Routing table buckets that hold entries
buckets: Vec<Bucket>,
/// A fast counter for the number of entries in the table, total
bucket_entry_count: usize,
/// The public internet routing domain
public_internet_routing_domain: PublicInternetRoutingDomainDetail,
/// The dial info we use on the local network
local_network_routing_domain: LocalInternetRoutingDomainDetail,
/// Interim accounting mechanism for this node's RPC latency to any other node
self_latency_stats_accounting: LatencyStatsAccounting,
/// Interim accounting mechanism for the total bandwidth to/from this node
self_transfer_stats_accounting: TransferStatsAccounting,
/// Statistics about the total bandwidth to/from this node
self_transfer_stats: TransferStatsDownUp,
/// Peers we have recently communicated with
recent_peers: LruCache<DHTKey, RecentPeersEntry>,
}

#[derive(Clone, Debug, Default)]
pub struct RoutingTableHealth {
/// Number of reliable (responsive) entries in the routing table
pub reliable_entry_count: usize,
/// Number of unreliable (occasionally unresponsive) entries in the routing table
pub unreliable_entry_count: usize,
/// Number of dead (always unresponsive) entries in the routing table
pub dead_entry_count: usize,
}

struct RoutingTableUnlockedInner {
// Background processes
// Accessors
config: VeilidConfig,
network_manager: NetworkManager,

/// The current node's public DHT key
node_id: DHTKey,
/// The current node's DHT key secret
node_id_secret: DHTKeySecret,
/// Buckets to kick on our next kick task
kick_queue: Mutex<BTreeSet<usize>>,
/// Background process for computing statistics
rolling_transfers_task: TickTask<EyreReport>,
/// Backgroup process to purge dead routing table entries when necessary
kick_buckets_task: TickTask<EyreReport>,
}

#[derive(Clone)]
pub struct RoutingTable {
config: VeilidConfig,
inner: Arc<RwLock<RoutingTableInner>>,
unlocked_inner: Arc<RoutingTableUnlockedInner>,
}
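
The split between RoutingTableInner and RoutingTableUnlockedInner keeps data that is immutable after startup, or that carries its own synchronization, out of the main RwLock. A condensed sketch of that pattern with illustrative types (std locks here rather than parking_lot):

use std::collections::BTreeSet;
use std::sync::{Arc, Mutex, RwLock};

struct Inner {
    bucket_entry_count: usize, // mutable state guarded by the RwLock
}

struct UnlockedInner {
    node_id: u64,                       // immutable after construction
    kick_queue: Mutex<BTreeSet<usize>>, // has its own lock, no RwLock needed
}

#[derive(Clone)]
struct TableSketch {
    inner: Arc<RwLock<Inner>>,
    unlocked_inner: Arc<UnlockedInner>,
}

impl TableSketch {
    fn node_id(&self) -> u64 {
        // No read() call required, unlike fields stored in Inner
        self.unlocked_inner.node_id
    }
    fn queue_kick(&self, bucket_index: usize) {
        self.unlocked_inner.kick_queue.lock().unwrap().insert(bucket_index);
    }
    fn entry_count(&self) -> usize {
        self.inner.read().unwrap().bucket_entry_count
    }
}

fn main() {
    let table = TableSketch {
        inner: Arc::new(RwLock::new(Inner { bucket_entry_count: 0 })),
        unlocked_inner: Arc::new(UnlockedInner {
            node_id: 42,
            kick_queue: Mutex::new(BTreeSet::new()),
        }),
    };
    table.queue_kick(3);
    assert_eq!(table.node_id(), 42);
    assert_eq!(table.entry_count(), 0);
}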
|
||||
|
||||
impl RoutingTable {
|
||||
fn new_inner(network_manager: NetworkManager) -> RoutingTableInner {
|
||||
fn new_inner() -> RoutingTableInner {
|
||||
RoutingTableInner {
|
||||
network_manager,
|
||||
node_id: DHTKey::default(),
|
||||
node_id_secret: DHTKeySecret::default(),
|
||||
buckets: Vec::new(),
|
||||
kick_queue: BTreeSet::default(),
|
||||
public_internet_routing_domain: RoutingDomainDetail::default(),
|
||||
local_network_routing_domain: RoutingDomainDetail::default(),
|
||||
public_internet_routing_domain: PublicInternetRoutingDomainDetail::default(),
|
||||
local_network_routing_domain: LocalInternetRoutingDomainDetail::default(),
|
||||
bucket_entry_count: 0,
|
||||
self_latency_stats_accounting: LatencyStatsAccounting::new(),
|
||||
self_transfer_stats_accounting: TransferStatsAccounting::new(),
|
||||
self_transfer_stats: TransferStatsDownUp::default(),
|
||||
recent_peers: LruCache::new(RECENT_PEERS_TABLE_SIZE),
|
||||
}
|
||||
}
|
||||
fn new_unlocked_inner(_config: VeilidConfig) -> RoutingTableUnlockedInner {
|
||||
//let c = config.get();
|
||||
fn new_unlocked_inner(
|
||||
config: VeilidConfig,
|
||||
network_manager: NetworkManager,
|
||||
) -> RoutingTableUnlockedInner {
|
||||
let c = config.get();
|
||||
RoutingTableUnlockedInner {
|
||||
config: config.clone(),
|
||||
network_manager,
|
||||
node_id: c.network.node_id,
|
||||
node_id_secret: c.network.node_id_secret,
|
||||
kick_queue: Mutex::new(BTreeSet::default()),
|
||||
rolling_transfers_task: TickTask::new(ROLLING_TRANSFERS_INTERVAL_SECS),
|
||||
kick_buckets_task: TickTask::new(1),
|
||||
}
|
||||
@ -94,9 +116,8 @@ impl RoutingTable {
|
||||
pub fn new(network_manager: NetworkManager) -> Self {
|
||||
let config = network_manager.config();
|
||||
let this = Self {
|
||||
config: config.clone(),
|
||||
inner: Arc::new(RwLock::new(Self::new_inner(network_manager))),
|
||||
unlocked_inner: Arc::new(Self::new_unlocked_inner(config)),
|
||||
inner: Arc::new(RwLock::new(Self::new_inner())),
|
||||
unlocked_inner: Arc::new(Self::new_unlocked_inner(config, network_manager)),
|
||||
};
|
||||
// Set rolling transfers tick task
|
||||
{
|
||||
@ -121,23 +142,42 @@ impl RoutingTable {
|
||||
}
|
||||
|
||||
pub fn network_manager(&self) -> NetworkManager {
|
||||
self.inner.read().network_manager.clone()
|
||||
self.unlocked_inner.network_manager.clone()
|
||||
}
|
||||
pub fn rpc_processor(&self) -> RPCProcessor {
|
||||
self.network_manager().rpc_processor()
|
||||
}
|
||||
|
||||
pub fn node_id(&self) -> DHTKey {
|
||||
self.inner.read().node_id
|
||||
self.unlocked_inner.node_id
|
||||
}
|
||||
|
||||
pub fn node_id_secret(&self) -> DHTKeySecret {
|
||||
self.inner.read().node_id_secret
|
||||
self.unlocked_inner.node_id_secret
|
||||
}
|
||||
|
||||
fn routing_domain_for_address_inner(
|
||||
inner: &RoutingTableInner,
|
||||
address: Address,
|
||||
) -> Option<RoutingDomain> {
|
||||
for rd in RoutingDomain::all() {
|
||||
let can_contain =
|
||||
Self::with_routing_domain(inner, rd, |rdd| rdd.can_contain_address(address));
|
||||
if can_contain {
|
||||
return Some(rd);
|
||||
}
|
||||
}
|
||||
None
|
||||
}
|
||||
|
||||
pub fn routing_domain_for_address(&self, address: Address) -> Option<RoutingDomain> {
|
||||
let inner = self.inner.read();
|
||||
Self::routing_domain_for_address_inner(&*inner, address)
|
||||
}
|
||||
|
||||
fn with_routing_domain<F, R>(inner: &RoutingTableInner, domain: RoutingDomain, f: F) -> R
|
||||
where
|
||||
F: FnOnce(&RoutingDomainDetail) -> R,
|
||||
F: FnOnce(&dyn RoutingDomainDetail) -> R,
|
||||
{
|
||||
match domain {
|
||||
RoutingDomain::PublicInternet => f(&inner.public_internet_routing_domain),
|
||||
@ -151,7 +191,7 @@ impl RoutingTable {
|
||||
f: F,
|
||||
) -> R
|
||||
where
|
||||
F: FnOnce(&mut RoutingDomainDetail) -> R,
|
||||
F: FnOnce(&mut dyn RoutingDomainDetail) -> R,
|
||||
{
|
||||
match domain {
|
||||
RoutingDomain::PublicInternet => f(&mut inner.public_internet_routing_domain),
|
||||
@ -159,71 +199,53 @@ impl RoutingTable {
|
||||
}
|
||||
}
|
||||
|
||||
pub fn relay_node(&self, domain: RoutingDomain) -> Option<NodeRef> {
|
||||
let inner = self.inner.read();
|
||||
Self::with_routing_domain(&*inner, domain, |rd| rd.relay_node())
|
||||
}
|
||||
|
||||
pub fn has_dial_info(&self, domain: RoutingDomain) -> bool {
|
||||
let inner = self.inner.read();
|
||||
Self::with_routing_domain(&*inner, domain, |rd| !rd.dial_info_details.is_empty())
|
||||
Self::with_routing_domain(&*inner, domain, |rd| !rd.dial_info_details().is_empty())
|
||||
}
|
||||
|
||||
pub fn dial_info_details(&self, domain: RoutingDomain) -> Vec<DialInfoDetail> {
|
||||
let inner = self.inner.read();
|
||||
Self::with_routing_domain(&*inner, domain, |rd| rd.dial_info_details.clone())
|
||||
Self::with_routing_domain(&*inner, domain, |rd| rd.dial_info_details().clone())
|
||||
}
|
||||
|
||||
pub fn first_filtered_dial_info_detail(
|
||||
&self,
|
||||
domain: Option<RoutingDomain>,
|
||||
routing_domain_set: RoutingDomainSet,
|
||||
filter: &DialInfoFilter,
|
||||
) -> Option<DialInfoDetail> {
|
||||
let inner = self.inner.read();
|
||||
// Prefer local network first if it isn't filtered out
|
||||
if domain == None || domain == Some(RoutingDomain::LocalNetwork) {
|
||||
Self::with_routing_domain(&*inner, RoutingDomain::LocalNetwork, |rd| {
|
||||
for did in &rd.dial_info_details {
|
||||
for routing_domain in routing_domain_set {
|
||||
let did = Self::with_routing_domain(&*inner, routing_domain, |rd| {
|
||||
for did in rd.dial_info_details() {
|
||||
if did.matches_filter(filter) {
|
||||
return Some(did.clone());
|
||||
}
|
||||
}
|
||||
None
|
||||
})
|
||||
} else {
|
||||
None
|
||||
}
|
||||
.or_else(|| {
|
||||
if domain == None || domain == Some(RoutingDomain::PublicInternet) {
|
||||
Self::with_routing_domain(&*inner, RoutingDomain::PublicInternet, |rd| {
|
||||
for did in &rd.dial_info_details {
|
||||
if did.matches_filter(filter) {
|
||||
return Some(did.clone());
|
||||
}
|
||||
}
|
||||
None
|
||||
})
|
||||
} else {
|
||||
None
|
||||
});
|
||||
if did.is_some() {
|
||||
return did;
|
||||
}
|
||||
})
|
||||
}
|
||||
None
|
||||
}
|
||||
|
||||
pub fn all_filtered_dial_info_details(
|
||||
&self,
|
||||
domain: Option<RoutingDomain>,
|
||||
routing_domain_set: RoutingDomainSet,
|
||||
filter: &DialInfoFilter,
|
||||
) -> Vec<DialInfoDetail> {
|
||||
let inner = self.inner.read();
|
||||
let mut ret = Vec::new();
|
||||
|
||||
if domain == None || domain == Some(RoutingDomain::LocalNetwork) {
|
||||
Self::with_routing_domain(&*inner, RoutingDomain::LocalNetwork, |rd| {
|
||||
for did in &rd.dial_info_details {
|
||||
if did.matches_filter(filter) {
|
||||
ret.push(did.clone());
|
||||
}
|
||||
}
|
||||
});
|
||||
}
|
||||
if domain == None || domain == Some(RoutingDomain::PublicInternet) {
|
||||
Self::with_routing_domain(&*inner, RoutingDomain::PublicInternet, |rd| {
|
||||
for did in &rd.dial_info_details {
|
||||
for routing_domain in routing_domain_set {
|
||||
Self::with_routing_domain(&*inner, routing_domain, |rd| {
|
||||
for did in rd.dial_info_details() {
|
||||
if did.matches_filter(filter) {
|
||||
ret.push(did.clone());
|
||||
}
|
||||
@ -235,17 +257,13 @@ impl RoutingTable {
|
||||
}
|
||||
|
||||
pub fn ensure_dial_info_is_valid(&self, domain: RoutingDomain, dial_info: &DialInfo) -> bool {
|
||||
let enable_local_peer_scope = {
|
||||
let config = self.network_manager().config();
|
||||
let c = config.get();
|
||||
c.network.enable_local_peer_scope
|
||||
};
|
||||
let address = dial_info.socket_address().address();
|
||||
let inner = self.inner.read();
|
||||
let can_contain_address =
|
||||
Self::with_routing_domain(&*inner, domain, |rd| rd.can_contain_address(address));
|
||||
|
||||
if !enable_local_peer_scope
|
||||
&& matches!(domain, RoutingDomain::PublicInternet)
|
||||
&& dial_info.is_local()
|
||||
{
|
||||
log_rtab!(debug "shouldn't be registering local addresses as public");
|
||||
if !can_contain_address {
|
||||
log_rtab!(debug "can not add dial info to this routing domain");
|
||||
return false;
|
||||
}
|
||||
if !dial_info.is_valid() {
|
||||
@ -258,59 +276,47 @@ impl RoutingTable {
|
||||
true
|
||||
}
|
||||
|
||||
#[instrument(level = "debug", skip(self), err)]
|
||||
pub fn register_dial_info(
|
||||
pub fn node_info_is_valid_in_routing_domain(
|
||||
&self,
|
||||
domain: RoutingDomain,
|
||||
dial_info: DialInfo,
|
||||
class: DialInfoClass,
|
||||
) -> EyreResult<()> {
|
||||
if !self.ensure_dial_info_is_valid(domain, &dial_info) {
|
||||
return Err(eyre!("dial info is not valid"));
|
||||
routing_domain: RoutingDomain,
|
||||
node_info: &NodeInfo,
|
||||
) -> bool {
|
||||
// Should not be passing around nodeinfo with an invalid network class
|
||||
if matches!(node_info.network_class, NetworkClass::Invalid) {
|
||||
return false;
|
||||
}
|
||||
|
||||
let mut inner = self.inner.write();
|
||||
Self::with_routing_domain_mut(&mut *inner, domain, |rd| {
|
||||
rd.dial_info_details.push(DialInfoDetail {
|
||||
dial_info: dial_info.clone(),
|
||||
class,
|
||||
});
|
||||
rd.dial_info_details.sort();
|
||||
});
|
||||
|
||||
let domain_str = match domain {
|
||||
RoutingDomain::PublicInternet => "Public",
|
||||
RoutingDomain::LocalNetwork => "Local",
|
||||
};
|
||||
info!(
|
||||
"{} Dial Info: {}",
|
||||
domain_str,
|
||||
NodeDialInfo {
|
||||
node_id: NodeId::new(inner.node_id),
|
||||
dial_info
|
||||
// Ensure all of the dial info works in this routing domain
|
||||
for did in &node_info.dial_info_detail_list {
|
||||
if !self.ensure_dial_info_is_valid(routing_domain, &did.dial_info) {
|
||||
return false;
|
||||
}
|
||||
.to_string(),
|
||||
);
|
||||
debug!(" Class: {:?}", class);
|
||||
|
||||
// Public dial info changed, go through all nodes and reset their 'seen our node info' bit
|
||||
if matches!(domain, RoutingDomain::PublicInternet) {
|
||||
Self::reset_all_seen_our_node_info(&*inner);
|
||||
Self::reset_all_updated_since_last_network_change(&*inner);
|
||||
}
|
||||
|
||||
Ok(())
|
||||
// Ensure the relay is also valid in this routing domain if it is provided
|
||||
if let Some(relay_peer_info) = node_info.relay_peer_info.as_ref() {
|
||||
let relay_ni = &relay_peer_info.signed_node_info.node_info;
|
||||
if !self.node_info_is_valid_in_routing_domain(routing_domain, relay_ni) {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
true
|
||||
}
|
||||
|
||||
fn reset_all_seen_our_node_info(inner: &RoutingTableInner) {
|
||||
#[instrument(level = "debug", skip(self))]
|
||||
pub fn edit_routing_domain(&self, domain: RoutingDomain) -> RoutingDomainEditor {
|
||||
RoutingDomainEditor::new(self.clone(), domain)
|
||||
}
|
||||
|
||||
fn reset_all_seen_our_node_info(inner: &mut RoutingTableInner, routing_domain: RoutingDomain) {
|
||||
let cur_ts = intf::get_timestamp();
|
||||
Self::with_entries(&*inner, cur_ts, BucketEntryState::Dead, |_, v| {
|
||||
v.with_mut(|e| e.set_seen_our_node_info(false));
|
||||
v.with_mut(|e| {
|
||||
e.set_seen_our_node_info(routing_domain, false);
|
||||
});
|
||||
Option::<()>::None
|
||||
});
|
||||
}
|
||||
|
||||
fn reset_all_updated_since_last_network_change(inner: &RoutingTableInner) {
|
||||
fn reset_all_updated_since_last_network_change(inner: &mut RoutingTableInner) {
|
||||
let cur_ts = intf::get_timestamp();
|
||||
Self::with_entries(&*inner, cur_ts, BucketEntryState::Dead, |_, v| {
|
||||
v.with_mut(|e| e.set_updated_since_last_network_change(false));
|
||||
@ -318,20 +324,46 @@ impl RoutingTable {
|
||||
});
|
||||
}
|
||||
|
||||
pub fn clear_dial_info_details(&self, domain: RoutingDomain) {
|
||||
trace!("clearing dial info domain: {:?}", domain);
|
||||
pub fn get_own_peer_info(&self, routing_domain: RoutingDomain) -> PeerInfo {
|
||||
PeerInfo::new(
|
||||
NodeId::new(self.node_id()),
|
||||
self.get_own_signed_node_info(routing_domain),
|
||||
)
|
||||
}
|
||||
|
||||
let mut inner = self.inner.write();
|
||||
Self::with_routing_domain_mut(&mut *inner, domain, |rd| {
|
||||
rd.dial_info_details.clear();
|
||||
});
|
||||
pub fn get_own_signed_node_info(&self, routing_domain: RoutingDomain) -> SignedNodeInfo {
|
||||
let node_id = NodeId::new(self.node_id());
|
||||
let secret = self.node_id_secret();
|
||||
SignedNodeInfo::with_secret(self.get_own_node_info(routing_domain), node_id, &secret)
|
||||
.unwrap()
|
||||
}
|
||||
|
||||
// Public dial info changed, go through all nodes and reset their 'seen our node info' bit
|
||||
if matches!(domain, RoutingDomain::PublicInternet) {
|
||||
Self::reset_all_seen_our_node_info(&*inner);
|
||||
pub fn get_own_node_info(&self, routing_domain: RoutingDomain) -> NodeInfo {
|
||||
let netman = self.network_manager();
|
||||
let relay_node = self.relay_node(routing_domain);
|
||||
let pc = netman.get_protocol_config();
|
||||
NodeInfo {
|
||||
network_class: netman
|
||||
.get_network_class(routing_domain)
|
||||
.unwrap_or(NetworkClass::Invalid),
|
||||
outbound_protocols: pc.outbound,
|
||||
address_types: pc.family_global,
|
||||
min_version: MIN_VERSION,
|
||||
max_version: MAX_VERSION,
|
||||
dial_info_detail_list: self.dial_info_details(routing_domain),
|
||||
relay_peer_info: relay_node
|
||||
.and_then(|rn| rn.make_peer_info(routing_domain).map(Box::new)),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn has_valid_own_node_info(&self, routing_domain: RoutingDomain) -> bool {
|
||||
let netman = self.network_manager();
|
||||
let nc = netman
|
||||
.get_network_class(routing_domain)
|
||||
.unwrap_or(NetworkClass::Invalid);
|
||||
!matches!(nc, NetworkClass::Invalid)
|
||||
}
|
||||
|
||||
fn bucket_depth(index: usize) -> usize {
|
||||
match index {
|
||||
0 => 256,
|
||||
@ -357,11 +389,6 @@ impl RoutingTable {
|
||||
inner.buckets.push(bucket);
|
||||
}
|
||||
|
||||
// make local copy of node id for easy access
|
||||
let c = self.config.get();
|
||||
inner.node_id = c.network.node_id;
|
||||
inner.node_id_secret = c.network.node_id_secret;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
@ -378,11 +405,33 @@ impl RoutingTable {
|
||||
error!("kick_buckets_task not stopped: {}", e);
|
||||
}
|
||||
|
||||
*self.inner.write() = Self::new_inner(self.network_manager());
|
||||
*self.inner.write() = Self::new_inner();
|
||||
|
||||
debug!("finished routing table terminate");
|
||||
}
|
||||
|
||||
pub fn configure_local_network_routing_domain(&self, local_networks: Vec<(IpAddr, IpAddr)>) {
|
||||
log_net!(debug "configure_local_network_routing_domain: {:#?}", local_networks);
|
||||
|
||||
let mut inner = self.inner.write();
|
||||
let changed = inner
|
||||
.local_network_routing_domain
|
||||
.set_local_networks(local_networks);
|
||||
|
||||
// If the local network topology has changed, nuke the existing local node info and let new local discovery happen
|
||||
if changed {
|
||||
let cur_ts = intf::get_timestamp();
|
||||
Self::with_entries(&*inner, cur_ts, BucketEntryState::Dead, |_rti, e| {
|
||||
e.with_mut(|e| {
|
||||
e.clear_signed_node_info(RoutingDomain::LocalNetwork);
|
||||
e.set_seen_our_node_info(RoutingDomain::LocalNetwork, false);
|
||||
e.set_updated_since_last_network_change(false);
|
||||
});
|
||||
Option::<()>::None
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
// Attempt to empty the routing table
|
||||
// should only be performed when there are no node_refs (detached)
|
||||
pub fn purge_buckets(&self) {
|
||||
@ -440,22 +489,34 @@ impl RoutingTable {
|
||||
}
|
||||
}
|
||||
|
||||
fn find_bucket_index(inner: &RoutingTableInner, node_id: DHTKey) -> usize {
distance(&node_id, &inner.node_id)
fn find_bucket_index(&self, node_id: DHTKey) -> usize {
distance(&node_id, &self.unlocked_inner.node_id)
.first_nonzero_bit()
.unwrap()
}
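
find_bucket_index picks a Kademlia-style bucket from the XOR distance between the node id and our own id; distance() and first_nonzero_bit() are not shown in this diff, so here is a minimal sketch of the same idea over plain 64-bit ids (one common indexing convention, not necessarily the exact one used):

fn bucket_index(own_id: u64, node_id: u64) -> Option<usize> {
    let distance = own_id ^ node_id;
    if distance == 0 {
        None // same key as our own: no bucket
    } else {
        // Index of the first (most significant) nonzero bit of the distance
        Some(distance.leading_zeros() as usize)
    }
}

fn main() {
    let own = 0b1000_u64;
    // Ids that differ only in low bits are "close" and land in a high-index bucket
    assert_eq!(bucket_index(own, 0b1001), Some(63));
    // Ids that differ in the top bit are "far" and land in bucket 0
    assert_eq!(bucket_index(own, 1u64 << 63), Some(0));
    assert_eq!(bucket_index(own, own), None);
}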
|
||||
|
||||
pub fn get_entry_count(&self, min_state: BucketEntryState) -> usize {
|
||||
pub fn get_entry_count(
|
||||
&self,
|
||||
routing_domain_set: RoutingDomainSet,
|
||||
min_state: BucketEntryState,
|
||||
) -> usize {
|
||||
let inner = self.inner.read();
|
||||
Self::get_entry_count_inner(&*inner, min_state)
|
||||
Self::get_entry_count_inner(&*inner, routing_domain_set, min_state)
|
||||
}
|
||||
|
||||
fn get_entry_count_inner(inner: &RoutingTableInner, min_state: BucketEntryState) -> usize {
|
||||
fn get_entry_count_inner(
|
||||
inner: &RoutingTableInner,
|
||||
routing_domain_set: RoutingDomainSet,
|
||||
min_state: BucketEntryState,
|
||||
) -> usize {
|
||||
let mut count = 0usize;
|
||||
let cur_ts = intf::get_timestamp();
|
||||
Self::with_entries(inner, cur_ts, min_state, |_, _| {
|
||||
count += 1;
|
||||
Self::with_entries(inner, cur_ts, min_state, |_, e| {
|
||||
if e.with(|e| e.best_routing_domain(routing_domain_set))
|
||||
.is_some()
|
||||
{
|
||||
count += 1;
|
||||
}
|
||||
Option::<()>::None
|
||||
});
|
||||
count
|
||||
@ -479,13 +540,23 @@ impl RoutingTable {
|
||||
None
|
||||
}
|
||||
|
||||
pub fn get_nodes_needing_updates(&self, cur_ts: u64, all: bool) -> Vec<NodeRef> {
|
||||
pub fn get_nodes_needing_updates(
|
||||
&self,
|
||||
routing_domain: RoutingDomain,
|
||||
cur_ts: u64,
|
||||
all: bool,
|
||||
) -> Vec<NodeRef> {
|
||||
let inner = self.inner.read();
|
||||
let mut node_refs = Vec::<NodeRef>::with_capacity(inner.bucket_entry_count);
|
||||
Self::with_entries(&*inner, cur_ts, BucketEntryState::Unreliable, |k, v| {
|
||||
// Only update nodes that haven't seen our node info yet
|
||||
if all || !v.with(|e| e.has_seen_our_node_info()) {
|
||||
node_refs.push(NodeRef::new(self.clone(), k, v, None));
|
||||
if all || !v.with(|e| e.has_seen_our_node_info(routing_domain)) {
|
||||
node_refs.push(NodeRef::new(
|
||||
self.clone(),
|
||||
k,
|
||||
v,
|
||||
Some(NodeRefFilter::new().with_routing_domain(routing_domain)),
|
||||
));
|
||||
}
|
||||
Option::<()>::None
|
||||
});
|
||||
@ -494,15 +565,29 @@ impl RoutingTable {
|
||||
|
||||
pub fn get_nodes_needing_ping(
|
||||
&self,
|
||||
routing_domain: RoutingDomain,
|
||||
cur_ts: u64,
|
||||
relay_node_id: Option<DHTKey>,
|
||||
) -> Vec<NodeRef> {
|
||||
let inner = self.inner.read();
|
||||
|
||||
// Collect relay nodes
|
||||
let opt_relay_id = Self::with_routing_domain(&*inner, routing_domain, |rd| {
|
||||
rd.relay_node().map(|rn| rn.node_id())
|
||||
});
|
||||
|
||||
// Collect all entries that are 'needs_ping' and have some node info making them reachable somehow
|
||||
let mut node_refs = Vec::<NodeRef>::with_capacity(inner.bucket_entry_count);
|
||||
Self::with_entries(&*inner, cur_ts, BucketEntryState::Unreliable, |k, v| {
|
||||
// Only update nodes that haven't seen our node info yet
|
||||
if v.with(|e| e.needs_ping(&k, cur_ts, relay_node_id)) {
|
||||
node_refs.push(NodeRef::new(self.clone(), k, v, None));
|
||||
if v.with(|e| {
|
||||
e.has_node_info(routing_domain.into())
|
||||
&& e.needs_ping(cur_ts, opt_relay_id == Some(k))
|
||||
}) {
|
||||
node_refs.push(NodeRef::new(
|
||||
self.clone(),
|
||||
k,
|
||||
v,
|
||||
Some(NodeRefFilter::new().with_routing_domain(routing_domain)),
|
||||
));
|
||||
}
|
||||
Option::<()>::None
|
||||
});
|
||||
@ -520,9 +605,8 @@ impl RoutingTable {
|
||||
}
|
||||
|
||||
fn queue_bucket_kick(&self, node_id: DHTKey) {
|
||||
let mut inner = self.inner.write();
|
||||
let idx = Self::find_bucket_index(&*inner, node_id);
|
||||
inner.kick_queue.insert(idx);
|
||||
let idx = self.find_bucket_index(node_id);
|
||||
self.unlocked_inner.kick_queue.lock().insert(idx);
|
||||
}
|
||||
|
||||
// Create a node reference, possibly creating a bucket entry
|
||||
@ -542,7 +626,7 @@ impl RoutingTable {
|
||||
let mut inner = self.inner.write();
|
||||
|
||||
// Look up existing entry
|
||||
let idx = Self::find_bucket_index(&*inner, node_id);
|
||||
let idx = self.find_bucket_index(node_id);
|
||||
let noderef = {
|
||||
let bucket = &inner.buckets[idx];
|
||||
let entry = bucket.entry(&node_id);
|
||||
@ -563,8 +647,8 @@ impl RoutingTable {
|
||||
entry.with_mut(update_func);
|
||||
|
||||
// Kick the bucket
|
||||
inner.kick_queue.insert(idx);
|
||||
log_rtab!(debug "Routing table now has {} nodes, {} live", cnt, Self::get_entry_count_inner(&mut *inner, BucketEntryState::Unreliable));
|
||||
self.unlocked_inner.kick_queue.lock().insert(idx);
|
||||
log_rtab!(debug "Routing table now has {} nodes, {} live", cnt, Self::get_entry_count_inner(&mut *inner, RoutingDomainSet::all(), BucketEntryState::Unreliable));
|
||||
|
||||
nr
|
||||
}
|
||||
@ -584,12 +668,12 @@ impl RoutingTable {
|
||||
}
|
||||
|
||||
pub fn lookup_node_ref(&self, node_id: DHTKey) -> Option<NodeRef> {
|
||||
let inner = self.inner.read();
|
||||
if node_id == inner.node_id {
|
||||
if node_id == self.unlocked_inner.node_id {
|
||||
log_rtab!(debug "can't look up own node id in routing table");
|
||||
return None;
|
||||
}
|
||||
let idx = Self::find_bucket_index(&*inner, node_id);
|
||||
let idx = self.find_bucket_index(node_id);
|
||||
let inner = self.inner.read();
|
||||
let bucket = &inner.buckets[idx];
|
||||
bucket
|
||||
.entry(&node_id)
|
||||
@ -597,13 +681,17 @@ impl RoutingTable {
|
||||
}
|
||||
|
||||
// Shortcut function to add a node to our routing table if it doesn't exist
|
||||
// and add the dial info we have for it, since that's pretty common
|
||||
// and add the dial info we have for it. Returns a noderef filtered to
|
||||
// the routing domain in which this node was registered for convenience.
|
||||
pub fn register_node_with_signed_node_info(
|
||||
&self,
|
||||
routing_domain: RoutingDomain,
|
||||
node_id: DHTKey,
|
||||
signed_node_info: SignedNodeInfo,
|
||||
allow_invalid_signature: bool,
|
||||
allow_invalid: bool,
|
||||
) -> Option<NodeRef> {
|
||||
//log_rtab!("register_node_with_signed_node_info: routing_domain: {:?}, node_id: {:?}, signed_node_info: {:?}, allow_invalid: {:?}", routing_domain, node_id, signed_node_info, allow_invalid );
|
||||
|
||||
// validate signed node info is not something malicious
|
||||
if node_id == self.node_id() {
|
||||
log_rtab!(debug "can't register own node id in routing table");
|
||||
@ -615,9 +703,29 @@ impl RoutingTable {
|
||||
return None;
|
||||
}
|
||||
}
|
||||
if !allow_invalid {
|
||||
// verify signature
|
||||
if !signed_node_info.has_valid_signature() {
|
||||
log_rtab!(debug "signed node info for {} has invalid signature", node_id);
|
||||
return None;
|
||||
}
|
||||
// verify signed node info is valid in this routing domain
|
||||
if !self
|
||||
.node_info_is_valid_in_routing_domain(routing_domain, &signed_node_info.node_info)
|
||||
{
|
||||
log_rtab!(debug "signed node info for {} not valid in the {:?} routing domain", node_id, routing_domain);
|
||||
return None;
|
||||
}
|
||||
}
|
||||
|
||||
self.create_node_ref(node_id, |e| {
|
||||
e.update_signed_node_info(signed_node_info, allow_invalid_signature);
|
||||
e.update_signed_node_info(routing_domain, signed_node_info);
|
||||
})
|
||||
.map(|mut nr| {
|
||||
nr.set_filter(Some(
|
||||
NodeRefFilter::new().with_routing_domain(routing_domain),
|
||||
));
|
||||
nr
|
||||
})
|
||||
}
|
||||
|
||||
@ -629,13 +737,15 @@ impl RoutingTable {
|
||||
descriptor: ConnectionDescriptor,
|
||||
timestamp: u64,
|
||||
) -> Option<NodeRef> {
|
||||
self.create_node_ref(node_id, |e| {
|
||||
// set the most recent node address for connection finding and udp replies
|
||||
e.set_last_connection(descriptor, timestamp);
|
||||
|
||||
let out = self.create_node_ref(node_id, |e| {
|
||||
// this node is live because it literally just connected to us
|
||||
e.touch_last_seen(timestamp);
|
||||
})
|
||||
});
|
||||
if let Some(nr) = &out {
|
||||
// set the most recent node address for connection finding and udp replies
|
||||
nr.set_last_connection(descriptor, timestamp);
|
||||
}
|
||||
out
|
||||
}
|
||||
|
||||
// Ticks about once per second
|
||||
@ -645,7 +755,7 @@ impl RoutingTable {
|
||||
self.unlocked_inner.rolling_transfers_task.tick().await?;
|
||||
|
||||
// Kick buckets task
|
||||
let kick_bucket_queue_count = { self.inner.read().kick_queue.len() };
|
||||
let kick_bucket_queue_count = self.unlocked_inner.kick_queue.lock().len();
|
||||
if kick_bucket_queue_count > 0 {
|
||||
self.unlocked_inner.kick_buckets_task.tick().await?;
|
||||
}
|
||||
@ -653,64 +763,6 @@ impl RoutingTable {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
//////////////////////////////////////////////////////////////////////
|
||||
// Stats Accounting
|
||||
pub fn stats_question_sent(
|
||||
&self,
|
||||
node_ref: NodeRef,
|
||||
ts: u64,
|
||||
bytes: u64,
|
||||
expects_answer: bool,
|
||||
) {
|
||||
self.inner
|
||||
.write()
|
||||
.self_transfer_stats_accounting
|
||||
.add_up(bytes);
|
||||
node_ref.operate_mut(|e| {
|
||||
e.question_sent(ts, bytes, expects_answer);
|
||||
})
|
||||
}
|
||||
pub fn stats_question_rcvd(&self, node_ref: NodeRef, ts: u64, bytes: u64) {
|
||||
self.inner
|
||||
.write()
|
||||
.self_transfer_stats_accounting
|
||||
.add_down(bytes);
|
||||
node_ref.operate_mut(|e| {
|
||||
e.question_rcvd(ts, bytes);
|
||||
})
|
||||
}
|
||||
pub fn stats_answer_sent(&self, node_ref: NodeRef, bytes: u64) {
|
||||
self.inner
|
||||
.write()
|
||||
.self_transfer_stats_accounting
|
||||
.add_up(bytes);
|
||||
node_ref.operate_mut(|e| {
|
||||
e.answer_sent(bytes);
|
||||
})
|
||||
}
|
||||
pub fn stats_answer_rcvd(&self, node_ref: NodeRef, send_ts: u64, recv_ts: u64, bytes: u64) {
|
||||
{
|
||||
let mut inner = self.inner.write();
|
||||
inner.self_transfer_stats_accounting.add_down(bytes);
|
||||
inner
|
||||
.self_latency_stats_accounting
|
||||
.record_latency(recv_ts - send_ts);
|
||||
}
|
||||
node_ref.operate_mut(|e| {
|
||||
e.answer_rcvd(send_ts, recv_ts, bytes);
|
||||
})
|
||||
}
|
||||
pub fn stats_question_lost(&self, node_ref: NodeRef) {
|
||||
node_ref.operate_mut(|e| {
|
||||
e.question_lost();
|
||||
})
|
||||
}
|
||||
pub fn stats_failed_to_send(&self, node_ref: NodeRef, ts: u64, expects_answer: bool) {
|
||||
node_ref.operate_mut(|e| {
|
||||
e.failed_to_send(ts, expects_answer);
|
||||
})
|
||||
}
|
||||
|
||||
//////////////////////////////////////////////////////////////////////
|
||||
// Routing Table Health Metrics
|
||||
|
||||
@ -735,4 +787,50 @@ impl RoutingTable {
|
||||
}
|
||||
health
|
||||
}
|
||||
|
||||
pub fn get_recent_peers(&self) -> Vec<(DHTKey, RecentPeersEntry)> {
let mut recent_peers = Vec::new();
let mut dead_peers = Vec::new();
let mut out = Vec::new();

// collect all recent peers
{
let inner = self.inner.read();
for (k, _v) in &inner.recent_peers {
recent_peers.push(*k);
}
}

// look up each node and make sure the connection is still live
// (uses same logic as send_data, ensuring last_connection works for UDP)
for e in &recent_peers {
let mut dead = true;
if let Some(nr) = self.lookup_node_ref(*e) {
if let Some(last_connection) = nr.last_connection() {
out.push((*e, RecentPeersEntry { last_connection }));
dead = false;
}
}
if dead {
dead_peers.push(e);
}
}

// purge dead recent peers
{
let mut inner = self.inner.write();
for d in dead_peers {
inner.recent_peers.remove(d);
}
}

out
}

pub fn touch_recent_peer(&self, node_id: DHTKey, last_connection: ConnectionDescriptor) {
let mut inner = self.inner.write();
inner
.recent_peers
.insert(node_id, RecentPeersEntry { last_connection });
}
}
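
recent_peers is a small bounded LRU (RECENT_PEERS_TABLE_SIZE entries): touch_recent_peer reinserts a peer as most recent and old peers age out. A toy stand-in using only std, meant to show the intended behavior rather than the actual LruCache API:

use std::collections::VecDeque;

struct RecentPeers {
    capacity: usize,
    // front = most recently touched, back = least recently touched
    entries: VecDeque<(u64, &'static str)>, // (node id stand-in, connection stand-in)
}

impl RecentPeers {
    fn new(capacity: usize) -> Self {
        Self { capacity, entries: VecDeque::new() }
    }
    fn touch(&mut self, node_id: u64, last_connection: &'static str) {
        self.entries.retain(|(k, _)| *k != node_id);
        self.entries.push_front((node_id, last_connection));
        self.entries.truncate(self.capacity);
    }
    fn contains(&self, node_id: u64) -> bool {
        self.entries.iter().any(|(k, _)| *k == node_id)
    }
}

fn main() {
    let mut rp = RecentPeers::new(2);
    rp.touch(1, "udp");
    rp.touch(2, "tcp");
    rp.touch(1, "udp"); // peer 1 becomes most recent again
    rp.touch(3, "ws"); // capacity 2: least recent (peer 2) is evicted
    assert!(rp.contains(1) && rp.contains(3) && !rp.contains(2));
}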
|
||||
|
@ -6,11 +6,71 @@ use alloc::fmt;
// We should ping them with some frequency and 30 seconds is typical timeout
const CONNECTIONLESS_TIMEOUT_SECS: u32 = 29;

#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord)]
pub struct NodeRefFilter {
pub routing_domain_set: RoutingDomainSet,
pub dial_info_filter: DialInfoFilter,
}

impl Default for NodeRefFilter {
fn default() -> Self {
Self::new()
}
}

impl NodeRefFilter {
pub fn new() -> Self {
Self {
routing_domain_set: RoutingDomainSet::all(),
dial_info_filter: DialInfoFilter::all(),
}
}

pub fn with_routing_domain(mut self, routing_domain: RoutingDomain) -> Self {
self.routing_domain_set = routing_domain.into();
self
}
pub fn with_routing_domain_set(mut self, routing_domain_set: RoutingDomainSet) -> Self {
self.routing_domain_set = routing_domain_set;
self
}
pub fn with_dial_info_filter(mut self, dial_info_filter: DialInfoFilter) -> Self {
self.dial_info_filter = dial_info_filter;
self
}
pub fn with_protocol_type(mut self, protocol_type: ProtocolType) -> Self {
self.dial_info_filter = self.dial_info_filter.with_protocol_type(protocol_type);
self
}
pub fn with_protocol_type_set(mut self, protocol_set: ProtocolTypeSet) -> Self {
self.dial_info_filter = self.dial_info_filter.with_protocol_type_set(protocol_set);
self
}
pub fn with_address_type(mut self, address_type: AddressType) -> Self {
self.dial_info_filter = self.dial_info_filter.with_address_type(address_type);
self
}
pub fn with_address_type_set(mut self, address_set: AddressTypeSet) -> Self {
self.dial_info_filter = self.dial_info_filter.with_address_type_set(address_set);
self
}
pub fn filtered(mut self, other_filter: &NodeRefFilter) -> Self {
self.routing_domain_set &= other_filter.routing_domain_set;
self.dial_info_filter = self
.dial_info_filter
.filtered(&other_filter.dial_info_filter);
self
}
pub fn is_dead(&self) -> bool {
self.dial_info_filter.is_dead() || self.routing_domain_set.is_empty()
}
}
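
NodeRefFilter is a builder whose filtered() merge is an intersection: the routing domain sets are ANDed and the dial info filter is narrowed, and is_dead() reports when nothing can match anymore. A self-contained toy illustrating just that intersection behavior, with bit sets standing in for RoutingDomainSet and DialInfoFilter:

#[derive(Clone, Copy, Debug, PartialEq, Eq)]
struct FilterSketch {
    domains: u8,   // bit 0 = LocalNetwork, bit 1 = PublicInternet
    protocols: u8, // bit 0 = UDP, bit 1 = TCP, bit 2 = WS, bit 3 = WSS
}

impl FilterSketch {
    fn all() -> Self {
        Self { domains: 0b11, protocols: 0b1111 }
    }
    fn with_domains(mut self, domains: u8) -> Self {
        self.domains = domains;
        self
    }
    fn with_protocols(mut self, protocols: u8) -> Self {
        self.protocols = protocols;
        self
    }
    // Intersection, like NodeRefFilter::filtered()
    fn filtered(mut self, other: &FilterSketch) -> Self {
        self.domains &= other.domains;
        self.protocols &= other.protocols;
        self
    }
    // Dead when either component can no longer match anything
    fn is_dead(&self) -> bool {
        self.domains == 0 || self.protocols == 0
    }
}

fn main() {
    let public_only = FilterSketch::all().with_domains(0b10);
    let udp_only = FilterSketch::all().with_protocols(0b0001);
    let merged = public_only.filtered(&udp_only);
    assert_eq!(merged, FilterSketch { domains: 0b10, protocols: 0b0001 });
    // Intersecting with a disjoint domain set kills the filter
    assert!(merged.filtered(&FilterSketch::all().with_domains(0b01)).is_dead());
}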
|
||||
|
||||
pub struct NodeRef {
|
||||
routing_table: RoutingTable,
|
||||
node_id: DHTKey,
|
||||
entry: Arc<BucketEntry>,
|
||||
filter: Option<DialInfoFilter>,
|
||||
filter: Option<NodeRefFilter>,
|
||||
#[cfg(feature = "tracking")]
|
||||
track_id: usize,
|
||||
}
|
||||
@ -20,7 +80,7 @@ impl NodeRef {
|
||||
routing_table: RoutingTable,
|
||||
node_id: DHTKey,
|
||||
entry: Arc<BucketEntry>,
|
||||
filter: Option<DialInfoFilter>,
|
||||
filter: Option<NodeRefFilter>,
|
||||
) -> Self {
|
||||
entry.ref_count.fetch_add(1u32, Ordering::Relaxed);
|
||||
|
||||
@ -34,31 +94,47 @@ impl NodeRef {
|
||||
}
|
||||
}
|
||||
|
||||
pub fn node_id(&self) -> DHTKey {
|
||||
self.node_id
|
||||
// Operate on entry accessors
|
||||
|
||||
pub(super) fn operate<T, F>(&self, f: F) -> T
|
||||
where
|
||||
F: FnOnce(&RoutingTableInner, &BucketEntryInner) -> T,
|
||||
{
|
||||
let inner = &*self.routing_table.inner.read();
|
||||
self.entry.with(|e| f(inner, e))
|
||||
}
|
||||
|
||||
pub fn filter_ref(&self) -> Option<&DialInfoFilter> {
|
||||
pub(super) fn operate_mut<T, F>(&self, f: F) -> T
|
||||
where
|
||||
F: FnOnce(&mut RoutingTableInner, &mut BucketEntryInner) -> T,
|
||||
{
|
||||
let inner = &mut *self.routing_table.inner.write();
|
||||
self.entry.with_mut(|e| f(inner, e))
|
||||
}
|
||||
|
||||
// Filtering
|
||||
|
||||
pub fn filter_ref(&self) -> Option<&NodeRefFilter> {
|
||||
self.filter.as_ref()
|
||||
}
|
||||
|
||||
pub fn take_filter(&mut self) -> Option<DialInfoFilter> {
|
||||
pub fn take_filter(&mut self) -> Option<NodeRefFilter> {
|
||||
self.filter.take()
|
||||
}
|
||||
|
||||
pub fn set_filter(&mut self, filter: Option<DialInfoFilter>) {
|
||||
pub fn set_filter(&mut self, filter: Option<NodeRefFilter>) {
|
||||
self.filter = filter
|
||||
}
|
||||
|
||||
pub fn merge_filter(&mut self, filter: DialInfoFilter) {
|
||||
pub fn merge_filter(&mut self, filter: NodeRefFilter) {
|
||||
if let Some(self_filter) = self.filter.take() {
|
||||
self.filter = Some(self_filter.filtered(filter));
|
||||
self.filter = Some(self_filter.filtered(&filter));
|
||||
} else {
|
||||
self.filter = Some(filter);
|
||||
}
|
||||
}
|
||||
|
||||
pub fn filtered_clone(&self, filter: DialInfoFilter) -> Self {
|
||||
pub fn filtered_clone(&self, filter: NodeRefFilter) -> Self {
|
||||
let mut out = self.clone();
|
||||
out.merge_filter(filter);
|
||||
out
|
||||
@ -72,70 +148,103 @@ impl NodeRef {
|
||||
}
|
||||
}
|
||||
|
||||
// Returns true if some protocols can still pass the filter and false if no protocols remain
|
||||
// pub fn filter_protocols(&mut self, protocol_set: ProtocolSet) -> bool {
|
||||
// if protocol_set != ProtocolSet::all() {
|
||||
// let mut dif = self.filter.clone().unwrap_or_default();
|
||||
// dif.protocol_set &= protocol_set;
|
||||
// self.filter = Some(dif);
|
||||
// }
|
||||
// self.filter
|
||||
// .as_ref()
|
||||
// .map(|f| !f.protocol_set.is_empty())
|
||||
// .unwrap_or(true)
|
||||
// }
|
||||
|
||||
pub fn operate<T, F>(&self, f: F) -> T
|
||||
where
|
||||
F: FnOnce(&BucketEntryInner) -> T,
|
||||
{
|
||||
self.entry.with(f)
|
||||
pub fn routing_domain_set(&self) -> RoutingDomainSet {
|
||||
self.filter
|
||||
.as_ref()
|
||||
.map(|f| f.routing_domain_set)
|
||||
.unwrap_or(RoutingDomainSet::all())
|
||||
}
|
||||
|
||||
pub fn operate_mut<T, F>(&self, f: F) -> T
|
||||
where
|
||||
F: FnOnce(&mut BucketEntryInner) -> T,
|
||||
{
|
||||
self.entry.with_mut(f)
|
||||
pub fn dial_info_filter(&self) -> DialInfoFilter {
|
||||
self.filter
|
||||
.as_ref()
|
||||
.map(|f| f.dial_info_filter.clone())
|
||||
.unwrap_or(DialInfoFilter::all())
|
||||
}
|
||||
|
||||
pub fn peer_info(&self) -> Option<PeerInfo> {
|
||||
self.operate(|e| e.peer_info(self.node_id()))
|
||||
pub fn best_routing_domain(&self) -> Option<RoutingDomain> {
|
||||
self.operate(|_rti, e| {
|
||||
e.best_routing_domain(
|
||||
self.filter
|
||||
.as_ref()
|
||||
.map(|f| f.routing_domain_set)
|
||||
.unwrap_or(RoutingDomainSet::all()),
|
||||
)
|
||||
})
|
||||
}
|
||||
pub fn has_seen_our_node_info(&self) -> bool {
|
||||
self.operate(|e| e.has_seen_our_node_info())
|
||||
|
||||
// Accessors
|
||||
pub fn routing_table(&self) -> RoutingTable {
|
||||
self.routing_table.clone()
|
||||
}
|
||||
pub fn set_seen_our_node_info(&self) {
|
||||
self.operate_mut(|e| e.set_seen_our_node_info(true));
|
||||
pub fn node_id(&self) -> DHTKey {
|
||||
self.node_id
|
||||
}
|
||||
pub fn has_updated_since_last_network_change(&self) -> bool {
|
||||
self.operate(|e| e.has_updated_since_last_network_change())
|
||||
self.operate(|_rti, e| e.has_updated_since_last_network_change())
|
||||
}
|
||||
pub fn set_updated_since_last_network_change(&self) {
|
||||
self.operate_mut(|e| e.set_updated_since_last_network_change(true));
|
||||
self.operate_mut(|_rti, e| e.set_updated_since_last_network_change(true));
|
||||
}
|
||||
pub fn network_class(&self) -> Option<NetworkClass> {
|
||||
self.operate(|e| e.node_info().map(|n| n.network_class))
|
||||
pub fn update_node_status(&self, node_status: NodeStatus) {
|
||||
self.operate_mut(|_rti, e| {
|
||||
e.update_node_status(node_status);
|
||||
});
|
||||
}
|
||||
pub fn outbound_protocols(&self) -> Option<ProtocolTypeSet> {
|
||||
self.operate(|e| e.node_info().map(|n| n.outbound_protocols))
|
||||
pub fn min_max_version(&self) -> Option<(u8, u8)> {
|
||||
self.operate(|_rti, e| e.min_max_version())
|
||||
}
|
||||
pub fn address_types(&self) -> Option<AddressTypeSet> {
|
||||
self.operate(|e| e.node_info().map(|n| n.address_types))
|
||||
pub fn set_min_max_version(&self, min_max_version: (u8, u8)) {
|
||||
self.operate_mut(|_rti, e| e.set_min_max_version(min_max_version))
|
||||
}
|
||||
pub fn node_info_outbound_filter(&self) -> DialInfoFilter {
|
||||
pub fn state(&self, cur_ts: u64) -> BucketEntryState {
|
||||
self.operate(|_rti, e| e.state(cur_ts))
|
||||
}
|
||||
pub fn peer_stats(&self) -> PeerStats {
|
||||
self.operate(|_rti, e| e.peer_stats().clone())
|
||||
}
|
||||
|
||||
// Per-RoutingDomain accessors
|
||||
pub fn make_peer_info(&self, routing_domain: RoutingDomain) -> Option<PeerInfo> {
|
||||
self.operate(|_rti, e| e.make_peer_info(self.node_id(), routing_domain))
|
||||
}
|
||||
pub fn signed_node_info_has_valid_signature(&self, routing_domain: RoutingDomain) -> bool {
|
||||
self.operate(|_rti, e| {
|
||||
e.signed_node_info(routing_domain)
|
||||
.map(|sni| sni.has_valid_signature())
|
||||
.unwrap_or(false)
|
||||
})
|
||||
}
|
||||
pub fn has_seen_our_node_info(&self, routing_domain: RoutingDomain) -> bool {
|
||||
self.operate(|_rti, e| e.has_seen_our_node_info(routing_domain))
|
||||
}
|
||||
pub fn set_seen_our_node_info(&self, routing_domain: RoutingDomain) {
|
||||
self.operate_mut(|_rti, e| e.set_seen_our_node_info(routing_domain, true));
|
||||
}
|
||||
pub fn network_class(&self, routing_domain: RoutingDomain) -> Option<NetworkClass> {
|
||||
self.operate(|_rt, e| e.node_info(routing_domain).map(|n| n.network_class))
|
||||
}
|
||||
pub fn outbound_protocols(&self, routing_domain: RoutingDomain) -> Option<ProtocolTypeSet> {
|
||||
self.operate(|_rt, e| e.node_info(routing_domain).map(|n| n.outbound_protocols))
|
||||
}
|
||||
pub fn address_types(&self, routing_domain: RoutingDomain) -> Option<AddressTypeSet> {
|
||||
self.operate(|_rt, e| e.node_info(routing_domain).map(|n| n.address_types))
|
||||
}
|
||||
pub fn node_info_outbound_filter(&self, routing_domain: RoutingDomain) -> DialInfoFilter {
|
||||
let mut dif = DialInfoFilter::all();
|
||||
if let Some(outbound_protocols) = self.outbound_protocols() {
|
||||
if let Some(outbound_protocols) = self.outbound_protocols(routing_domain) {
|
||||
dif = dif.with_protocol_type_set(outbound_protocols);
|
||||
}
|
||||
if let Some(address_types) = self.address_types() {
|
||||
if let Some(address_types) = self.address_types(routing_domain) {
|
||||
dif = dif.with_address_type_set(address_types);
|
||||
}
|
||||
dif
|
||||
}
|
||||
|
||||
pub fn relay(&self) -> Option<NodeRef> {
|
||||
let target_rpi = self.operate(|e| e.node_info().map(|n| n.relay_peer_info))?;
|
||||
pub fn relay(&self, routing_domain: RoutingDomain) -> Option<NodeRef> {
|
||||
let target_rpi = self.operate(|_rti, e| {
|
||||
e.node_info(routing_domain)
|
||||
.map(|n| n.relay_peer_info.as_ref().map(|pi| pi.as_ref().clone()))
|
||||
})?;
|
||||
target_rpi.and_then(|t| {
|
||||
// If relay is ourselves, then return None, because we can't relay through ourselves
|
||||
// and to contact this node we should have had an existing inbound connection
|
||||
@ -144,87 +253,45 @@ impl NodeRef {
|
||||
}
|
||||
|
||||
// Register relay node and return noderef
|
||||
self.routing_table
|
||||
.register_node_with_signed_node_info(t.node_id.key, t.signed_node_info, false)
|
||||
.map(|mut nr| {
|
||||
nr.set_filter(self.filter_ref().cloned());
|
||||
nr
|
||||
})
|
||||
})
|
||||
}
|
||||
pub fn first_filtered_dial_info_detail(
|
||||
&self,
|
||||
routing_domain: Option<RoutingDomain>,
|
||||
) -> Option<DialInfoDetail> {
|
||||
self.operate(|e| {
|
||||
// Prefer local dial info first unless it is filtered out
|
||||
if routing_domain == None || routing_domain == Some(RoutingDomain::LocalNetwork) {
|
||||
e.local_node_info().and_then(|l| {
|
||||
l.first_filtered_dial_info(|di| {
|
||||
if let Some(filter) = self.filter.as_ref() {
|
||||
di.matches_filter(filter)
|
||||
} else {
|
||||
true
|
||||
}
|
||||
})
|
||||
.map(|di| DialInfoDetail {
|
||||
class: DialInfoClass::Direct,
|
||||
dial_info: di,
|
||||
})
|
||||
})
|
||||
} else {
|
||||
None
|
||||
}
|
||||
.or_else(|| {
|
||||
if routing_domain == None || routing_domain == Some(RoutingDomain::PublicInternet) {
|
||||
e.node_info().and_then(|n| {
|
||||
n.first_filtered_dial_info_detail(|did| {
|
||||
if let Some(filter) = self.filter.as_ref() {
|
||||
did.matches_filter(filter)
|
||||
} else {
|
||||
true
|
||||
}
|
||||
})
|
||||
})
|
||||
} else {
|
||||
None
|
||||
}
|
||||
})
|
||||
self.routing_table.register_node_with_signed_node_info(
|
||||
routing_domain,
|
||||
t.node_id.key,
|
||||
t.signed_node_info,
|
||||
false,
|
||||
)
|
||||
})
|
||||
}
|
||||
|
||||
pub fn all_filtered_dial_info_details<F>(
|
||||
&self,
|
||||
routing_domain: Option<RoutingDomain>,
|
||||
) -> Vec<DialInfoDetail> {
|
||||
let mut out = Vec::new();
|
||||
self.operate(|e| {
|
||||
// Prefer local dial info first unless it is filtered out
|
||||
if routing_domain == None || routing_domain == Some(RoutingDomain::LocalNetwork) {
|
||||
if let Some(lni) = e.local_node_info() {
|
||||
for di in lni.all_filtered_dial_info(|di| {
|
||||
if let Some(filter) = self.filter.as_ref() {
|
||||
di.matches_filter(filter)
|
||||
} else {
|
||||
true
|
||||
}
|
||||
}) {
|
||||
out.push(DialInfoDetail {
|
||||
class: DialInfoClass::Direct,
|
||||
dial_info: di,
|
||||
});
|
||||
// Filtered accessors
|
||||
pub fn first_filtered_dial_info_detail(&self) -> Option<DialInfoDetail> {
|
||||
let routing_domain_set = self.routing_domain_set();
|
||||
let dial_info_filter = self.dial_info_filter();
|
||||
|
||||
self.operate(|_rt, e| {
|
||||
for routing_domain in routing_domain_set {
|
||||
if let Some(ni) = e.node_info(routing_domain) {
|
||||
let filter = |did: &DialInfoDetail| did.matches_filter(&dial_info_filter);
|
||||
if let Some(did) = ni.first_filtered_dial_info_detail(filter) {
|
||||
return Some(did);
|
||||
}
|
||||
}
|
||||
}
|
||||
if routing_domain == None || routing_domain == Some(RoutingDomain::PublicInternet) {
|
||||
if let Some(ni) = e.node_info() {
|
||||
out.append(&mut ni.all_filtered_dial_info_details(|did| {
|
||||
if let Some(filter) = self.filter.as_ref() {
|
||||
did.matches_filter(filter)
|
||||
} else {
|
||||
true
|
||||
}
|
||||
}))
|
||||
None
|
||||
})
|
||||
}
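A hedged usage sketch of the filtered accessors above, assuming `nr` is a NodeRef already obtained from the routing table:

// Hypothetical: restrict a node reference to UDP and ask for its best dial info.
let udp_nr = nr.filtered_clone(NodeRefFilter::new().with_protocol_type(ProtocolType::UDP));
if let Some(did) = udp_nr.first_filtered_dial_info_detail() {
    println!("best UDP dial info: {:?} (class {:?})", did.dial_info, did.class);
}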
|
||||
|
||||
pub fn all_filtered_dial_info_details(&self) -> Vec<DialInfoDetail> {
|
||||
let routing_domain_set = self.routing_domain_set();
|
||||
let dial_info_filter = self.dial_info_filter();
|
||||
|
||||
let mut out = Vec::new();
|
||||
self.operate(|_rt, e| {
|
||||
for routing_domain in routing_domain_set {
|
||||
if let Some(ni) = e.node_info(routing_domain) {
|
||||
let filter = |did: &DialInfoDetail| did.matches_filter(&dial_info_filter);
|
||||
// Collect all matching dial info details for this routing domain
out.append(&mut ni.all_filtered_dial_info_details(filter));
|
||||
}
|
||||
}
|
||||
});
|
||||
@ -232,16 +299,16 @@ impl NodeRef {
|
||||
out
|
||||
}
|
||||
|
||||
pub async fn last_connection(&self) -> Option<ConnectionDescriptor> {
|
||||
pub fn last_connection(&self) -> Option<ConnectionDescriptor> {
|
||||
// Get the last connection and the last time we saw anything with this connection
|
||||
let (last_connection, last_seen) =
|
||||
self.operate(|e| e.last_connection(self.filter.clone()))?;
|
||||
self.operate(|rti, e| e.last_connection(rti, self.filter.clone()))?;
|
||||
|
||||
// Should we check the connection table?
|
||||
if last_connection.protocol_type().is_connection_oriented() {
|
||||
// Look the connection up in the connection manager and see if it's still there
|
||||
let connection_manager = self.routing_table.network_manager().connection_manager();
|
||||
connection_manager.get_connection(last_connection).await?;
|
||||
connection_manager.get_connection(last_connection)?;
|
||||
} else {
|
||||
// If this is not connection oriented, then we check our last seen time
|
||||
// to see if this mapping has expired (beyond our timeout)
|
||||
@ -254,21 +321,62 @@ impl NodeRef {
|
||||
}
|
||||
|
||||
pub fn clear_last_connections(&self) {
|
||||
self.operate_mut(|e| e.clear_last_connections())
|
||||
self.operate_mut(|_rti, e| e.clear_last_connections())
|
||||
}
|
||||
|
||||
pub fn set_last_connection(&self, connection_descriptor: ConnectionDescriptor, ts: u64) {
|
||||
self.operate_mut(|e| e.set_last_connection(connection_descriptor, ts))
|
||||
self.operate_mut(|_rti, e| e.set_last_connection(connection_descriptor, ts));
|
||||
self.routing_table
|
||||
.touch_recent_peer(self.node_id(), connection_descriptor);
|
||||
}
|
||||
|
||||
pub fn has_any_dial_info(&self) -> bool {
|
||||
self.operate(|e| {
|
||||
e.node_info()
|
||||
.map(|n| n.has_any_dial_info())
|
||||
.unwrap_or(false)
|
||||
|| e.local_node_info()
|
||||
.map(|l| l.has_dial_info())
|
||||
.unwrap_or(false)
|
||||
self.operate(|_rti, e| {
|
||||
for rtd in RoutingDomain::all() {
|
||||
if let Some(ni) = e.node_info(rtd) {
|
||||
if ni.has_any_dial_info() {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
}
|
||||
false
|
||||
})
|
||||
}
|
||||
|
||||
pub fn stats_question_sent(&self, ts: u64, bytes: u64, expects_answer: bool) {
|
||||
self.operate_mut(|rti, e| {
|
||||
rti.self_transfer_stats_accounting.add_up(bytes);
|
||||
e.question_sent(ts, bytes, expects_answer);
|
||||
})
|
||||
}
|
||||
pub fn stats_question_rcvd(&self, ts: u64, bytes: u64) {
|
||||
self.operate_mut(|rti, e| {
|
||||
rti.self_transfer_stats_accounting.add_down(bytes);
|
||||
e.question_rcvd(ts, bytes);
|
||||
})
|
||||
}
|
||||
pub fn stats_answer_sent(&self, bytes: u64) {
|
||||
self.operate_mut(|rti, e| {
|
||||
rti.self_transfer_stats_accounting.add_up(bytes);
|
||||
e.answer_sent(bytes);
|
||||
})
|
||||
}
|
||||
pub fn stats_answer_rcvd(&self, send_ts: u64, recv_ts: u64, bytes: u64) {
|
||||
self.operate_mut(|rti, e| {
|
||||
rti.self_transfer_stats_accounting.add_down(bytes);
|
||||
rti.self_latency_stats_accounting
|
||||
.record_latency(recv_ts - send_ts);
|
||||
e.answer_rcvd(send_ts, recv_ts, bytes);
|
||||
})
|
||||
}
|
||||
pub fn stats_question_lost(&self) {
|
||||
self.operate_mut(|_rti, e| {
|
||||
e.question_lost();
|
||||
})
|
||||
}
|
||||
pub fn stats_failed_to_send(&self, ts: u64, expects_answer: bool) {
|
||||
self.operate_mut(|_rti, e| {
|
||||
e.failed_to_send(ts, expects_answer);
|
||||
})
|
||||
}
|
||||
}
|
||||
|
130  veilid-core/src/routing_table/routing_domain_editor.rs (new file)
@ -0,0 +1,130 @@
use super::*;

enum RoutingDomainChange {
    ClearDialInfoDetails,
    ClearRelayNode,
    SetRelayNode { relay_node: NodeRef },
    AddDialInfoDetail { dial_info_detail: DialInfoDetail },
}
|
||||
|
||||
pub struct RoutingDomainEditor {
|
||||
routing_table: RoutingTable,
|
||||
routing_domain: RoutingDomain,
|
||||
changes: Vec<RoutingDomainChange>,
|
||||
send_node_info_updates: bool,
|
||||
}
|
||||
|
||||
impl RoutingDomainEditor {
|
||||
pub(super) fn new(routing_table: RoutingTable, routing_domain: RoutingDomain) -> Self {
|
||||
Self {
|
||||
routing_table,
|
||||
routing_domain,
|
||||
changes: Vec::new(),
|
||||
send_node_info_updates: true,
|
||||
}
|
||||
}
|
||||
#[instrument(level = "debug", skip(self))]
|
||||
pub fn disable_node_info_updates(&mut self) {
|
||||
self.send_node_info_updates = false;
|
||||
}
|
||||
|
||||
#[instrument(level = "debug", skip(self))]
|
||||
pub fn clear_dial_info_details(&mut self) {
|
||||
self.changes.push(RoutingDomainChange::ClearDialInfoDetails);
|
||||
}
|
||||
#[instrument(level = "debug", skip(self))]
|
||||
pub fn clear_relay_node(&mut self) {
|
||||
self.changes.push(RoutingDomainChange::ClearRelayNode);
|
||||
}
|
||||
#[instrument(level = "debug", skip(self))]
|
||||
pub fn set_relay_node(&mut self, relay_node: NodeRef) {
|
||||
self.changes
|
||||
.push(RoutingDomainChange::SetRelayNode { relay_node })
|
||||
}
|
||||
#[instrument(level = "debug", skip(self), err)]
|
||||
pub fn register_dial_info(
|
||||
&mut self,
|
||||
dial_info: DialInfo,
|
||||
class: DialInfoClass,
|
||||
) -> EyreResult<()> {
|
||||
if !self
|
||||
.routing_table
|
||||
.ensure_dial_info_is_valid(self.routing_domain, &dial_info)
|
||||
{
|
||||
return Err(eyre!(
|
||||
"dial info '{}' is not valid in routing domain '{:?}'",
|
||||
dial_info,
|
||||
self.routing_domain
|
||||
));
|
||||
}
|
||||
|
||||
self.changes.push(RoutingDomainChange::AddDialInfoDetail {
|
||||
dial_info_detail: DialInfoDetail {
|
||||
dial_info: dial_info.clone(),
|
||||
class,
|
||||
},
|
||||
});
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[instrument(level = "debug", skip(self))]
|
||||
pub async fn commit(self) {
|
||||
let mut changed = false;
|
||||
{
|
||||
let node_id = self.routing_table.node_id();
|
||||
|
||||
let mut inner = self.routing_table.inner.write();
|
||||
let inner = &mut *inner;
|
||||
RoutingTable::with_routing_domain_mut(inner, self.routing_domain, |detail| {
|
||||
for change in self.changes {
|
||||
match change {
|
||||
RoutingDomainChange::ClearDialInfoDetails => {
|
||||
debug!("[{:?}] cleared dial info details", self.routing_domain);
|
||||
detail.clear_dial_info_details();
|
||||
changed = true;
|
||||
}
|
||||
RoutingDomainChange::ClearRelayNode => {
|
||||
debug!("[{:?}] cleared relay node", self.routing_domain);
|
||||
detail.set_relay_node(None);
|
||||
changed = true;
|
||||
}
|
||||
RoutingDomainChange::SetRelayNode { relay_node } => {
|
||||
debug!("[{:?}] set relay node: {}", self.routing_domain, relay_node);
|
||||
detail.set_relay_node(Some(relay_node));
|
||||
changed = true;
|
||||
}
|
||||
RoutingDomainChange::AddDialInfoDetail { dial_info_detail } => {
|
||||
debug!(
|
||||
"[{:?}] add dial info detail: {:?}",
|
||||
self.routing_domain, dial_info_detail
|
||||
);
|
||||
detail.add_dial_info_detail(dial_info_detail.clone());
|
||||
|
||||
info!(
|
||||
"{:?} Dial Info: {}",
|
||||
self.routing_domain,
|
||||
NodeDialInfo {
|
||||
node_id: NodeId::new(node_id),
|
||||
dial_info: dial_info_detail.dial_info
|
||||
}
|
||||
.to_string(),
|
||||
);
|
||||
changed = true;
|
||||
}
|
||||
}
|
||||
}
|
||||
});
|
||||
if changed {
|
||||
RoutingTable::reset_all_seen_our_node_info(inner, self.routing_domain);
|
||||
RoutingTable::reset_all_updated_since_last_network_change(inner);
|
||||
}
|
||||
}
|
||||
if changed && self.send_node_info_updates {
|
||||
let network_manager = self.routing_table.unlocked_inner.network_manager.clone();
|
||||
network_manager
|
||||
.send_node_info_updates(self.routing_domain, true)
|
||||
.await;
|
||||
}
|
||||
}
|
||||
}
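The editor pattern above batches changes and applies them under a single write lock when commit() runs. A minimal sketch of the intended call sequence, assuming a `routing_table.edit_routing_domain(...)` constructor that is not shown in this diff:

// Hypothetical usage: queue edits, then commit them atomically.
let mut editor = routing_table.edit_routing_domain(RoutingDomain::PublicInternet);
editor.clear_dial_info_details();
editor.register_dial_info(dial_info, DialInfoClass::Direct)?;
editor.commit().await;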
|
98  veilid-core/src/routing_table/routing_domains.rs (new file)
@ -0,0 +1,98 @@
use super::*;

/// General trait for all routing domains
pub trait RoutingDomainDetail {
    fn can_contain_address(&self, address: Address) -> bool;
    fn relay_node(&self) -> Option<NodeRef>;
    fn set_relay_node(&mut self, opt_relay_node: Option<NodeRef>);
    fn dial_info_details(&self) -> &Vec<DialInfoDetail>;
    fn clear_dial_info_details(&mut self);
    fn add_dial_info_detail(&mut self, did: DialInfoDetail);
}

/// Public Internet routing domain internals
#[derive(Debug, Default)]
pub struct PublicInternetRoutingDomainDetail {
    /// An optional node we relay through for this domain
    relay_node: Option<NodeRef>,
    /// The dial infos on this domain we can be reached by
    dial_info_details: Vec<DialInfoDetail>,
}
|
||||
|
||||
impl RoutingDomainDetail for PublicInternetRoutingDomainDetail {
|
||||
fn can_contain_address(&self, address: Address) -> bool {
|
||||
address.is_global()
|
||||
}
|
||||
fn relay_node(&self) -> Option<NodeRef> {
|
||||
self.relay_node.clone()
|
||||
}
|
||||
fn set_relay_node(&mut self, opt_relay_node: Option<NodeRef>) {
|
||||
self.relay_node = opt_relay_node.map(|nr| {
|
||||
nr.filtered_clone(
|
||||
NodeRefFilter::new().with_routing_domain(RoutingDomain::PublicInternet),
|
||||
)
|
||||
})
|
||||
}
|
||||
fn dial_info_details(&self) -> &Vec<DialInfoDetail> {
|
||||
&self.dial_info_details
|
||||
}
|
||||
fn clear_dial_info_details(&mut self) {
|
||||
self.dial_info_details.clear();
|
||||
}
|
||||
fn add_dial_info_detail(&mut self, did: DialInfoDetail) {
|
||||
self.dial_info_details.push(did);
|
||||
self.dial_info_details.sort();
|
||||
}
|
||||
}
|
||||
|
||||
/// Local Network routing domain internals
|
||||
#[derive(Debug, Default)]
|
||||
pub struct LocalInternetRoutingDomainDetail {
|
||||
/// An optional node we relay through for this domain
|
||||
relay_node: Option<NodeRef>,
|
||||
/// The dial infos on this domain we can be reached by
|
||||
dial_info_details: Vec<DialInfoDetail>,
|
||||
/// The local networks this domain will communicate with
|
||||
local_networks: Vec<(IpAddr, IpAddr)>,
|
||||
}
|
||||
|
||||
impl LocalInternetRoutingDomainDetail {
|
||||
pub fn set_local_networks(&mut self, mut local_networks: Vec<(IpAddr, IpAddr)>) -> bool {
|
||||
local_networks.sort();
|
||||
if local_networks == self.local_networks {
|
||||
return false;
|
||||
}
|
||||
self.local_networks = local_networks;
|
||||
true
|
||||
}
|
||||
}
|
||||
|
||||
impl RoutingDomainDetail for LocalInternetRoutingDomainDetail {
|
||||
fn can_contain_address(&self, address: Address) -> bool {
|
||||
let ip = address.to_ip_addr();
|
||||
for localnet in &self.local_networks {
|
||||
if ipaddr_in_network(ip, localnet.0, localnet.1) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
false
|
||||
}
|
||||
fn relay_node(&self) -> Option<NodeRef> {
|
||||
self.relay_node.clone()
|
||||
}
|
||||
fn set_relay_node(&mut self, opt_relay_node: Option<NodeRef>) {
|
||||
self.relay_node = opt_relay_node.map(|nr| {
|
||||
nr.filtered_clone(NodeRefFilter::new().with_routing_domain(RoutingDomain::LocalNetwork))
|
||||
});
|
||||
}
|
||||
fn dial_info_details(&self) -> &Vec<DialInfoDetail> {
|
||||
&self.dial_info_details
|
||||
}
|
||||
fn clear_dial_info_details(&mut self) {
|
||||
self.dial_info_details.clear();
|
||||
}
|
||||
fn add_dial_info_detail(&mut self, did: DialInfoDetail) {
|
||||
self.dial_info_details.push(did);
|
||||
self.dial_info_details.sort();
|
||||
}
|
||||
}
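A sketch of how the LocalNetwork detail decides address membership, assuming the local_networks tuples are (network address, netmask) in the order consumed by ipaddr_in_network, and that Address is the crate's own IPV4/IPV6 enum:

// Sketch: a 192.168.1.0/24 local network should contain one of its own hosts.
use std::net::{IpAddr, Ipv4Addr};
let mut detail = LocalInternetRoutingDomainDetail::default();
detail.set_local_networks(vec![(
    IpAddr::V4(Ipv4Addr::new(192, 168, 1, 0)),   // network (assumed ordering)
    IpAddr::V4(Ipv4Addr::new(255, 255, 255, 0)), // netmask (assumed ordering)
)]);
assert!(detail.can_contain_address(Address::IPV4(Ipv4Addr::new(192, 168, 1, 10))));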
|
@ -37,9 +37,10 @@ impl RoutingTable {
|
||||
_last_ts: u64,
|
||||
cur_ts: u64,
|
||||
) -> EyreResult<()> {
|
||||
let kick_queue: Vec<usize> = core::mem::take(&mut *self.unlocked_inner.kick_queue.lock())
|
||||
.into_iter()
|
||||
.collect();
|
||||
let mut inner = self.inner.write();
|
||||
let kick_queue: Vec<usize> = inner.kick_queue.iter().map(|v| *v).collect();
|
||||
inner.kick_queue.clear();
|
||||
for idx in kick_queue {
|
||||
Self::kick_bucket(&mut *inner, idx)
|
||||
}
|
||||
|
@ -1,23 +1,23 @@
|
||||
use crate::*;
|
||||
use rpc_processor::*;
|
||||
|
||||
pub fn encode_node_status(
|
||||
node_status: &NodeStatus,
|
||||
builder: &mut veilid_capnp::node_status::Builder,
|
||||
pub fn encode_public_internet_node_status(
|
||||
public_internet_node_status: &PublicInternetNodeStatus,
|
||||
builder: &mut veilid_capnp::public_internet_node_status::Builder,
|
||||
) -> Result<(), RPCError> {
|
||||
builder.set_will_route(node_status.will_route);
|
||||
builder.set_will_tunnel(node_status.will_tunnel);
|
||||
builder.set_will_signal(node_status.will_signal);
|
||||
builder.set_will_relay(node_status.will_relay);
|
||||
builder.set_will_validate_dial_info(node_status.will_validate_dial_info);
|
||||
builder.set_will_route(public_internet_node_status.will_route);
|
||||
builder.set_will_tunnel(public_internet_node_status.will_tunnel);
|
||||
builder.set_will_signal(public_internet_node_status.will_signal);
|
||||
builder.set_will_relay(public_internet_node_status.will_relay);
|
||||
builder.set_will_validate_dial_info(public_internet_node_status.will_validate_dial_info);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn decode_node_status(
|
||||
reader: &veilid_capnp::node_status::Reader,
|
||||
) -> Result<NodeStatus, RPCError> {
|
||||
Ok(NodeStatus {
|
||||
pub fn decode_public_internet_node_status(
|
||||
reader: &veilid_capnp::public_internet_node_status::Reader,
|
||||
) -> Result<PublicInternetNodeStatus, RPCError> {
|
||||
Ok(PublicInternetNodeStatus {
|
||||
will_route: reader.reborrow().get_will_route(),
|
||||
will_tunnel: reader.reborrow().get_will_tunnel(),
|
||||
will_signal: reader.reborrow().get_will_signal(),
|
||||
@ -25,3 +25,60 @@ pub fn decode_node_status(
|
||||
will_validate_dial_info: reader.reborrow().get_will_validate_dial_info(),
|
||||
})
|
||||
}
|
||||
|
||||
pub fn encode_local_network_node_status(
|
||||
local_network_node_status: &LocalNetworkNodeStatus,
|
||||
builder: &mut veilid_capnp::local_network_node_status::Builder,
|
||||
) -> Result<(), RPCError> {
|
||||
builder.set_will_relay(local_network_node_status.will_relay);
|
||||
builder.set_will_validate_dial_info(local_network_node_status.will_validate_dial_info);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn decode_local_network_node_status(
|
||||
reader: &veilid_capnp::local_network_node_status::Reader,
|
||||
) -> Result<LocalNetworkNodeStatus, RPCError> {
|
||||
Ok(LocalNetworkNodeStatus {
|
||||
will_relay: reader.reborrow().get_will_relay(),
|
||||
will_validate_dial_info: reader.reborrow().get_will_validate_dial_info(),
|
||||
})
|
||||
}
|
||||
|
||||
pub fn encode_node_status(
|
||||
node_status: &NodeStatus,
|
||||
builder: &mut veilid_capnp::node_status::Builder,
|
||||
) -> Result<(), RPCError> {
|
||||
match node_status {
|
||||
NodeStatus::PublicInternet(ns) => {
|
||||
let mut pi_builder = builder.reborrow().init_public_internet();
|
||||
encode_public_internet_node_status(&ns, &mut pi_builder)
|
||||
}
|
||||
NodeStatus::LocalNetwork(ns) => {
|
||||
let mut ln_builder = builder.reborrow().init_local_network();
|
||||
encode_local_network_node_status(&ns, &mut ln_builder)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub fn decode_node_status(
|
||||
reader: &veilid_capnp::node_status::Reader,
|
||||
) -> Result<NodeStatus, RPCError> {
|
||||
Ok(
|
||||
match reader
|
||||
.which()
|
||||
.map_err(RPCError::map_internal("invalid node status"))?
|
||||
{
|
||||
veilid_capnp::node_status::PublicInternet(pi) => {
|
||||
let r = pi.map_err(RPCError::protocol)?;
|
||||
let pins = decode_public_internet_node_status(&r)?;
|
||||
NodeStatus::PublicInternet(pins)
|
||||
}
|
||||
veilid_capnp::node_status::LocalNetwork(ln) => {
|
||||
let r = ln.map_err(RPCError::protocol)?;
|
||||
let lnns = decode_local_network_node_status(&r)?;
|
||||
NodeStatus::LocalNetwork(lnns)
|
||||
}
|
||||
},
|
||||
)
|
||||
}
|
||||
|
@ -25,7 +25,7 @@ impl RPCOperationKind {
|
||||
let out = match which_reader {
|
||||
veilid_capnp::operation::kind::Which::Question(r) => {
|
||||
let q_reader = r.map_err(RPCError::protocol)?;
|
||||
let out = RPCQuestion::decode(&q_reader, sender_node_id)?;
|
||||
let out = RPCQuestion::decode(&q_reader)?;
|
||||
RPCOperationKind::Question(out)
|
||||
}
|
||||
veilid_capnp::operation::kind::Which::Statement(r) => {
|
||||
@ -58,26 +58,37 @@ impl RPCOperationKind {
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct RPCOperation {
|
||||
op_id: u64,
|
||||
sender_node_info: Option<SignedNodeInfo>,
|
||||
kind: RPCOperationKind,
|
||||
}
|
||||
|
||||
impl RPCOperation {
|
||||
pub fn new_question(question: RPCQuestion) -> Self {
|
||||
pub fn new_question(question: RPCQuestion, sender_node_info: Option<SignedNodeInfo>) -> Self {
|
||||
Self {
|
||||
op_id: intf::get_random_u64(),
|
||||
sender_node_info,
|
||||
kind: RPCOperationKind::Question(question),
|
||||
}
|
||||
}
|
||||
pub fn new_statement(statement: RPCStatement) -> Self {
|
||||
pub fn new_statement(
|
||||
statement: RPCStatement,
|
||||
sender_node_info: Option<SignedNodeInfo>,
|
||||
) -> Self {
|
||||
Self {
|
||||
op_id: intf::get_random_u64(),
|
||||
sender_node_info,
|
||||
kind: RPCOperationKind::Statement(statement),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn new_answer(request: &RPCOperation, answer: RPCAnswer) -> Self {
|
||||
pub fn new_answer(
|
||||
request: &RPCOperation,
|
||||
answer: RPCAnswer,
|
||||
sender_node_info: Option<SignedNodeInfo>,
|
||||
) -> Self {
|
||||
Self {
|
||||
op_id: request.op_id,
|
||||
sender_node_info,
|
||||
kind: RPCOperationKind::Answer(answer),
|
||||
}
|
||||
}
|
||||
@ -86,6 +97,10 @@ impl RPCOperation {
|
||||
self.op_id
|
||||
}
|
||||
|
||||
pub fn sender_node_info(&self) -> Option<&SignedNodeInfo> {
|
||||
self.sender_node_info.as_ref()
|
||||
}
|
||||
|
||||
pub fn kind(&self) -> &RPCOperationKind {
|
||||
&self.kind
|
||||
}
|
||||
@ -100,14 +115,32 @@ impl RPCOperation {
|
||||
) -> Result<Self, RPCError> {
|
||||
let op_id = operation_reader.get_op_id();
|
||||
|
||||
let sender_node_info = if operation_reader.has_sender_node_info() {
|
||||
let sni_reader = operation_reader
|
||||
.get_sender_node_info()
|
||||
.map_err(RPCError::protocol)?;
|
||||
let sni = decode_signed_node_info(&sni_reader, sender_node_id, true)?;
|
||||
Some(sni)
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
let kind_reader = operation_reader.get_kind();
|
||||
let kind = RPCOperationKind::decode(&kind_reader, sender_node_id)?;
|
||||
|
||||
Ok(RPCOperation { op_id, kind })
|
||||
Ok(RPCOperation {
|
||||
op_id,
|
||||
sender_node_info,
|
||||
kind,
|
||||
})
|
||||
}
|
||||
|
||||
pub fn encode(&self, builder: &mut veilid_capnp::operation::Builder) -> Result<(), RPCError> {
|
||||
builder.set_op_id(self.op_id);
|
||||
if let Some(sender_info) = &self.sender_node_info {
|
||||
let mut si_builder = builder.reborrow().init_sender_node_info();
|
||||
encode_signed_node_info(&sender_info, &mut si_builder)?;
|
||||
}
|
||||
let mut k_builder = builder.reborrow().init_kind();
|
||||
self.kind.encode(&mut k_builder)?;
|
||||
Ok(())
|
||||
|
@ -18,21 +18,12 @@ impl RPCQuestion {
|
||||
pub fn detail(&self) -> &RPCQuestionDetail {
|
||||
&self.detail
|
||||
}
|
||||
// pub fn into_detail(self) -> RPCQuestionDetail {
|
||||
// self.detail
|
||||
// }
|
||||
// pub fn into_respond_to_detail(self) -> (RespondTo, RPCQuestionDetail) {
|
||||
// (self.respond_to, self.detail)
|
||||
// }
|
||||
pub fn desc(&self) -> &'static str {
|
||||
self.detail.desc()
|
||||
}
|
||||
pub fn decode(
|
||||
reader: &veilid_capnp::question::Reader,
|
||||
sender_node_id: &DHTKey,
|
||||
) -> Result<RPCQuestion, RPCError> {
|
||||
pub fn decode(reader: &veilid_capnp::question::Reader) -> Result<RPCQuestion, RPCError> {
|
||||
let rt_reader = reader.get_respond_to();
|
||||
let respond_to = RespondTo::decode(&rt_reader, sender_node_id)?;
|
||||
let respond_to = RespondTo::decode(&rt_reader)?;
|
||||
let d_reader = reader.get_detail();
|
||||
let detail = RPCQuestionDetail::decode(&d_reader)?;
|
||||
Ok(RPCQuestion { respond_to, detail })
|
||||
|
@ -3,7 +3,7 @@ use rpc_processor::*;
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
pub enum RespondTo {
|
||||
Sender(Option<SignedNodeInfo>),
|
||||
Sender,
|
||||
PrivateRoute(PrivateRoute),
|
||||
}
|
||||
|
||||
@ -13,11 +13,7 @@ impl RespondTo {
|
||||
builder: &mut veilid_capnp::question::respond_to::Builder,
|
||||
) -> Result<(), RPCError> {
|
||||
match self {
|
||||
Self::Sender(Some(sni)) => {
|
||||
let mut sni_builder = builder.reborrow().init_sender_with_info();
|
||||
encode_signed_node_info(sni, &mut sni_builder)?;
|
||||
}
|
||||
Self::Sender(None) => {
|
||||
Self::Sender => {
|
||||
builder.reborrow().set_sender(());
|
||||
}
|
||||
Self::PrivateRoute(pr) => {
|
||||
@ -28,17 +24,9 @@ impl RespondTo {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn decode(
|
||||
reader: &veilid_capnp::question::respond_to::Reader,
|
||||
sender_node_id: &DHTKey,
|
||||
) -> Result<Self, RPCError> {
|
||||
pub fn decode(reader: &veilid_capnp::question::respond_to::Reader) -> Result<Self, RPCError> {
|
||||
let respond_to = match reader.which().map_err(RPCError::protocol)? {
|
||||
veilid_capnp::question::respond_to::Sender(()) => RespondTo::Sender(None),
|
||||
veilid_capnp::question::respond_to::SenderWithInfo(sender_ni_reader) => {
|
||||
let sender_ni_reader = sender_ni_reader.map_err(RPCError::protocol)?;
|
||||
let sni = decode_signed_node_info(&sender_ni_reader, sender_node_id, true)?;
|
||||
RespondTo::Sender(Some(sni))
|
||||
}
|
||||
veilid_capnp::question::respond_to::Sender(()) => RespondTo::Sender,
|
||||
veilid_capnp::question::respond_to::PrivateRoute(pr_reader) => {
|
||||
let pr_reader = pr_reader.map_err(RPCError::protocol)?;
|
||||
let pr = decode_private_route(&pr_reader)?;
|
||||
|
188  veilid-core/src/rpc_processor/destination.rs (new file)
@ -0,0 +1,188 @@
use super::*;

/// Where to send an RPC message
#[derive(Debug, Clone)]
pub enum Destination {
    /// Send to node directly
    Direct {
        /// The node to send to
        target: NodeRef,
        /// An optional safety route specification to send from for sender privacy
        safety_route_spec: Option<Arc<SafetyRouteSpec>>,
    },
    /// Send to node for relay purposes
    Relay {
        /// The relay to send to
        relay: NodeRef,
        /// The final destination the relay should send to
        target: DHTKey,
        /// An optional safety route specification to send from for sender privacy
        safety_route_spec: Option<Arc<SafetyRouteSpec>>,
    },
    /// Send to private route (privateroute)
    PrivateRoute {
        /// A private route to send to
        private_route: PrivateRoute,
        /// An optional safety route specification to send from for sender privacy
        safety_route_spec: Option<Arc<SafetyRouteSpec>>,
    },
}
|
||||
|
||||
impl Destination {
|
||||
pub fn direct(target: NodeRef) -> Self {
|
||||
Self::Direct {
|
||||
target,
|
||||
safety_route_spec: None,
|
||||
}
|
||||
}
|
||||
pub fn relay(relay: NodeRef, target: DHTKey) -> Self {
|
||||
Self::Relay {
|
||||
relay,
|
||||
target,
|
||||
safety_route_spec: None,
|
||||
}
|
||||
}
|
||||
pub fn private_route(private_route: PrivateRoute) -> Self {
|
||||
Self::PrivateRoute {
|
||||
private_route,
|
||||
safety_route_spec: None,
|
||||
}
|
||||
}
|
||||
// pub fn target_id(&self) -> DHTKey {
|
||||
// match self {
|
||||
// Destination::Direct {
|
||||
// target,
|
||||
// safety_route_spec,
|
||||
// } => target.node_id(),
|
||||
// Destination::Relay {
|
||||
// relay,
|
||||
// target,
|
||||
// safety_route_spec,
|
||||
// } => *target,
|
||||
// Destination::PrivateRoute {
|
||||
// private_route,
|
||||
// safety_route_spec,
|
||||
// } => {}
|
||||
// }
|
||||
// }
|
||||
|
||||
// pub fn best_routing_domain(&self) -> RoutingDomain {
|
||||
// match self {
|
||||
// Destination::Direct {
|
||||
// target,
|
||||
// safety_route_spec,
|
||||
// } => {
|
||||
// if safety_route_spec.is_some() {
|
||||
// RoutingDomain::PublicInternet
|
||||
// } else {
|
||||
// target
|
||||
// .best_routing_domain()
|
||||
// .unwrap_or(RoutingDomain::PublicInternet)
|
||||
// }
|
||||
// }
|
||||
// Destination::Relay {
|
||||
// relay,
|
||||
// target,
|
||||
// safety_route_spec,
|
||||
// } => {
|
||||
// if safety_route_spec.is_some() {
|
||||
// RoutingDomain::PublicInternet
|
||||
// } else {
|
||||
// relay
|
||||
// .best_routing_domain()
|
||||
// .unwrap_or(RoutingDomain::PublicInternet)
|
||||
// }
|
||||
// }
|
||||
// Destination::PrivateRoute {
|
||||
// private_route: _,
|
||||
// safety_route_spec: _,
|
||||
// } => RoutingDomain::PublicInternet,
|
||||
// }
|
||||
// }
|
||||
|
||||
pub fn safety_route_spec(&self) -> Option<Arc<SafetyRouteSpec>> {
|
||||
match self {
|
||||
Destination::Direct {
|
||||
target: _,
|
||||
safety_route_spec,
|
||||
} => safety_route_spec.clone(),
|
||||
Destination::Relay {
|
||||
relay: _,
|
||||
target: _,
|
||||
safety_route_spec,
|
||||
} => safety_route_spec.clone(),
|
||||
Destination::PrivateRoute {
|
||||
private_route: _,
|
||||
safety_route_spec,
|
||||
} => safety_route_spec.clone(),
|
||||
}
|
||||
}
|
||||
pub fn with_safety_route_spec(self, safety_route_spec: Arc<SafetyRouteSpec>) -> Self {
|
||||
match self {
|
||||
Destination::Direct {
|
||||
target,
|
||||
safety_route_spec: _,
|
||||
} => Self::Direct {
|
||||
target,
|
||||
safety_route_spec: Some(safety_route_spec),
|
||||
},
|
||||
Destination::Relay {
|
||||
relay,
|
||||
target,
|
||||
safety_route_spec: _,
|
||||
} => Self::Relay {
|
||||
relay,
|
||||
target,
|
||||
safety_route_spec: Some(safety_route_spec),
|
||||
},
|
||||
Destination::PrivateRoute {
|
||||
private_route,
|
||||
safety_route_spec: _,
|
||||
} => Self::PrivateRoute {
|
||||
private_route,
|
||||
safety_route_spec: Some(safety_route_spec),
|
||||
},
|
||||
}
|
||||
}
|
||||
}
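The constructors above keep the safety route optional until it is explicitly attached. A sketch with hypothetical `target_nr`, `relay_nr`, `private_route`, and `safety_route_spec` values:

// Sketch: the three ways a destination is typically built.
let d1 = Destination::direct(target_nr.clone());
let d2 = Destination::relay(relay_nr, target_nr.node_id());
let d3 = Destination::private_route(private_route).with_safety_route_spec(safety_route_spec);
// d1 and d2 leave the safety route unset; because d3 carries one,
// get_sender_signed_node_info() later declines to attach our own node info to it.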
|
||||
|
||||
impl fmt::Display for Destination {
|
||||
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||
match self {
|
||||
Destination::Direct {
|
||||
target,
|
||||
safety_route_spec,
|
||||
} => {
|
||||
let sr = safety_route_spec
|
||||
.as_ref()
|
||||
.map(|_sr| "+SR".to_owned())
|
||||
.unwrap_or_default();
|
||||
|
||||
write!(f, "{:?}{}", target, sr)
|
||||
}
|
||||
Destination::Relay {
|
||||
relay,
|
||||
target,
|
||||
safety_route_spec,
|
||||
} => {
|
||||
let sr = safety_route_spec
|
||||
.as_ref()
|
||||
.map(|_sr| "+SR".to_owned())
|
||||
.unwrap_or_default();
|
||||
|
||||
write!(f, "{:?}@{:?}{}", target.encode(), relay, sr)
|
||||
}
|
||||
Destination::PrivateRoute {
|
||||
private_route,
|
||||
safety_route_spec,
|
||||
} => {
|
||||
let sr = safety_route_spec
|
||||
.as_ref()
|
||||
.map(|_sr| "+SR".to_owned())
|
||||
.unwrap_or_default();
|
||||
|
||||
write!(f, "{}{}", private_route, sr)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
@ -1,4 +1,5 @@
|
||||
mod coders;
|
||||
mod destination;
|
||||
mod private_route;
|
||||
mod rpc_cancel_tunnel;
|
||||
mod rpc_complete_tunnel;
|
||||
@ -18,6 +19,7 @@ mod rpc_validate_dial_info;
|
||||
mod rpc_value_changed;
|
||||
mod rpc_watch_value;
|
||||
|
||||
pub use destination::*;
|
||||
pub use private_route::*;
|
||||
pub use rpc_error::*;
|
||||
|
||||
@ -36,33 +38,6 @@ use stop_token::future::FutureExt;
|
||||
|
||||
type OperationId = u64;
|
||||
|
||||
/// Where to send an RPC message
|
||||
#[derive(Debug, Clone)]
|
||||
pub enum Destination {
|
||||
/// Send to node (target noderef)
|
||||
Direct(NodeRef),
|
||||
/// Send to node for relay purposes (relay noderef, target nodeid)
|
||||
Relay(NodeRef, DHTKey),
|
||||
/// Send to private route (privateroute)
|
||||
PrivateRoute(PrivateRoute),
|
||||
}
|
||||
|
||||
impl fmt::Display for Destination {
|
||||
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||
match self {
|
||||
Destination::Direct(nr) => {
|
||||
write!(f, "{:?}", nr)
|
||||
}
|
||||
Destination::Relay(nr, key) => {
|
||||
write!(f, "{:?}@{:?}", key.encode(), nr)
|
||||
}
|
||||
Destination::PrivateRoute(pr) => {
|
||||
write!(f, "{}", pr)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// The decoded header of an RPC message
|
||||
#[derive(Debug, Clone)]
|
||||
struct RPCMessageHeader {
|
||||
@ -76,6 +51,8 @@ struct RPCMessageHeader {
|
||||
peer_noderef: NodeRef,
|
||||
/// The connection from the peer that sent the message (not the original sender)
|
||||
connection_descriptor: ConnectionDescriptor,
|
||||
/// The routing domain the message was sent through
|
||||
routing_domain: RoutingDomain,
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
@ -177,7 +154,6 @@ pub struct RPCProcessorInner {
|
||||
pub struct RPCProcessor {
|
||||
crypto: Crypto,
|
||||
config: VeilidConfig,
|
||||
enable_local_peer_scope: bool,
|
||||
inner: Arc<Mutex<RPCProcessorInner>>,
|
||||
}
|
||||
|
||||
@ -200,11 +176,6 @@ impl RPCProcessor {
|
||||
Self {
|
||||
crypto: network_manager.crypto(),
|
||||
config: network_manager.config(),
|
||||
enable_local_peer_scope: network_manager
|
||||
.config()
|
||||
.get()
|
||||
.network
|
||||
.enable_local_peer_scope,
|
||||
inner: Arc::new(Mutex::new(Self::new_inner(network_manager))),
|
||||
}
|
||||
}
|
||||
@ -227,28 +198,10 @@ impl RPCProcessor {
|
||||
|
||||
//////////////////////////////////////////////////////////////////////
|
||||
|
||||
fn filter_peer_scope(&self, node_info: &NodeInfo) -> bool {
|
||||
// if local peer scope is enabled, then don't reject any peer info
|
||||
if self.enable_local_peer_scope {
|
||||
return true;
|
||||
}
|
||||
|
||||
// reject attempts to include non-public addresses in results
|
||||
for did in &node_info.dial_info_detail_list {
|
||||
if !did.dial_info.is_global() {
|
||||
// non-public address causes rejection
|
||||
return false;
|
||||
}
|
||||
}
|
||||
if let Some(rpi) = &node_info.relay_peer_info {
|
||||
for did in &rpi.signed_node_info.node_info.dial_info_detail_list {
|
||||
if !did.dial_info.is_global() {
|
||||
// non-public address causes rejection
|
||||
return false;
|
||||
}
|
||||
}
|
||||
}
|
||||
true
|
||||
/// Determine if a NodeInfo can be placed into the specified routing domain
|
||||
fn filter_node_info(&self, routing_domain: RoutingDomain, node_info: &NodeInfo) -> bool {
|
||||
let routing_table = self.routing_table();
|
||||
routing_table.node_info_is_valid_in_routing_domain(routing_domain, &node_info)
|
||||
}
|
||||
|
||||
//////////////////////////////////////////////////////////////////////
|
||||
@ -370,7 +323,7 @@ impl RPCProcessor {
|
||||
.await
|
||||
.into_timeout_or();
|
||||
Ok(res.map(|res| {
|
||||
let (span_id, rpcreader) = res.take_value().unwrap();
|
||||
let (_span_id, rpcreader) = res.take_value().unwrap();
|
||||
let end_ts = intf::get_timestamp();
|
||||
|
||||
// fixme: causes crashes? "Missing otel data span extensions"??
|
||||
@ -390,17 +343,12 @@ impl RPCProcessor {
|
||||
Err(_) | Ok(TimeoutOr::Timeout) => {
|
||||
self.cancel_op_id_waiter(waitable_reply.op_id);
|
||||
|
||||
self.routing_table()
|
||||
.stats_question_lost(waitable_reply.node_ref.clone());
|
||||
waitable_reply.node_ref.stats_question_lost();
|
||||
}
|
||||
Ok(TimeoutOr::Value((rpcreader, _))) => {
|
||||
// Note that the remote node definitely received this node info since we got a reply
|
||||
waitable_reply.node_ref.set_seen_our_node_info();
|
||||
|
||||
// Reply received
|
||||
let recv_ts = intf::get_timestamp();
|
||||
self.routing_table().stats_answer_rcvd(
|
||||
waitable_reply.node_ref,
|
||||
waitable_reply.node_ref.stats_answer_rcvd(
|
||||
waitable_reply.send_ts,
|
||||
recv_ts,
|
||||
rpcreader.header.body_len,
|
||||
@ -411,34 +359,14 @@ impl RPCProcessor {
|
||||
out
|
||||
}
|
||||
|
||||
/// Gets a 'RespondTo::Sender' that contains either our dial info,
|
||||
/// or None if the peer has seen our dial info before or our node info is not yet valid
|
||||
/// because of an unknown network class
|
||||
pub fn make_respond_to_sender(&self, peer: NodeRef) -> RespondTo {
|
||||
if peer.has_seen_our_node_info()
|
||||
|| matches!(
|
||||
self.network_manager()
|
||||
.get_network_class()
|
||||
.unwrap_or(NetworkClass::Invalid),
|
||||
NetworkClass::Invalid
|
||||
)
|
||||
{
|
||||
RespondTo::Sender(None)
|
||||
} else {
|
||||
let our_sni = self.routing_table().get_own_signed_node_info();
|
||||
RespondTo::Sender(Some(our_sni))
|
||||
}
|
||||
}
|
||||
|
||||
/// Produce a byte buffer that represents the wire encoding of the entire
|
||||
/// unencrypted envelope body for a RPC message. This incorporates
|
||||
/// wrapping a private and/or safety route if they are specified.
|
||||
#[instrument(level = "debug", skip(self, operation, safety_route_spec), err)]
|
||||
#[instrument(level = "debug", skip(self, operation), err)]
|
||||
fn render_operation(
|
||||
&self,
|
||||
dest: Destination,
|
||||
operation: &RPCOperation,
|
||||
safety_route_spec: Option<&SafetyRouteSpec>,
|
||||
) -> Result<RenderedOperation, RPCError> {
|
||||
let out_node_id; // Envelope Node Id
|
||||
let mut out_node_ref: Option<NodeRef> = None; // Node to send envelope to
|
||||
@ -456,12 +384,25 @@ impl RPCProcessor {
|
||||
|
||||
// To where are we sending the request
|
||||
match dest {
|
||||
Destination::Direct(ref node_ref) | Destination::Relay(ref node_ref, _) => {
|
||||
Destination::Direct {
|
||||
target: ref node_ref,
|
||||
ref safety_route_spec,
|
||||
}
|
||||
| Destination::Relay {
|
||||
relay: ref node_ref,
|
||||
target: _,
|
||||
ref safety_route_spec,
|
||||
} => {
|
||||
// Send to a node without a private route
|
||||
// --------------------------------------
|
||||
|
||||
// Get the actual destination node id accounting for relays
|
||||
let (node_ref, node_id) = if let Destination::Relay(_, dht_key) = dest {
|
||||
let (node_ref, node_id) = if let Destination::Relay {
|
||||
relay: _,
|
||||
target: ref dht_key,
|
||||
safety_route_spec: _,
|
||||
} = dest
|
||||
{
|
||||
(node_ref.clone(), dht_key.clone())
|
||||
} else {
|
||||
let node_id = node_ref.node_id();
|
||||
@ -469,7 +410,7 @@ impl RPCProcessor {
|
||||
};
|
||||
|
||||
// Handle the existence of safety route
|
||||
match safety_route_spec {
|
||||
match safety_route_spec.as_ref() {
|
||||
None => {
|
||||
// If no safety route is being used, and we're not sending to a private
|
||||
// route, we can use a direct envelope instead of routing
|
||||
@ -493,12 +434,16 @@ impl RPCProcessor {
|
||||
.dial_info
|
||||
.node_id
|
||||
.key;
|
||||
out_message = self.wrap_with_route(Some(sr), private_route, message_vec)?;
|
||||
out_message =
|
||||
self.wrap_with_route(Some(sr.clone()), private_route, message_vec)?;
|
||||
out_hop_count = 1 + sr.hops.len();
|
||||
}
|
||||
};
|
||||
}
|
||||
Destination::PrivateRoute(private_route) => {
|
||||
Destination::PrivateRoute {
|
||||
private_route,
|
||||
safety_route_spec,
|
||||
} => {
|
||||
// Send to private route
|
||||
// ---------------------
|
||||
// Reply with 'route' operation
|
||||
@ -544,16 +489,65 @@ impl RPCProcessor {
|
||||
})
|
||||
}
|
||||
|
||||
// Get signed node info to package with RPC messages to improve
|
||||
// routing table caching when it is okay to do so
|
||||
// This is only done in the PublicInternet routing domain because
|
||||
// as far as we can tell this is the only domain that will really benefit
|
||||
fn get_sender_signed_node_info(&self, dest: &Destination) -> Option<SignedNodeInfo> {
|
||||
// Don't do this if the sender is to remain private
|
||||
if dest.safety_route_spec().is_some() {
|
||||
return None;
|
||||
}
|
||||
// Don't do this if our own signed node info isn't valid yet
|
||||
let routing_table = self.routing_table();
|
||||
if !routing_table.has_valid_own_node_info(RoutingDomain::PublicInternet) {
|
||||
return None;
|
||||
}
|
||||
|
||||
match dest {
|
||||
Destination::Direct {
|
||||
target,
|
||||
safety_route_spec: _,
|
||||
} => {
|
||||
// If the target has seen our node info already don't do this
|
||||
if target.has_seen_our_node_info(RoutingDomain::PublicInternet) {
|
||||
return None;
|
||||
}
|
||||
Some(routing_table.get_own_signed_node_info(RoutingDomain::PublicInternet))
|
||||
}
|
||||
Destination::Relay {
|
||||
relay: _,
|
||||
target,
|
||||
safety_route_spec: _,
|
||||
} => {
|
||||
if let Some(target) = routing_table.lookup_node_ref(*target) {
|
||||
if target.has_seen_our_node_info(RoutingDomain::PublicInternet) {
|
||||
return None;
|
||||
}
|
||||
Some(routing_table.get_own_signed_node_info(RoutingDomain::PublicInternet))
|
||||
} else {
|
||||
None
|
||||
}
|
||||
}
|
||||
Destination::PrivateRoute {
|
||||
private_route: _,
|
||||
safety_route_spec: _,
|
||||
} => None,
|
||||
}
|
||||
}
|
||||
|
||||
// Issue a question over the network, possibly using an anonymized route
|
||||
#[instrument(level = "debug", skip(self, question, safety_route_spec), err)]
|
||||
#[instrument(level = "debug", skip(self, question), err)]
|
||||
async fn question(
|
||||
&self,
|
||||
dest: Destination,
|
||||
question: RPCQuestion,
|
||||
safety_route_spec: Option<&SafetyRouteSpec>,
|
||||
) -> Result<NetworkResult<WaitableReply>, RPCError> {
|
||||
// Get sender info if we should send that
|
||||
let opt_sender_info = self.get_sender_signed_node_info(&dest);
|
||||
|
||||
// Wrap question in operation
|
||||
let operation = RPCOperation::new_question(question);
|
||||
let operation = RPCOperation::new_question(question, opt_sender_info);
|
||||
let op_id = operation.op_id();
|
||||
|
||||
// Log rpc send
|
||||
@ -565,7 +559,7 @@ impl RPCProcessor {
|
||||
node_id,
|
||||
node_ref,
|
||||
hop_count,
|
||||
} = self.render_operation(dest, &operation, safety_route_spec)?;
|
||||
} = self.render_operation(dest, &operation)?;
|
||||
|
||||
// If we need to resolve the first hop, do it
|
||||
let node_ref = match node_ref {
|
||||
@ -595,20 +589,19 @@ impl RPCProcessor {
|
||||
.map_err(|e| {
|
||||
// If we're returning an error, clean up
|
||||
self.cancel_op_id_waiter(op_id);
|
||||
self.routing_table()
|
||||
.stats_failed_to_send(node_ref.clone(), send_ts, true);
|
||||
RPCError::network(e) })?
|
||||
=> {
|
||||
node_ref
|
||||
.stats_failed_to_send(send_ts, true);
|
||||
RPCError::network(e)
|
||||
})? => {
|
||||
// If we couldn't send we're still cleaning up
|
||||
self.cancel_op_id_waiter(op_id);
|
||||
self.routing_table()
|
||||
.stats_failed_to_send(node_ref, send_ts, true);
|
||||
node_ref
|
||||
.stats_failed_to_send(send_ts, true);
|
||||
}
|
||||
);
|
||||
|
||||
// Successfully sent
|
||||
self.routing_table()
|
||||
.stats_question_sent(node_ref.clone(), send_ts, bytes, true);
|
||||
node_ref.stats_question_sent(send_ts, bytes, true);
|
||||
|
||||
// Pass back waitable reply completion
|
||||
Ok(NetworkResult::value(WaitableReply {
|
||||
@ -622,15 +615,17 @@ impl RPCProcessor {
|
||||
}
|
||||
|
||||
// Issue a statement over the network, possibly using an anonymized route
|
||||
#[instrument(level = "debug", skip(self, statement, safety_route_spec), err)]
|
||||
#[instrument(level = "debug", skip(self, statement), err)]
|
||||
async fn statement(
|
||||
&self,
|
||||
dest: Destination,
|
||||
statement: RPCStatement,
|
||||
safety_route_spec: Option<&SafetyRouteSpec>,
|
||||
) -> Result<NetworkResult<()>, RPCError> {
|
||||
// Get sender info if we should send that
|
||||
let opt_sender_info = self.get_sender_signed_node_info(&dest);
|
||||
|
||||
// Wrap statement in operation
|
||||
let operation = RPCOperation::new_statement(statement);
|
||||
let operation = RPCOperation::new_statement(statement, opt_sender_info);
|
||||
|
||||
// Log rpc send
|
||||
debug!(target: "rpc_message", dir = "send", kind = "statement", op_id = operation.op_id(), desc = operation.kind().desc(), ?dest);
|
||||
@ -641,7 +636,7 @@ impl RPCProcessor {
|
||||
node_id,
|
||||
node_ref,
|
||||
hop_count: _,
|
||||
} = self.render_operation(dest, &operation, safety_route_spec)?;
|
||||
} = self.render_operation(dest, &operation)?;
|
||||
|
||||
// If we need to resolve the first hop, do it
|
||||
let node_ref = match node_ref {
|
||||
@ -663,18 +658,18 @@ impl RPCProcessor {
|
||||
.await
|
||||
.map_err(|e| {
|
||||
// If we're returning an error, clean up
|
||||
self.routing_table()
|
||||
.stats_failed_to_send(node_ref.clone(), send_ts, true);
|
||||
RPCError::network(e) })? => {
|
||||
node_ref
|
||||
.stats_failed_to_send(send_ts, true);
|
||||
RPCError::network(e)
|
||||
})? => {
|
||||
// If we couldn't send we're still cleaning up
|
||||
self.routing_table()
|
||||
.stats_failed_to_send(node_ref, send_ts, true);
|
||||
node_ref
|
||||
.stats_failed_to_send(send_ts, true);
|
||||
}
|
||||
);
|
||||
|
||||
// Successfully sent
|
||||
self.routing_table()
|
||||
.stats_question_sent(node_ref.clone(), send_ts, bytes, true);
|
||||
node_ref.stats_question_sent(send_ts, bytes, true);
|
||||
|
||||
Ok(NetworkResult::value(()))
|
||||
}
|
||||
@ -691,7 +686,7 @@ impl RPCProcessor {
|
||||
|
||||
// To where should we respond?
|
||||
match respond_to {
|
||||
RespondTo::Sender(_) => {
|
||||
RespondTo::Sender => {
|
||||
// Reply directly to the request's source
|
||||
let sender_id = request.header.envelope.get_sender_id();
|
||||
|
||||
@ -701,30 +696,32 @@ impl RPCProcessor {
|
||||
// If the sender_id is that of the peer, then this is a direct reply
|
||||
// else it is a relayed reply through the peer
|
||||
if peer_noderef.node_id() == sender_id {
|
||||
Destination::Direct(peer_noderef)
|
||||
Destination::direct(peer_noderef)
|
||||
} else {
|
||||
Destination::Relay(peer_noderef, sender_id)
|
||||
Destination::relay(peer_noderef, sender_id)
|
||||
}
|
||||
}
|
||||
RespondTo::PrivateRoute(pr) => Destination::PrivateRoute(pr.clone()),
|
||||
RespondTo::PrivateRoute(pr) => Destination::private_route(pr.clone()),
|
||||
}
|
||||
}
|
||||
|
||||
// Issue a reply over the network, possibly using an anonymized route
|
||||
// The request must want a response, or this routine fails
|
||||
#[instrument(level = "debug", skip(self, request, answer, safety_route_spec), err)]
|
||||
#[instrument(level = "debug", skip(self, request, answer), err)]
|
||||
async fn answer(
|
||||
&self,
|
||||
request: RPCMessage,
|
||||
answer: RPCAnswer,
|
||||
safety_route_spec: Option<&SafetyRouteSpec>,
|
||||
) -> Result<NetworkResult<()>, RPCError> {
|
||||
// Wrap answer in operation
|
||||
let operation = RPCOperation::new_answer(&request.operation, answer);
|
||||
|
||||
// Extract destination from respond_to
|
||||
let dest = self.get_respond_to_destination(&request);
|
||||
|
||||
// Get sender info if we should send that
|
||||
let opt_sender_info = self.get_sender_signed_node_info(&dest);
|
||||
|
||||
// Wrap answer in operation
|
||||
let operation = RPCOperation::new_answer(&request.operation, answer, opt_sender_info);
|
||||
|
||||
// Log rpc send
|
||||
debug!(target: "rpc_message", dir = "send", kind = "answer", op_id = operation.op_id(), desc = operation.kind().desc(), ?dest);
|
||||
|
||||
@ -734,7 +731,7 @@ impl RPCProcessor {
|
||||
node_id,
|
||||
node_ref,
|
||||
hop_count: _,
|
||||
} = self.render_operation(dest, &operation, safety_route_spec)?;
|
||||
} = self.render_operation(dest, &operation)?;
|
||||
|
||||
// If we need to resolve the first hop, do it
|
||||
let node_ref = match node_ref {
|
||||
@ -755,17 +752,18 @@ impl RPCProcessor {
|
||||
.await
|
||||
.map_err(|e| {
|
||||
// If we're returning an error, clean up
|
||||
self.routing_table()
|
||||
.stats_failed_to_send(node_ref.clone(), send_ts, true);
|
||||
RPCError::network(e) })? => {
|
||||
node_ref
|
||||
.stats_failed_to_send(send_ts, true);
|
||||
RPCError::network(e)
|
||||
})? => {
|
||||
// If we couldn't send we're still cleaning up
|
||||
self.routing_table()
|
||||
.stats_failed_to_send(node_ref.clone(), send_ts, false);
|
||||
node_ref
|
||||
.stats_failed_to_send(send_ts, false);
|
||||
}
|
||||
);
|
||||
|
||||
// Reply successfully sent
|
||||
self.routing_table().stats_answer_sent(node_ref, bytes);
|
||||
node_ref.stats_answer_sent(bytes);
|
||||
|
||||
Ok(NetworkResult::value(()))
|
||||
}
|
||||
@ -776,6 +774,9 @@ impl RPCProcessor {
|
||||
&self,
|
||||
encoded_msg: RPCMessageEncoded,
|
||||
) -> Result<(), RPCError> {
|
||||
// Get the routing domain this message came over
|
||||
let routing_domain = encoded_msg.header.routing_domain;
|
||||
|
||||
// Decode the operation
|
||||
let sender_node_id = encoded_msg.header.envelope.get_sender_id();
|
||||
|
||||
@ -789,34 +790,34 @@ impl RPCProcessor {
|
||||
RPCOperation::decode(&op_reader, &sender_node_id)?
|
||||
};
|
||||
|
||||
// Get the sender noderef, incorporating and 'sender node info' we have from a question
|
||||
// Get the sender noderef, incorporating any 'sender node info'
|
||||
let mut opt_sender_nr: Option<NodeRef> = None;
|
||||
match operation.kind() {
|
||||
RPCOperationKind::Question(q) => {
|
||||
match q.respond_to() {
|
||||
RespondTo::Sender(Some(sender_ni)) => {
|
||||
// Sender NodeInfo was specified, update our routing table with it
|
||||
if !self.filter_peer_scope(&sender_ni.node_info) {
|
||||
return Err(RPCError::invalid_format(
|
||||
"respond_to_sender_signed_node_info has invalid peer scope",
|
||||
));
|
||||
}
|
||||
opt_sender_nr = self.routing_table().register_node_with_signed_node_info(
|
||||
sender_node_id,
|
||||
sender_ni.clone(),
|
||||
false,
|
||||
);
|
||||
}
|
||||
_ => {}
|
||||
}
|
||||
if let Some(sender_node_info) = operation.sender_node_info() {
|
||||
// Sender NodeInfo was specified, update our routing table with it
|
||||
if !self.filter_node_info(RoutingDomain::PublicInternet, &sender_node_info.node_info) {
|
||||
return Err(RPCError::invalid_format(
|
||||
"sender signednodeinfo has invalid peer scope",
|
||||
));
|
||||
}
|
||||
_ => {}
|
||||
};
|
||||
opt_sender_nr = self.routing_table().register_node_with_signed_node_info(
|
||||
routing_domain,
|
||||
sender_node_id,
|
||||
sender_node_info.clone(),
|
||||
false,
|
||||
);
|
||||
}
|
||||
|
||||
// look up sender node, in case it's different than our peer due to relaying
|
||||
if opt_sender_nr.is_none() {
|
||||
// look up sender node, in case it's different than our peer due to relaying
|
||||
opt_sender_nr = self.routing_table().lookup_node_ref(sender_node_id)
|
||||
}
|
||||
|
||||
// Mark this sender as having seen our node info over this routing domain
|
||||
// because it managed to reach us over that routing domain
|
||||
if let Some(sender_nr) = &opt_sender_nr {
|
||||
sender_nr.set_seen_our_node_info(routing_domain);
|
||||
}
|
||||
|
||||
// Make the RPC message
|
||||
let msg = RPCMessage {
|
||||
header: encoded_msg.header,
|
||||
@ -828,21 +829,13 @@ impl RPCProcessor {
|
||||
let kind = match msg.operation.kind() {
|
||||
RPCOperationKind::Question(_) => {
|
||||
if let Some(sender_nr) = msg.opt_sender_nr.clone() {
|
||||
self.routing_table().stats_question_rcvd(
|
||||
sender_nr,
|
||||
msg.header.timestamp,
|
||||
msg.header.body_len,
|
||||
);
|
||||
sender_nr.stats_question_rcvd(msg.header.timestamp, msg.header.body_len);
|
||||
}
|
||||
"question"
|
||||
}
|
||||
RPCOperationKind::Statement(_) => {
|
||||
if let Some(sender_nr) = msg.opt_sender_nr.clone() {
|
||||
self.routing_table().stats_question_rcvd(
|
||||
sender_nr,
|
||||
msg.header.timestamp,
|
||||
msg.header.body_len,
|
||||
);
|
||||
sender_nr.stats_question_rcvd(msg.header.timestamp, msg.header.body_len);
|
||||
}
|
||||
"statement"
|
||||
}
|
||||
@ -900,7 +893,7 @@ impl RPCProcessor {
|
||||
stop_token: StopToken,
|
||||
receiver: flume::Receiver<(Option<Id>, RPCMessageEncoded)>,
|
||||
) {
|
||||
while let Ok(Ok((span_id, msg))) =
|
||||
while let Ok(Ok((_span_id, msg))) =
|
||||
receiver.recv_async().timeout_at(stop_token.clone()).await
|
||||
{
|
||||
let rpc_worker_span = span!(parent: None, Level::TRACE, "rpc_worker");
|
||||
@ -1001,6 +994,7 @@ impl RPCProcessor {
|
||||
body: Vec<u8>,
|
||||
peer_noderef: NodeRef,
|
||||
connection_descriptor: ConnectionDescriptor,
|
||||
routing_domain: RoutingDomain,
|
||||
) -> EyreResult<()> {
|
||||
let msg = RPCMessageEncoded {
|
||||
header: RPCMessageHeader {
|
||||
@ -1009,6 +1003,7 @@ impl RPCProcessor {
|
||||
body_len: body.len() as u64,
|
||||
peer_noderef,
|
||||
connection_descriptor,
|
||||
routing_domain,
|
||||
},
|
||||
data: RPCMessageData { contents: body },
|
||||
};
|
||||
|
53
veilid-core/src/rpc_processor/origin.rs
Normal file
@ -0,0 +1,53 @@
use super::*;

#[derive(Debug, Clone)]
pub enum Origin {
    Sender,
    PrivateRoute(PrivateRoute),
}

impl Origin {
    pub fn sender() -> Self {
        Self::Sender
    }

    pub fn private_route(private_route: PrivateRoute) -> Self {
        Self::PrivateRoute(private_route)
    }

    pub fn into_respond_to(self, destination: &Destination) -> Result<RespondTo, RPCError> {
        match self {
            Self::Sender => {
                let peer = match destination {
                    Destination::Direct {
                        target,
                        safety_route_spec,
                    } => todo!(),
                    Destination::Relay {
                        relay,
                        target,
                        safety_route_spec,
                    } => todo!(),
                    Destination::PrivateRoute {
                        private_route,
                        safety_route_spec,
                    } => todo!(),
                };
                let routing_table = peer.routing_table();
                let routing_domain = peer.best_routing_domain();
                // Send some signed node info along with the question if this node needs to be replied to
                if routing_table.has_valid_own_node_info()
                    && !peer.has_seen_our_node_info(routing_domain)
                {
                    let our_sni = self
                        .routing_table()
                        .get_own_signed_node_info(routing_domain);
                    RespondTo::Sender(Some(our_sni))
                } else {
                    RespondTo::Sender(None)
                }
            }
            Self::PrivateRoute(pr) => RespondTo::PrivateRoute(pr),
        }
    }
}
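
The three todo!() arms above presumably pick out the NodeRef that will be answering us, so that has_seen_our_node_info() can decide whether to attach our SignedNodeInfo. A sketch of how they might be completed, assuming the Direct and Relay variants carry NodeRefs and that a private-route destination exposes no single peer to attach sender info for; none of this is confirmed by the commit:

    // Hypothetical completion of the todo!() arms, for illustration only
    let peer: NodeRef = match destination {
        // Direct: we talk to the target itself, so it is the replier
        Destination::Direct { target, .. } => target.clone(),
        // Relayed: the relay is the node we reach directly
        Destination::Relay { relay, .. } => relay.clone(),
        // Private route: no peer to attach sender info for, so send none
        Destination::PrivateRoute { .. } => return Ok(RespondTo::Sender(None)),
    };
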
@ -4,7 +4,7 @@ impl RPCProcessor {
    //////////////////////////////////////////////////////////////////////
    fn compile_safety_route(
        &self,
        safety_route_spec: &SafetyRouteSpec,
        safety_route_spec: Arc<SafetyRouteSpec>,
        private_route: PrivateRoute,
    ) -> Result<SafetyRoute, RPCError> {
        // Ensure the total hop count isn't too long for our config
@ -111,15 +111,15 @@ impl RPCProcessor {
    // Wrap an operation inside a route
    pub(super) fn wrap_with_route(
        &self,
        safety_route_spec: Option<&SafetyRouteSpec>,
        safety_route_spec: Option<Arc<SafetyRouteSpec>>,
        private_route: PrivateRoute,
        message_data: Vec<u8>,
    ) -> Result<Vec<u8>, RPCError> {
        // Encrypt routed operation
        // Xmsg + ENC(Xmsg, DH(PKapr, SKbsr))
        let nonce = Crypto::get_random_nonce();
        let stub_safety_route_spec = SafetyRouteSpec::new();
        let safety_route_spec = safety_route_spec.unwrap_or(&stub_safety_route_spec);
        let safety_route_spec =
            safety_route_spec.unwrap_or_else(|| Arc::new(SafetyRouteSpec::new()));
        let dh_secret = self
            .crypto
            .cached_dh(&private_route.public_key, &safety_route_spec.secret_key)
@ -139,7 +139,7 @@ impl RPCProcessor {
            operation,
        };
        let operation =
            RPCOperation::new_statement(RPCStatement::new(RPCStatementDetail::Route(route)));
            RPCOperation::new_statement(RPCStatement::new(RPCStatementDetail::Route(route)), None);

        // Convert message to bytes and return it
        let mut route_msg = ::capnp::message::Builder::new_default();
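
Passing the safety route spec as Arc<SafetyRouteSpec> instead of &SafetyRouteSpec removes the need for the stub binding above: with a borrow, a freshly created default has to live in its own variable so the reference stays valid, while an owned Arc can be produced inline and cloned cheaply. A generic illustration of the pattern, using a made-up Spec type rather than the real SafetyRouteSpec:

    use std::sync::Arc;

    #[derive(Default)]
    struct Spec { hops: usize }

    fn use_spec_borrowed(spec: Option<&Spec>) {
        // A stub must be bound first so the reference outlives the unwrap_or
        let stub = Spec::default();
        let spec = spec.unwrap_or(&stub);
        let _ = spec.hops;
    }

    fn use_spec_arc(spec: Option<Arc<Spec>>) {
        // Owned Arc: the default can be created inline
        let spec = spec.unwrap_or_else(|| Arc::new(Spec::default()));
        let _ = spec.hops;
    }
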
|
@ -8,15 +8,13 @@ impl RPCProcessor {
|
||||
self,
|
||||
dest: Destination,
|
||||
key: DHTKey,
|
||||
safety_route: Option<&SafetyRouteSpec>,
|
||||
respond_to: RespondTo,
|
||||
) -> Result<NetworkResult<Answer<Vec<PeerInfo>>>, RPCError> {
|
||||
let find_node_q = RPCOperationFindNodeQ { node_id: key };
|
||||
let question = RPCQuestion::new(respond_to, RPCQuestionDetail::FindNodeQ(find_node_q));
|
||||
let find_node_q_detail =
|
||||
RPCQuestionDetail::FindNodeQ(RPCOperationFindNodeQ { node_id: key });
|
||||
let find_node_q = RPCQuestion::new(RespondTo::Sender, find_node_q_detail);
|
||||
|
||||
// Send the find_node request
|
||||
let waitable_reply =
|
||||
network_result_try!(self.question(dest, question, safety_route).await?);
|
||||
let waitable_reply = network_result_try!(self.question(dest, find_node_q).await?);
|
||||
|
||||
// Wait for reply
|
||||
let (msg, latency) = match self.wait_for_reply(waitable_reply).await? {
|
||||
@ -35,7 +33,10 @@ impl RPCProcessor {
|
||||
|
||||
// Verify peers are in the correct peer scope
|
||||
for peer_info in &find_node_a.peers {
|
||||
if !self.filter_peer_scope(&peer_info.signed_node_info.node_info) {
|
||||
if !self.filter_node_info(
|
||||
RoutingDomain::PublicInternet,
|
||||
&peer_info.signed_node_info.node_info,
|
||||
) {
|
||||
return Err(RPCError::invalid_format(
|
||||
"find_node response has invalid peer scope",
|
||||
));
|
||||
@ -61,19 +62,16 @@ impl RPCProcessor {
|
||||
|
||||
// add node information for the requesting node to our routing table
|
||||
let routing_table = self.routing_table();
|
||||
let rt2 = routing_table.clone();
|
||||
let rt3 = routing_table.clone();
|
||||
|
||||
// find N nodes closest to the target node in our routing table
|
||||
let own_peer_info = routing_table.get_own_peer_info();
|
||||
let own_peer_info_is_valid = own_peer_info.signed_node_info.is_valid();
|
||||
|
||||
let closest_nodes = routing_table.find_closest_nodes(
|
||||
find_node_q.node_id,
|
||||
// filter
|
||||
Some(move |_k, v| {
|
||||
RoutingTable::filter_has_valid_signed_node_info(v, own_peer_info_is_valid)
|
||||
}),
|
||||
move |_k, v| rt2.filter_has_valid_signed_node_info(RoutingDomain::PublicInternet, v),
|
||||
// transform
|
||||
move |k, v| RoutingTable::transform_to_peer_info(k, v, &own_peer_info),
|
||||
move |k, v| rt3.transform_to_peer_info(RoutingDomain::PublicInternet, k, v),
|
||||
);
|
||||
|
||||
// Make status answer
|
||||
@ -83,11 +81,7 @@ impl RPCProcessor {
|
||||
|
||||
// Send status answer
|
||||
let res = self
|
||||
.answer(
|
||||
msg,
|
||||
RPCAnswer::new(RPCAnswerDetail::FindNodeA(find_node_a)),
|
||||
None,
|
||||
)
|
||||
.answer(msg, RPCAnswer::new(RPCAnswerDetail::FindNodeA(find_node_a)))
|
||||
.await?;
|
||||
tracing::Span::current().record("res", &tracing::field::display(res));
|
||||
Ok(())
|
||||
|
@ -2,19 +2,29 @@ use super::*;
|
||||
|
||||
impl RPCProcessor {
|
||||
// Sends our node info to another node
|
||||
// Can be sent via all methods including relays and routes
|
||||
#[instrument(level = "trace", skip(self), ret, err)]
|
||||
pub async fn rpc_call_node_info_update(
|
||||
self,
|
||||
dest: Destination,
|
||||
safety_route: Option<&SafetyRouteSpec>,
|
||||
target: NodeRef,
|
||||
routing_domain: RoutingDomain,
|
||||
) -> Result<NetworkResult<()>, RPCError> {
|
||||
let signed_node_info = self.routing_table().get_own_signed_node_info();
|
||||
// Get the signed node info for the desired routing domain to send update with
|
||||
let signed_node_info = self
|
||||
.routing_table()
|
||||
.get_own_signed_node_info(routing_domain);
|
||||
let node_info_update = RPCOperationNodeInfoUpdate { signed_node_info };
|
||||
let statement = RPCStatement::new(RPCStatementDetail::NodeInfoUpdate(node_info_update));
|
||||
|
||||
// Send the node_info_update request
|
||||
network_result_try!(self.statement(dest, statement, safety_route).await?);
|
||||
// Send the node_info_update request to the specific routing domain requested
|
||||
network_result_try!(
|
||||
self.statement(
|
||||
Destination::direct(
|
||||
target.filtered_clone(NodeRefFilter::new().with_routing_domain(routing_domain))
|
||||
),
|
||||
statement,
|
||||
)
|
||||
.await?
|
||||
);
|
||||
|
||||
Ok(NetworkResult::value(()))
|
||||
}
|
||||
@ -22,6 +32,7 @@ impl RPCProcessor {
|
||||
#[instrument(level = "trace", skip(self, msg), fields(msg.operation.op_id), err)]
|
||||
pub(crate) async fn process_node_info_update(&self, msg: RPCMessage) -> Result<(), RPCError> {
|
||||
let sender_node_id = msg.header.envelope.get_sender_id();
|
||||
let routing_domain = msg.header.routing_domain;
|
||||
|
||||
// Get the statement
|
||||
let node_info_update = match msg.operation.into_kind() {
|
||||
@ -33,14 +44,13 @@ impl RPCProcessor {
|
||||
};
|
||||
|
||||
// Update our routing table with signed node info
|
||||
if !self.filter_peer_scope(&node_info_update.signed_node_info.node_info) {
|
||||
log_rpc!(debug
|
||||
"node_info_update has invalid peer scope from {}", sender_node_id
|
||||
);
|
||||
if !self.filter_node_info(routing_domain, &node_info_update.signed_node_info.node_info) {
|
||||
log_rpc!(debug "node info doesn't belong in {:?} routing domain: {}", routing_domain, sender_node_id);
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
self.routing_table().register_node_with_signed_node_info(
|
||||
routing_domain,
|
||||
sender_node_id,
|
||||
node_info_update.signed_node_info,
|
||||
false,
|
||||
|
@ -7,7 +7,6 @@ impl RPCProcessor {
|
||||
pub async fn rpc_call_return_receipt<D: AsRef<[u8]>>(
|
||||
self,
|
||||
dest: Destination,
|
||||
safety_route: Option<&SafetyRouteSpec>,
|
||||
receipt: D,
|
||||
) -> Result<NetworkResult<()>, RPCError> {
|
||||
let receipt = receipt.as_ref().to_vec();
|
||||
@ -16,7 +15,7 @@ impl RPCProcessor {
|
||||
let statement = RPCStatement::new(RPCStatementDetail::ReturnReceipt(return_receipt));
|
||||
|
||||
// Send the return_receipt request
|
||||
network_result_try!(self.statement(dest, statement, safety_route).await?);
|
||||
network_result_try!(self.statement(dest, statement).await?);
|
||||
|
||||
Ok(NetworkResult::value(()))
|
||||
}
|
||||
|
@ -7,15 +7,13 @@ impl RPCProcessor {
|
||||
pub async fn rpc_call_signal(
|
||||
self,
|
||||
dest: Destination,
|
||||
safety_route: Option<&SafetyRouteSpec>,
|
||||
signal_info: SignalInfo,
|
||||
) -> Result<NetworkResult<()>, RPCError> {
|
||||
//let signed_node_info = self.routing_table().get_own_signed_node_info();
|
||||
let signal = RPCOperationSignal { signal_info };
|
||||
let statement = RPCStatement::new(RPCStatementDetail::Signal(signal));
|
||||
|
||||
// Send the signal request
|
||||
network_result_try!(self.statement(dest, statement, safety_route).await?);
|
||||
network_result_try!(self.statement(dest, statement).await?);
|
||||
|
||||
Ok(NetworkResult::value(()))
|
||||
}
|
||||
|
@ -8,14 +8,21 @@ impl RPCProcessor {
|
||||
self,
|
||||
peer: NodeRef,
|
||||
) -> Result<NetworkResult<Answer<SenderInfo>>, RPCError> {
|
||||
let node_status = self.network_manager().generate_node_status();
|
||||
let routing_domain = match peer.best_routing_domain() {
|
||||
Some(rd) => rd,
|
||||
None => {
|
||||
return Ok(NetworkResult::no_connection_other(
|
||||
"no routing domain for peer",
|
||||
))
|
||||
}
|
||||
};
|
||||
let node_status = self.network_manager().generate_node_status(routing_domain);
|
||||
let status_q = RPCOperationStatusQ { node_status };
|
||||
let respond_to = self.make_respond_to_sender(peer.clone());
|
||||
let question = RPCQuestion::new(respond_to, RPCQuestionDetail::StatusQ(status_q));
|
||||
let question = RPCQuestion::new(RespondTo::Sender, RPCQuestionDetail::StatusQ(status_q));
|
||||
|
||||
// Send the info request
|
||||
let waitable_reply = network_result_try!(
|
||||
self.question(Destination::Direct(peer.clone()), question, None)
|
||||
self.question(Destination::direct(peer.clone()), question)
|
||||
.await?
|
||||
);
|
||||
|
||||
@ -37,28 +44,48 @@ impl RPCProcessor {
|
||||
_ => return Err(RPCError::invalid_format("not an answer")),
|
||||
};
|
||||
|
||||
// Ensure the returned node status is the kind for the routing domain we asked for
|
||||
match routing_domain {
|
||||
RoutingDomain::PublicInternet => {
|
||||
if !matches!(status_a.node_status, NodeStatus::PublicInternet(_)) {
|
||||
return Ok(NetworkResult::invalid_message(
|
||||
"node status doesn't match PublicInternet routing domain",
|
||||
));
|
||||
}
|
||||
}
|
||||
RoutingDomain::LocalNetwork => {
|
||||
if !matches!(status_a.node_status, NodeStatus::LocalNetwork(_)) {
|
||||
return Ok(NetworkResult::invalid_message(
|
||||
"node status doesn't match LocalNetwork routing domain",
|
||||
));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Update latest node status in routing table
|
||||
peer.operate_mut(|e| {
|
||||
e.update_node_status(status_a.node_status.clone());
|
||||
});
|
||||
peer.update_node_status(status_a.node_status);
|
||||
|
||||
// Report sender_info IP addresses to network manager
|
||||
// Don't need to validate these addresses for the current routing domain
|
||||
// the address itself is irrelevant, and the remote node can lie anyway
|
||||
if let Some(socket_address) = status_a.sender_info.socket_address {
|
||||
match send_data_kind {
|
||||
SendDataKind::Direct(connection_descriptor) => {
|
||||
match connection_descriptor.peer_scope() {
|
||||
PeerScope::Global => self.network_manager().report_global_socket_address(
|
||||
SendDataKind::Direct(connection_descriptor) => match routing_domain {
|
||||
RoutingDomain::PublicInternet => self
|
||||
.network_manager()
|
||||
.report_public_internet_socket_address(
|
||||
socket_address,
|
||||
connection_descriptor,
|
||||
peer,
|
||||
),
|
||||
PeerScope::Local => self.network_manager().report_local_socket_address(
|
||||
RoutingDomain::LocalNetwork => {
|
||||
self.network_manager().report_local_network_socket_address(
|
||||
socket_address,
|
||||
connection_descriptor,
|
||||
peer,
|
||||
),
|
||||
)
|
||||
}
|
||||
}
|
||||
},
|
||||
SendDataKind::Indirect => {
|
||||
// Do nothing in this case, as the socket address returned here would be for any node other than ours
|
||||
}
|
||||
@ -77,6 +104,7 @@ impl RPCProcessor {
|
||||
#[instrument(level = "trace", skip(self, msg), fields(msg.operation.op_id, res), err)]
|
||||
pub(crate) async fn process_status_q(&self, msg: RPCMessage) -> Result<(), RPCError> {
|
||||
let connection_descriptor = msg.header.connection_descriptor;
|
||||
let routing_domain = msg.header.routing_domain;
|
||||
|
||||
// Get the question
|
||||
let status_q = match msg.operation.kind() {
|
||||
@ -87,16 +115,30 @@ impl RPCProcessor {
|
||||
_ => panic!("not a question"),
|
||||
};
|
||||
|
||||
// Ensure the node status from the question is the kind for the routing domain we received the request in
|
||||
match routing_domain {
|
||||
RoutingDomain::PublicInternet => {
|
||||
if !matches!(status_q.node_status, NodeStatus::PublicInternet(_)) {
|
||||
log_rpc!(debug "node status doesn't match PublicInternet routing domain");
|
||||
return Ok(());
|
||||
}
|
||||
}
|
||||
RoutingDomain::LocalNetwork => {
|
||||
if !matches!(status_q.node_status, NodeStatus::LocalNetwork(_)) {
|
||||
log_rpc!(debug "node status doesn't match LocalNetwork routing domain");
|
||||
return Ok(());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// update node status for the requesting node to our routing table
|
||||
if let Some(sender_nr) = msg.opt_sender_nr.clone() {
|
||||
// Update latest node status in routing table for the statusq sender
|
||||
sender_nr.operate_mut(|e| {
|
||||
e.update_node_status(status_q.node_status.clone());
|
||||
});
|
||||
sender_nr.update_node_status(status_q.node_status.clone());
|
||||
}
|
||||
|
||||
// Make status answer
|
||||
let node_status = self.network_manager().generate_node_status();
|
||||
let node_status = self.network_manager().generate_node_status(routing_domain);
|
||||
|
||||
// Get the peer address in the returned sender info
|
||||
let sender_info = SenderInfo {
|
||||
@ -110,11 +152,7 @@ impl RPCProcessor {
|
||||
|
||||
// Send status answer
|
||||
let res = self
|
||||
.answer(
|
||||
msg,
|
||||
RPCAnswer::new(RPCAnswerDetail::StatusA(status_a)),
|
||||
None,
|
||||
)
|
||||
.answer(msg, RPCAnswer::new(RPCAnswerDetail::StatusA(status_a)))
|
||||
.await?;
|
||||
tracing::Span::current().record("res", &tracing::field::display(res));
|
||||
Ok(())
|
||||
|
@ -32,7 +32,7 @@ impl RPCProcessor {
|
||||
|
||||
// Send the validate_dial_info request
|
||||
// This can only be sent directly, as relays can not validate dial info
|
||||
network_result_value_or_log!(debug self.statement(Destination::Direct(peer), statement, None)
|
||||
network_result_value_or_log!(debug self.statement(Destination::direct(peer), statement)
|
||||
.await? => {
|
||||
return Ok(false);
|
||||
}
|
||||
@ -81,6 +81,7 @@ impl RPCProcessor {
|
||||
// an ipv6 address
|
||||
let routing_table = self.routing_table();
|
||||
let sender_id = msg.header.envelope.get_sender_id();
|
||||
let routing_domain = msg.header.routing_domain;
|
||||
let node_count = {
|
||||
let c = self.config.get();
|
||||
c.network.dht.max_find_node_count as usize
|
||||
@ -88,15 +89,18 @@ impl RPCProcessor {
|
||||
|
||||
// Filter on nodes that can validate dial info, and can reach a specific dial info
|
||||
let outbound_dial_info_entry_filter =
|
||||
RoutingTable::make_outbound_dial_info_entry_filter(dial_info.clone());
|
||||
RoutingTable::make_outbound_dial_info_entry_filter(
|
||||
routing_domain,
|
||||
dial_info.clone(),
|
||||
);
|
||||
let will_validate_dial_info_filter = |e: &BucketEntryInner| {
|
||||
if let Some(status) = &e.peer_stats().status {
|
||||
status.will_validate_dial_info
|
||||
if let Some(status) = &e.node_status(routing_domain) {
|
||||
status.will_validate_dial_info()
|
||||
} else {
|
||||
true
|
||||
}
|
||||
};
|
||||
let filter = RoutingTable::combine_filters(
|
||||
let filter = RoutingTable::combine_entry_filters(
|
||||
outbound_dial_info_entry_filter,
|
||||
will_validate_dial_info_filter,
|
||||
);
|
||||
@ -126,7 +130,7 @@ impl RPCProcessor {
|
||||
|
||||
// Send the validate_dial_info request
|
||||
// This can only be sent directly, as relays can not validate dial info
|
||||
network_result_value_or_log!(debug self.statement(Destination::Direct(peer), statement, None)
|
||||
network_result_value_or_log!(debug self.statement(Destination::direct(peer), statement)
|
||||
.await? => {
|
||||
return Ok(());
|
||||
}
|
||||
|
@ -224,7 +224,6 @@ fn config_callback(key: String) -> ConfigCallbackReturn {
|
||||
"network.upnp" => Ok(Box::new(false)),
|
||||
"network.natpmp" => Ok(Box::new(false)),
|
||||
"network.detect_address_changes" => Ok(Box::new(true)),
|
||||
"network.enable_local_peer_scope" => Ok(Box::new(false)),
|
||||
"network.restricted_nat_retries" => Ok(Box::new(3u32)),
|
||||
"network.tls.certificate_path" => Ok(Box::new(get_certfile_path())),
|
||||
"network.tls.private_key_path" => Ok(Box::new(get_keyfile_path())),
|
||||
@ -354,7 +353,6 @@ pub async fn test_config() {
|
||||
assert_eq!(inner.network.upnp, false);
|
||||
assert_eq!(inner.network.natpmp, false);
|
||||
assert_eq!(inner.network.detect_address_changes, true);
|
||||
assert_eq!(inner.network.enable_local_peer_scope, false);
|
||||
assert_eq!(inner.network.restricted_nat_retries, 3u32);
|
||||
assert_eq!(inner.network.tls.certificate_path, get_certfile_path());
|
||||
assert_eq!(inner.network.tls.private_key_path, get_keyfile_path());
|
||||
|
@ -45,6 +45,16 @@ fn get_address_type(text: &str) -> Option<AddressType> {
        None
    }
}
fn get_routing_domain(text: &str) -> Option<RoutingDomain> {
    let lctext = text.to_ascii_lowercase();
    if "publicinternet".starts_with(&lctext) {
        Some(RoutingDomain::PublicInternet)
    } else if "localnetwork".starts_with(&lctext) {
        Some(RoutingDomain::LocalNetwork)
    } else {
        None
    }
}

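get_routing_domain() accepts any case-insensitive prefix of the domain name, in the same style as the other debug-argument parsers. A quick illustration of the matching behavior (hypothetical asserts, not part of the source):

    assert_eq!(get_routing_domain("pub"), Some(RoutingDomain::PublicInternet));
    assert_eq!(get_routing_domain("LocalNet"), Some(RoutingDomain::LocalNetwork));
    assert_eq!(get_routing_domain("wan"), None); // not a prefix of either name
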
fn get_debug_argument<T, G: FnOnce(&str) -> Option<T>>(
|
||||
value: &str,
|
||||
@ -294,25 +304,41 @@ impl VeilidAPI {
|
||||
None => return Ok("Node id not found in routing table".to_owned()),
|
||||
};
|
||||
|
||||
if args.len() >= 2 {
|
||||
let pt = get_debug_argument_at(
|
||||
let mut ai = 1;
|
||||
let mut routing_domain = None;
|
||||
while ai < args.len() {
|
||||
if let Ok(pt) = get_debug_argument_at(
|
||||
&args,
|
||||
1,
|
||||
ai,
|
||||
"debug_contact",
|
||||
"protocol_type",
|
||||
get_protocol_type,
|
||||
)?;
|
||||
nr.merge_filter(DialInfoFilter::all().with_protocol_type(pt));
|
||||
if args.len() >= 3 {
|
||||
let at = get_debug_argument_at(
|
||||
&args,
|
||||
2,
|
||||
"debug_contact",
|
||||
"address_type",
|
||||
get_address_type,
|
||||
)?;
|
||||
nr.merge_filter(DialInfoFilter::all().with_address_type(at));
|
||||
) {
|
||||
nr.merge_filter(NodeRefFilter::new().with_protocol_type(pt));
|
||||
} else if let Ok(at) =
|
||||
get_debug_argument_at(&args, ai, "debug_contact", "address_type", get_address_type)
|
||||
{
|
||||
nr.merge_filter(NodeRefFilter::new().with_address_type(at));
|
||||
} else if let Ok(rd) = get_debug_argument_at(
|
||||
&args,
|
||||
ai,
|
||||
"debug_contact",
|
||||
"routing_domain",
|
||||
get_routing_domain,
|
||||
) {
|
||||
if routing_domain.is_none() {
|
||||
routing_domain = Some(rd);
|
||||
} else {
|
||||
return Ok("Multiple routing domains specified".to_owned());
|
||||
}
|
||||
} else {
|
||||
return Ok(format!("Invalid argument specified: {}", args[ai]));
|
||||
}
|
||||
ai += 1;
|
||||
}
|
||||
|
||||
if let Some(routing_domain) = routing_domain {
|
||||
nr.merge_filter(NodeRefFilter::new().with_routing_domain(routing_domain))
|
||||
}
|
||||
|
||||
let cm = network_manager.get_contact_method(nr);
|
||||
@ -321,33 +347,51 @@ impl VeilidAPI {
|
||||
}
|
||||
|
||||
async fn debug_ping(&self, args: String) -> Result<String, VeilidAPIError> {
|
||||
let netman = self.network_manager()?;
|
||||
let routing_table = netman.routing_table();
|
||||
let rpc = netman.rpc_processor();
|
||||
|
||||
let args: Vec<String> = args.split_whitespace().map(|s| s.to_owned()).collect();
|
||||
|
||||
let node_id = get_debug_argument_at(&args, 0, "debug_ping", "node_id", get_dht_key)?;
|
||||
|
||||
let routing_table = self.network_manager()?.routing_table();
|
||||
let mut nr = match routing_table.lookup_node_ref(node_id) {
|
||||
Some(nr) => nr,
|
||||
None => return Ok("Node id not found in routing table".to_owned()),
|
||||
};
|
||||
|
||||
if args.len() >= 2 {
|
||||
let pt =
|
||||
get_debug_argument_at(&args, 1, "debug_ping", "protocol_type", get_protocol_type)?;
|
||||
nr.merge_filter(DialInfoFilter::all().with_protocol_type(pt));
|
||||
if args.len() >= 3 {
|
||||
let at = get_debug_argument_at(
|
||||
&args,
|
||||
2,
|
||||
"debug_ping",
|
||||
"address_type",
|
||||
get_address_type,
|
||||
)?;
|
||||
nr.merge_filter(DialInfoFilter::all().with_address_type(at));
|
||||
let mut ai = 1;
|
||||
let mut routing_domain = None;
|
||||
while ai < args.len() {
|
||||
if let Ok(pt) =
|
||||
get_debug_argument_at(&args, ai, "debug_ping", "protocol_type", get_protocol_type)
|
||||
{
|
||||
nr.merge_filter(NodeRefFilter::new().with_protocol_type(pt));
|
||||
} else if let Ok(at) =
|
||||
get_debug_argument_at(&args, ai, "debug_ping", "address_type", get_address_type)
|
||||
{
|
||||
nr.merge_filter(NodeRefFilter::new().with_address_type(at));
|
||||
} else if let Ok(rd) = get_debug_argument_at(
|
||||
&args,
|
||||
ai,
|
||||
"debug_ping",
|
||||
"routing_domain",
|
||||
get_routing_domain,
|
||||
) {
|
||||
if routing_domain.is_none() {
|
||||
routing_domain = Some(rd);
|
||||
} else {
|
||||
return Ok("Multiple routing domains specified".to_owned());
|
||||
}
|
||||
} else {
|
||||
return Ok(format!("Invalid argument specified: {}", args[ai]));
|
||||
}
|
||||
ai += 1;
|
||||
}
|
||||
|
||||
let rpc = self.network_manager()?.rpc_processor();
|
||||
if let Some(routing_domain) = routing_domain {
|
||||
nr.merge_filter(NodeRefFilter::new().with_routing_domain(routing_domain))
|
||||
}
|
||||
|
||||
// Dump routing table entry
|
||||
let out = match rpc
|
||||
@ -383,7 +427,7 @@ impl VeilidAPI {
|
||||
attach
|
||||
detach
|
||||
restart network
|
||||
ping <node_id> [protocol_type [address_type]]
|
||||
ping <node_id> [protocol_type][address_type][routing_domain]
|
||||
contact <node_id> [protocol_type [address_type]]
|
||||
"#
|
||||
.to_owned())
|
||||
|
@ -215,22 +215,33 @@ impl fmt::Display for VeilidLogLevel {
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
|
||||
pub struct VeilidStateLog {
|
||||
pub log_level: VeilidLogLevel,
|
||||
pub message: String,
|
||||
pub backtrace: Option<String>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
|
||||
pub struct VeilidStateAttachment {
|
||||
pub state: AttachmentState,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
|
||||
pub struct PeerTableData {
|
||||
pub node_id: DHTKey,
|
||||
pub peer_address: PeerAddress,
|
||||
pub peer_stats: PeerStats,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
|
||||
pub struct VeilidStateNetwork {
|
||||
pub started: bool,
|
||||
#[serde(with = "json_as_string")]
|
||||
pub bps_down: u64,
|
||||
#[serde(with = "json_as_string")]
|
||||
pub bps_up: u64,
|
||||
pub peers: Vec<PeerTableData>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
@ -390,8 +401,12 @@ impl NetworkClass {
|
||||
}
|
||||
}
|
||||
|
||||
/// RoutingDomain-specific status for each node
|
||||
/// is returned by the StatusA call
|
||||
|
||||
/// PublicInternet RoutingDomain Status
|
||||
#[derive(Clone, Debug, Default, Serialize, Deserialize)]
|
||||
pub struct NodeStatus {
|
||||
pub struct PublicInternetNodeStatus {
|
||||
pub will_route: bool,
|
||||
pub will_tunnel: bool,
|
||||
pub will_signal: bool,
|
||||
@ -399,6 +414,51 @@ pub struct NodeStatus {
|
||||
pub will_validate_dial_info: bool,
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug, Default, Serialize, Deserialize)]
|
||||
pub struct LocalNetworkNodeStatus {
|
||||
pub will_relay: bool,
|
||||
pub will_validate_dial_info: bool,
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug, Serialize, Deserialize)]
|
||||
pub enum NodeStatus {
|
||||
PublicInternet(PublicInternetNodeStatus),
|
||||
LocalNetwork(LocalNetworkNodeStatus),
|
||||
}
|
||||
|
||||
impl NodeStatus {
|
||||
pub fn will_route(&self) -> bool {
|
||||
match self {
|
||||
NodeStatus::PublicInternet(pi) => pi.will_route,
|
||||
NodeStatus::LocalNetwork(_) => false,
|
||||
}
|
||||
}
|
||||
pub fn will_tunnel(&self) -> bool {
|
||||
match self {
|
||||
NodeStatus::PublicInternet(pi) => pi.will_tunnel,
|
||||
NodeStatus::LocalNetwork(_) => false,
|
||||
}
|
||||
}
|
||||
pub fn will_signal(&self) -> bool {
|
||||
match self {
|
||||
NodeStatus::PublicInternet(pi) => pi.will_signal,
|
||||
NodeStatus::LocalNetwork(_) => false,
|
||||
}
|
||||
}
|
||||
pub fn will_relay(&self) -> bool {
|
||||
match self {
|
||||
NodeStatus::PublicInternet(pi) => pi.will_relay,
|
||||
NodeStatus::LocalNetwork(ln) => ln.will_relay,
|
||||
}
|
||||
}
|
||||
pub fn will_validate_dial_info(&self) -> bool {
|
||||
match self {
|
||||
NodeStatus::PublicInternet(pi) => pi.will_validate_dial_info,
|
||||
NodeStatus::LocalNetwork(ln) => ln.will_validate_dial_info,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug, Serialize, Deserialize)]
|
||||
pub struct NodeInfo {
|
||||
pub network_class: NetworkClass,
|
||||
@ -411,9 +471,6 @@ pub struct NodeInfo {
|
||||
}
|
||||
|
||||
impl NodeInfo {
|
||||
pub fn is_valid(&self) -> bool {
|
||||
!matches!(self.network_class, NetworkClass::Invalid)
|
||||
}
|
||||
pub fn first_filtered_dial_info_detail<F>(&self, filter: F) -> Option<DialInfoDetail>
|
||||
where
|
||||
F: Fn(&DialInfoDetail) -> bool,
|
||||
@ -502,43 +559,6 @@ impl NodeInfo {
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug, Serialize, Deserialize)]
|
||||
pub struct LocalNodeInfo {
|
||||
pub dial_info_list: Vec<DialInfo>,
|
||||
}
|
||||
|
||||
impl LocalNodeInfo {
|
||||
pub fn first_filtered_dial_info<F>(&self, filter: F) -> Option<DialInfo>
|
||||
where
|
||||
F: Fn(&DialInfo) -> bool,
|
||||
{
|
||||
for di in &self.dial_info_list {
|
||||
if filter(di) {
|
||||
return Some(di.clone());
|
||||
}
|
||||
}
|
||||
None
|
||||
}
|
||||
|
||||
pub fn all_filtered_dial_info<F>(&self, filter: F) -> Vec<DialInfo>
|
||||
where
|
||||
F: Fn(&DialInfo) -> bool,
|
||||
{
|
||||
let mut dial_info_list = Vec::new();
|
||||
|
||||
for di in &self.dial_info_list {
|
||||
if filter(di) {
|
||||
dial_info_list.push(di.clone());
|
||||
}
|
||||
}
|
||||
dial_info_list
|
||||
}
|
||||
|
||||
pub fn has_dial_info(&self) -> bool {
|
||||
!self.dial_info_list.is_empty()
|
||||
}
|
||||
}
|
||||
|
||||
#[allow(clippy::derive_hash_xor_eq)]
|
||||
#[derive(Debug, PartialOrd, Ord, Hash, Serialize, Deserialize, EnumSetType)]
|
||||
// Keep member order appropriate for sorting < preference
|
||||
@ -590,13 +610,23 @@ pub enum AddressType {
|
||||
}
|
||||
pub type AddressTypeSet = EnumSet<AddressType>;
|
||||
|
||||
// Routing domain here is listed in order of preference, keep in order
#[allow(clippy::derive_hash_xor_eq)]
#[derive(Debug, Ord, PartialOrd, Hash, Serialize, Deserialize, EnumSetType)]
pub enum PeerScope {
    Global,
    Local,
pub enum RoutingDomain {
    LocalNetwork = 0,
    PublicInternet = 1,
}
pub type PeerScopeSet = EnumSet<PeerScope>;
impl RoutingDomain {
    pub const fn count() -> usize {
        2
    }
    pub const fn all() -> [RoutingDomain; RoutingDomain::count()] {
        // Routing domain here is listed in order of preference, keep in order
        [RoutingDomain::LocalNetwork, RoutingDomain::PublicInternet]
    }
}
pub type RoutingDomainSet = EnumSet<RoutingDomain>;
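
RoutingDomain::all() returns the domains in preference order, and the EnumSetType derive also gives set arithmetic over them. A small usage sketch (hypothetical, not from the source):

    // Iterate routing domains in preference order (LocalNetwork before PublicInternet)
    for routing_domain in RoutingDomain::all() {
        println!("checking {:?}", routing_domain);
    }

    // Build and query a RoutingDomainSet
    let domains: RoutingDomainSet = RoutingDomain::LocalNetwork | RoutingDomain::PublicInternet;
    assert!(domains.contains(RoutingDomain::PublicInternet));
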
|
||||
|
||||
#[derive(Copy, Clone, Debug, PartialEq, PartialOrd, Ord, Eq, Hash, Serialize, Deserialize)]
|
||||
pub enum Address {
|
||||
@ -687,6 +717,15 @@ impl Address {
|
||||
}
|
||||
}
|
||||
|
||||
impl fmt::Display for Address {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
match self {
|
||||
Address::IPV4(v4) => write!(f, "{}", v4),
|
||||
Address::IPV6(v6) => write!(f, "{}", v6),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl FromStr for Address {
|
||||
type Err = VeilidAPIError;
|
||||
fn from_str(host: &str) -> Result<Address, VeilidAPIError> {
|
||||
@ -763,7 +802,6 @@ impl FromStr for SocketAddress {
|
||||
|
||||
#[derive(Clone, PartialEq, Eq, PartialOrd, Ord)]
|
||||
pub struct DialInfoFilter {
|
||||
pub peer_scope_set: PeerScopeSet,
|
||||
pub protocol_type_set: ProtocolTypeSet,
|
||||
pub address_type_set: AddressTypeSet,
|
||||
}
|
||||
@ -771,7 +809,6 @@ pub struct DialInfoFilter {
|
||||
impl Default for DialInfoFilter {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
peer_scope_set: PeerScopeSet::all(),
|
||||
protocol_type_set: ProtocolTypeSet::all(),
|
||||
address_type_set: AddressTypeSet::all(),
|
||||
}
|
||||
@ -781,28 +818,6 @@ impl Default for DialInfoFilter {
|
||||
impl DialInfoFilter {
|
||||
pub fn all() -> Self {
|
||||
Self {
|
||||
peer_scope_set: PeerScopeSet::all(),
|
||||
protocol_type_set: ProtocolTypeSet::all(),
|
||||
address_type_set: AddressTypeSet::all(),
|
||||
}
|
||||
}
|
||||
pub fn global() -> Self {
|
||||
Self {
|
||||
peer_scope_set: PeerScopeSet::only(PeerScope::Global),
|
||||
protocol_type_set: ProtocolTypeSet::all(),
|
||||
address_type_set: AddressTypeSet::all(),
|
||||
}
|
||||
}
|
||||
pub fn local() -> Self {
|
||||
Self {
|
||||
peer_scope_set: PeerScopeSet::only(PeerScope::Local),
|
||||
protocol_type_set: ProtocolTypeSet::all(),
|
||||
address_type_set: AddressTypeSet::all(),
|
||||
}
|
||||
}
|
||||
pub fn scoped(peer_scope: PeerScope) -> Self {
|
||||
Self {
|
||||
peer_scope_set: PeerScopeSet::only(peer_scope),
|
||||
protocol_type_set: ProtocolTypeSet::all(),
|
||||
address_type_set: AddressTypeSet::all(),
|
||||
}
|
||||
@ -823,30 +838,28 @@ impl DialInfoFilter {
|
||||
self.address_type_set = address_set;
|
||||
self
|
||||
}
|
||||
pub fn filtered(mut self, other_dif: DialInfoFilter) -> Self {
|
||||
self.peer_scope_set &= other_dif.peer_scope_set;
|
||||
pub fn filtered(mut self, other_dif: &DialInfoFilter) -> Self {
|
||||
self.protocol_type_set &= other_dif.protocol_type_set;
|
||||
self.address_type_set &= other_dif.address_type_set;
|
||||
self
|
||||
}
|
||||
pub fn is_dead(&self) -> bool {
|
||||
self.peer_scope_set.is_empty()
|
||||
|| self.protocol_type_set.is_empty()
|
||||
|| self.address_type_set.is_empty()
|
||||
self.protocol_type_set.is_empty() || self.address_type_set.is_empty()
|
||||
}
|
||||
}
|
||||
|
||||
impl fmt::Debug for DialInfoFilter {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
|
||||
let mut out = String::new();
|
||||
if self.peer_scope_set != PeerScopeSet::all() {
|
||||
out += &format!("+{:?}", self.peer_scope_set);
|
||||
}
|
||||
if self.protocol_type_set != ProtocolTypeSet::all() {
|
||||
out += &format!("+{:?}", self.protocol_type_set);
|
||||
} else {
|
||||
out += "*";
|
||||
}
|
||||
if self.address_type_set != AddressTypeSet::all() {
|
||||
out += &format!("+{:?}", self.address_type_set);
|
||||
} else {
|
||||
out += "*";
|
||||
}
|
||||
write!(f, "[{}]", out)
|
||||
}
|
||||
@ -1104,6 +1117,14 @@ impl DialInfo {
|
||||
pub fn address_type(&self) -> AddressType {
|
||||
self.socket_address().address_type()
|
||||
}
|
||||
pub fn address(&self) -> Address {
|
||||
match self {
|
||||
Self::UDP(di) => di.socket_address.address,
|
||||
Self::TCP(di) => di.socket_address.address,
|
||||
Self::WS(di) => di.socket_address.address,
|
||||
Self::WSS(di) => di.socket_address.address,
|
||||
}
|
||||
}
|
||||
pub fn socket_address(&self) -> SocketAddress {
|
||||
match self {
|
||||
Self::UDP(di) => di.socket_address,
|
||||
@ -1160,49 +1181,15 @@ impl DialInfo {
|
||||
Self::WSS(di) => Some(format!("wss://{}", di.request)),
|
||||
}
|
||||
}
|
||||
pub fn is_global(&self) -> bool {
|
||||
self.socket_address().address().is_global()
|
||||
}
|
||||
pub fn is_local(&self) -> bool {
|
||||
self.socket_address().address().is_local()
|
||||
}
|
||||
pub fn is_valid(&self) -> bool {
|
||||
let socket_address = self.socket_address();
|
||||
let address = socket_address.address();
|
||||
let port = socket_address.port();
|
||||
(address.is_global() || address.is_local()) && port > 0
|
||||
}
|
||||
pub fn peer_scope(&self) -> Option<PeerScope> {
|
||||
let addr = self.socket_address().address();
|
||||
if addr.is_global() {
|
||||
return Some(PeerScope::Global);
|
||||
}
|
||||
if addr.is_local() {
|
||||
return Some(PeerScope::Local);
|
||||
}
|
||||
None
|
||||
}
|
||||
pub fn matches_peer_scope(&self, scope: PeerScopeSet) -> bool {
|
||||
if let Some(ps) = self.peer_scope() {
|
||||
scope.contains(ps)
|
||||
} else {
|
||||
false
|
||||
}
|
||||
}
|
||||
|
||||
pub fn make_filter(&self, scoped: bool) -> DialInfoFilter {
|
||||
pub fn make_filter(&self) -> DialInfoFilter {
|
||||
DialInfoFilter {
|
||||
peer_scope_set: if scoped {
|
||||
if self.is_global() {
|
||||
PeerScopeSet::only(PeerScope::Global)
|
||||
} else if self.is_local() {
|
||||
PeerScopeSet::only(PeerScope::Local)
|
||||
} else {
|
||||
PeerScopeSet::empty()
|
||||
}
|
||||
} else {
|
||||
PeerScopeSet::all()
|
||||
},
|
||||
protocol_type_set: ProtocolTypeSet::only(self.protocol_type()),
|
||||
address_type_set: AddressTypeSet::only(self.address_type()),
|
||||
}
|
||||
@ -1389,9 +1376,6 @@ impl DialInfo {
|
||||
|
||||
impl MatchesDialInfoFilter for DialInfo {
|
||||
fn matches_filter(&self, filter: &DialInfoFilter) -> bool {
|
||||
if !self.matches_peer_scope(filter.peer_scope_set) {
|
||||
return false;
|
||||
}
|
||||
if !filter.protocol_type_set.contains(self.protocol_type()) {
|
||||
return false;
|
||||
}
|
||||
@ -1464,8 +1448,8 @@ impl SignedNodeInfo {
|
||||
}
|
||||
}
|
||||
|
||||
pub fn is_valid(&self) -> bool {
|
||||
self.signature.valid && self.node_info.is_valid()
|
||||
pub fn has_valid_signature(&self) -> bool {
|
||||
self.signature.valid
|
||||
}
|
||||
}
|
||||
|
||||
@ -1486,8 +1470,9 @@ impl PeerInfo {
|
||||
|
||||
#[derive(Copy, Clone, Debug, PartialEq, PartialOrd, Eq, Ord, Hash, Serialize, Deserialize)]
|
||||
pub struct PeerAddress {
|
||||
socket_address: SocketAddress,
|
||||
protocol_type: ProtocolType,
|
||||
#[serde(with = "json_as_string")]
|
||||
socket_address: SocketAddress,
|
||||
}
|
||||
|
||||
impl PeerAddress {
|
||||
@ -1522,40 +1507,17 @@ pub struct ConnectionDescriptor {
|
||||
}
|
||||
|
||||
impl ConnectionDescriptor {
|
||||
fn validate_peer_scope(remote: PeerAddress) -> Result<(), VeilidAPIError> {
|
||||
// Verify address is in one of our peer scopes we care about
|
||||
let addr = remote.socket_address.address();
|
||||
|
||||
// Allow WASM to have unresolved addresses, for bootstraps
|
||||
cfg_if::cfg_if! {
|
||||
if #[cfg(target_arch = "wasm32")] {
|
||||
if addr.is_unspecified() {
|
||||
return Ok(());
|
||||
}
|
||||
}
|
||||
}
|
||||
if !addr.is_global() && !addr.is_local() {
|
||||
return Err(VeilidAPIError::generic(format!(
|
||||
"not a valid peer scope: {:?}",
|
||||
addr
|
||||
)));
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn new(remote: PeerAddress, local: SocketAddress) -> Result<Self, VeilidAPIError> {
|
||||
Self::validate_peer_scope(remote)?;
|
||||
Ok(Self {
|
||||
pub fn new(remote: PeerAddress, local: SocketAddress) -> Self {
|
||||
Self {
|
||||
remote,
|
||||
local: Some(local),
|
||||
})
|
||||
}
|
||||
}
|
||||
pub fn new_no_local(remote: PeerAddress) -> Result<Self, VeilidAPIError> {
|
||||
Self::validate_peer_scope(remote)?;
|
||||
Ok(Self {
|
||||
pub fn new_no_local(remote: PeerAddress) -> Self {
|
||||
Self {
|
||||
remote,
|
||||
local: None,
|
||||
})
|
||||
}
|
||||
}
|
||||
pub fn remote(&self) -> PeerAddress {
|
||||
self.remote
|
||||
@ -1572,36 +1534,15 @@ impl ConnectionDescriptor {
|
||||
pub fn address_type(&self) -> AddressType {
|
||||
self.remote.address_type()
|
||||
}
|
||||
pub fn peer_scope(&self) -> PeerScope {
|
||||
let addr = self.remote.socket_address.address();
|
||||
// Allow WASM to have unresolved addresses, for bootstraps
|
||||
cfg_if::cfg_if! {
|
||||
if #[cfg(target_arch = "wasm32")] {
|
||||
if addr.is_unspecified() {
|
||||
return PeerScope::Global;
|
||||
}
|
||||
}
|
||||
}
|
||||
if addr.is_global() {
|
||||
return PeerScope::Global;
|
||||
}
|
||||
PeerScope::Local
|
||||
}
|
||||
pub fn make_dial_info_filter(&self) -> DialInfoFilter {
|
||||
DialInfoFilter::scoped(self.peer_scope())
|
||||
DialInfoFilter::all()
|
||||
.with_protocol_type(self.protocol_type())
|
||||
.with_address_type(self.address_type())
|
||||
}
|
||||
pub fn matches_peer_scope(&self, scope: PeerScopeSet) -> bool {
|
||||
scope.contains(self.peer_scope())
|
||||
}
|
||||
}
|
||||
|
||||
impl MatchesDialInfoFilter for ConnectionDescriptor {
|
||||
fn matches_filter(&self, filter: &DialInfoFilter) -> bool {
|
||||
if !self.matches_peer_scope(filter.peer_scope_set) {
|
||||
return false;
|
||||
}
|
||||
if !filter.protocol_type_set.contains(self.protocol_type()) {
|
||||
return false;
|
||||
}
|
||||
@ -1649,46 +1590,56 @@ impl FromStr for NodeDialInfo {
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug, Default, Serialize, Deserialize)]
|
||||
#[derive(Clone, Debug, Default, Serialize, Deserialize, PartialEq, Eq)]
|
||||
pub struct LatencyStats {
|
||||
#[serde(with = "json_as_string")]
|
||||
pub fastest: u64, // fastest latency in the ROLLING_LATENCIES_SIZE last latencies
|
||||
#[serde(with = "json_as_string")]
|
||||
pub average: u64, // average latency over the ROLLING_LATENCIES_SIZE last latencies
|
||||
#[serde(with = "json_as_string")]
|
||||
pub slowest: u64, // slowest latency in the ROLLING_LATENCIES_SIZE last latencies
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug, Default, Serialize, Deserialize)]
|
||||
#[derive(Clone, Debug, Default, Serialize, Deserialize, PartialEq, Eq)]
|
||||
pub struct TransferStats {
|
||||
pub total: u64, // total amount transferred ever
|
||||
#[serde(with = "json_as_string")]
|
||||
pub total: u64, // total amount transferred ever
|
||||
#[serde(with = "json_as_string")]
|
||||
pub maximum: u64, // maximum rate over the ROLLING_TRANSFERS_SIZE last amounts
|
||||
#[serde(with = "json_as_string")]
|
||||
pub average: u64, // average rate over the ROLLING_TRANSFERS_SIZE last amounts
|
||||
#[serde(with = "json_as_string")]
|
||||
pub minimum: u64, // minimum rate over the ROLLING_TRANSFERS_SIZE last amounts
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug, Default, Serialize, Deserialize)]
|
||||
#[derive(Clone, Debug, Default, Serialize, Deserialize, PartialEq, Eq)]
|
||||
pub struct TransferStatsDownUp {
|
||||
pub down: TransferStats,
|
||||
pub up: TransferStats,
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug, Default, Serialize, Deserialize)]
|
||||
#[derive(Clone, Debug, Default, Serialize, Deserialize, PartialEq, Eq)]
|
||||
pub struct RPCStats {
|
||||
pub messages_sent: u32, // number of rpcs that have been sent in the total_time range
|
||||
pub messages_rcvd: u32, // number of rpcs that have been received in the total_time range
|
||||
pub questions_in_flight: u32, // number of questions issued that have yet to be answered
|
||||
#[serde(with = "opt_json_as_string")]
|
||||
pub last_question: Option<u64>, // when the peer was last questioned (either successfully or not) and we wanted an answer
|
||||
#[serde(with = "opt_json_as_string")]
|
||||
pub last_seen_ts: Option<u64>, // when the peer was last seen for any reason, including when we first attempted to reach out to it
|
||||
#[serde(with = "opt_json_as_string")]
|
||||
pub first_consecutive_seen_ts: Option<u64>, // the timestamp of the first consecutive proof-of-life for this node (an answer or received question)
|
||||
pub recent_lost_answers: u32, // number of answers that have been lost since we lost reliability
|
||||
pub failed_to_send: u32, // number of messages that have failed to send since we last successfully sent one
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug, Default, Serialize, Deserialize)]
|
||||
#[derive(Clone, Debug, Default, Serialize, Deserialize, PartialEq, Eq)]
|
||||
pub struct PeerStats {
|
||||
pub time_added: u64, // when the peer was added to the routing table
|
||||
#[serde(with = "json_as_string")]
|
||||
pub time_added: u64, // when the peer was added to the routing table
|
||||
pub rpc_stats: RPCStats, // information about RPCs
|
||||
pub latency: Option<LatencyStats>, // latencies for communications with the peer
|
||||
pub transfer: TransferStatsDownUp, // Stats for communications with the peer
|
||||
pub status: Option<NodeStatus>, // Last known node status
|
||||
}
|
||||
|
||||
pub type ValueChangeCallback =
|
||||
|
@ -40,3 +40,59 @@ pub fn serialize_json<T: Serialize + Debug>(val: T) -> String {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub mod json_as_string {
|
||||
use std::fmt::Display;
|
||||
use std::str::FromStr;
|
||||
|
||||
use serde::{de, Deserialize, Deserializer, Serializer};
|
||||
|
||||
pub fn serialize<T, S>(value: &T, serializer: S) -> Result<S::Ok, S::Error>
|
||||
where
|
||||
T: Display,
|
||||
S: Serializer,
|
||||
{
|
||||
serializer.collect_str(value)
|
||||
}
|
||||
|
||||
pub fn deserialize<'de, T, D>(deserializer: D) -> Result<T, D::Error>
|
||||
where
|
||||
T: FromStr,
|
||||
T::Err: Display,
|
||||
D: Deserializer<'de>,
|
||||
{
|
||||
String::deserialize(deserializer)?
|
||||
.parse()
|
||||
.map_err(de::Error::custom)
|
||||
}
|
||||
}
|
||||
|
||||
pub mod opt_json_as_string {
|
||||
use std::fmt::Display;
|
||||
use std::str::FromStr;
|
||||
|
||||
use serde::{de, Deserialize, Deserializer, Serializer};
|
||||
|
||||
pub fn serialize<T, S>(value: &Option<T>, serializer: S) -> Result<S::Ok, S::Error>
|
||||
where
|
||||
T: Display,
|
||||
S: Serializer,
|
||||
{
|
||||
match value {
|
||||
Some(v) => serializer.collect_str(v),
|
||||
None => serializer.serialize_none(),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn deserialize<'de, T, D>(deserializer: D) -> Result<Option<T>, D::Error>
|
||||
where
|
||||
T: FromStr,
|
||||
T::Err: Display,
|
||||
D: Deserializer<'de>,
|
||||
{
|
||||
match Option::<String>::deserialize(deserializer)? {
|
||||
None => Ok(None),
|
||||
Some(v) => Ok(Some(v.parse::<T>().map_err(de::Error::custom)?)),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -226,7 +226,6 @@ pub struct VeilidConfigNetwork {
|
||||
pub upnp: bool,
|
||||
pub natpmp: bool,
|
||||
pub detect_address_changes: bool,
|
||||
pub enable_local_peer_scope: bool,
|
||||
pub restricted_nat_retries: u32,
|
||||
pub tls: VeilidConfigTLS,
|
||||
pub application: VeilidConfigApplication,
|
||||
@ -448,7 +447,6 @@ impl VeilidConfig {
|
||||
get_config!(inner.network.upnp);
|
||||
get_config!(inner.network.natpmp);
|
||||
get_config!(inner.network.detect_address_changes);
|
||||
get_config!(inner.network.enable_local_peer_scope);
|
||||
get_config!(inner.network.restricted_nat_retries);
|
||||
get_config!(inner.network.tls.certificate_path);
|
||||
get_config!(inner.network.tls.private_key_path);
|
||||
|
@ -215,3 +215,54 @@ pub fn ip_to_ipblock(ip6_prefix_size: usize, addr: IpAddr) -> IpAddr {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub fn ipaddr_apply_netmask(addr: IpAddr, netmask: IpAddr) -> IpAddr {
|
||||
match addr {
|
||||
IpAddr::V4(v4) => {
|
||||
let v4mask = match netmask {
|
||||
IpAddr::V4(v4mask) => v4mask,
|
||||
IpAddr::V6(_) => {
|
||||
panic!("netmask doesn't match ipv4 address");
|
||||
}
|
||||
};
|
||||
let v4 = v4.octets();
|
||||
let v4mask = v4mask.octets();
|
||||
IpAddr::V4(Ipv4Addr::new(
|
||||
v4[0] & v4mask[0],
|
||||
v4[1] & v4mask[1],
|
||||
v4[2] & v4mask[2],
|
||||
v4[3] & v4mask[3],
|
||||
))
|
||||
}
|
||||
IpAddr::V6(v6) => {
|
||||
let v6mask = match netmask {
|
||||
IpAddr::V4(_) => {
|
||||
panic!("netmask doesn't match ipv6 address");
|
||||
}
|
||||
IpAddr::V6(v6mask) => v6mask,
|
||||
};
|
||||
let v6 = v6.segments();
|
||||
let v6mask = v6mask.segments();
|
||||
IpAddr::V6(Ipv6Addr::new(
|
||||
v6[0] & v6mask[0],
|
||||
v6[1] & v6mask[1],
|
||||
v6[2] & v6mask[2],
|
||||
v6[3] & v6mask[3],
|
||||
v6[4] & v6mask[4],
|
||||
v6[5] & v6mask[5],
|
||||
v6[6] & v6mask[6],
|
||||
v6[7] & v6mask[7],
|
||||
))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub fn ipaddr_in_network(addr: IpAddr, netaddr: IpAddr, netmask: IpAddr) -> bool {
|
||||
if addr.is_ipv4() && !netaddr.is_ipv4() {
|
||||
return false;
|
||||
}
|
||||
if addr.is_ipv6() && !netaddr.is_ipv6() {
|
||||
return false;
|
||||
}
|
||||
ipaddr_apply_netmask(netaddr, netmask) == ipaddr_apply_netmask(addr, netmask)
|
||||
}
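
ipaddr_in_network() masks both the candidate address and the network address with the same netmask and compares the results. A hypothetical usage example, not taken from the source:

    use std::net::IpAddr;

    let addr: IpAddr = "10.1.2.3".parse().unwrap();
    let net: IpAddr = "10.1.0.0".parse().unwrap();
    let mask: IpAddr = "255.255.0.0".parse().unwrap();
    assert!(ipaddr_in_network(addr, net, mask));  // 10.1.2.3 & mask == 10.1.0.0

    let other: IpAddr = "10.2.0.1".parse().unwrap();
    assert!(!ipaddr_in_network(other, net, mask)); // 10.2.0.0 != 10.1.0.0
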
|
||||
|
@ -1,5 +1,5 @@
|
||||
<manifest xmlns:android="http://schemas.android.com/apk/res/android"
|
||||
package="com.veilid.veilid">
|
||||
package="com.veilid.veilid">
|
||||
|
||||
<uses-permission android:name="android.permission.INTERNET" />
|
||||
<uses-permission android:name="android.permission.ACCESS_NETWORK_STATE" />
|
||||
|
@ -56,6 +56,12 @@ android {
|
||||
// Signing with the debug keys for now, so `flutter run --release` works.
|
||||
signingConfig signingConfigs.debug
|
||||
}
|
||||
debug {
|
||||
packagingOptions {
|
||||
jniLibs.useLegacyPackaging = true
|
||||
jniLibs.keepDebugSymbols += '**/libveilid_flutter.so'
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -1,5 +1,5 @@
|
||||
<manifest xmlns:android="http://schemas.android.com/apk/res/android"
|
||||
package="com.veilid.veilid_example">
|
||||
package="com.veilid.veilid_example">
|
||||
<application
|
||||
android:label="veilid_example"
|
||||
android:name="${applicationName}"
|
||||
|
@ -6,7 +6,7 @@ buildscript {
|
||||
}
|
||||
|
||||
dependencies {
|
||||
classpath 'com.android.tools.build:gradle:4.1.0'
|
||||
classpath 'com.android.tools.build:gradle:7.2.0'
|
||||
classpath "org.jetbrains.kotlin:kotlin-gradle-plugin:$kotlin_version"
|
||||
}
|
||||
}
|
||||
|
@ -3,4 +3,4 @@ distributionBase=GRADLE_USER_HOME
|
||||
distributionPath=wrapper/dists
|
||||
zipStoreBase=GRADLE_USER_HOME
|
||||
zipStorePath=wrapper/dists
|
||||
distributionUrl=https\://services.gradle.org/distributions/gradle-6.7-all.zip
|
||||
distributionUrl=https\://services.gradle.org/distributions/gradle-7.3.3-all.zip
|
||||
|
@ -85,7 +85,6 @@ Future<VeilidConfig> getDefaultVeilidConfig() async {
|
||||
upnp: true,
|
||||
natpmp: true,
|
||||
detectAddressChanges: true,
|
||||
enableLocalPeerScope: false,
|
||||
restrictedNatRetries: 0,
|
||||
tls: VeilidConfigTLS(
|
||||
certificatePath: "",
|
||||
|
@ -3,7 +3,6 @@ import 'dart:typed_data';
|
||||
import 'dart:convert';
|
||||
|
||||
import 'package:flutter/material.dart';
|
||||
import 'package:flutter/services.dart';
|
||||
import 'package:flutter/foundation.dart' show kIsWeb;
|
||||
import 'package:veilid/veilid.dart';
|
||||
import 'package:flutter_loggy/flutter_loggy.dart';
|
||||
@ -12,7 +11,7 @@ import 'package:loggy/loggy.dart';
|
||||
import 'config.dart';
|
||||
|
||||
// Loggy tools
|
||||
const LogLevel traceLevel = LogLevel('trace', 1);
|
||||
const LogLevel traceLevel = LogLevel('Trace', 1);
|
||||
|
||||
class ConsolePrinter extends LoggyPrinter {
|
||||
ConsolePrinter(this.childPrinter) : super();
|
||||
@ -162,21 +161,30 @@ class _MyAppState extends State<MyApp> with UiLoggy {
|
||||
}
|
||||
|
||||
Future<void> processUpdateLog(VeilidUpdateLog update) async {
|
||||
StackTrace? stackTrace;
|
||||
Object? error;
|
||||
final backtrace = update.backtrace;
|
||||
if (backtrace != null) {
|
||||
stackTrace =
|
||||
StackTrace.fromString("$backtrace\n${StackTrace.current.toString()}");
|
||||
error = 'embedded stack trace for ${update.logLevel} ${update.message}';
|
||||
}
|
||||
|
||||
switch (update.logLevel) {
|
||||
case VeilidLogLevel.error:
|
||||
loggy.error(update.message);
|
||||
loggy.error(update.message, error, stackTrace);
|
||||
break;
|
||||
case VeilidLogLevel.warn:
|
||||
loggy.warning(update.message);
|
||||
loggy.warning(update.message, error, stackTrace);
|
||||
break;
|
||||
case VeilidLogLevel.info:
|
||||
loggy.info(update.message);
|
||||
loggy.info(update.message, error, stackTrace);
|
||||
break;
|
||||
case VeilidLogLevel.debug:
|
||||
loggy.debug(update.message);
|
||||
loggy.debug(update.message, error, stackTrace);
|
||||
break;
|
||||
case VeilidLogLevel.trace:
|
||||
loggy.trace(update.message);
|
||||
loggy.trace(update.message, error, stackTrace);
|
||||
break;
|
||||
}
|
||||
}
|
||||
@ -188,7 +196,7 @@ class _MyAppState extends State<MyApp> with UiLoggy {
|
||||
if (update is VeilidUpdateLog) {
|
||||
await processUpdateLog(update);
|
||||
} else {
|
||||
loggy.trace("Update: " + update.json.toString());
|
||||
loggy.trace("Update: ${update.json}");
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -218,12 +226,14 @@ class _MyAppState extends State<MyApp> with UiLoggy {
|
||||
onPressed: _updateStream != null
|
||||
? null
|
||||
: () async {
|
||||
var updateStream = Veilid.instance.startupVeilidCore(
|
||||
await getDefaultVeilidConfig());
|
||||
var updateStream = await Veilid.instance
|
||||
.startupVeilidCore(
|
||||
await getDefaultVeilidConfig());
|
||||
setState(() {
|
||||
_updateStream = updateStream;
|
||||
_updateProcessor = processUpdates();
|
||||
});
|
||||
await Veilid.instance.attach();
|
||||
},
|
||||
child: const Text('Startup'),
|
||||
),
|
||||
|
@ -19,7 +19,7 @@ EXTERNAL SOURCES:
|
||||
:path: Flutter/ephemeral/.symlinks/plugins/veilid/macos
|
||||
|
||||
SPEC CHECKSUMS:
|
||||
FlutterMacOS: 57701585bf7de1b3fc2bb61f6378d73bbdea8424
|
||||
FlutterMacOS: ae6af50a8ea7d6103d888583d46bd8328a7e9811
|
||||
path_provider_macos: 3c0c3b4b0d4a76d2bf989a913c2de869c5641a19
|
||||
veilid: 6bed3adec63fd8708a2ace498e0e17941c9fc32b
|
||||
|
||||
|
@ -7,7 +7,7 @@ packages:
name: async
url: "https://pub.dartlang.org"
source: hosted
version: "2.8.2"
version: "2.9.0"
boolean_selector:
dependency: transitive
description:
@ -28,21 +28,14 @@ packages:
name: characters
url: "https://pub.dartlang.org"
source: hosted
version: "1.2.0"
charcode:
dependency: transitive
description:
name: charcode
url: "https://pub.dartlang.org"
source: hosted
version: "1.3.1"
version: "1.2.1"
clock:
dependency: transitive
description:
name: clock
url: "https://pub.dartlang.org"
source: hosted
version: "1.1.0"
version: "1.1.1"
collection:
dependency: transitive
description:
@ -63,21 +56,21 @@ packages:
name: fake_async
url: "https://pub.dartlang.org"
source: hosted
version: "1.3.0"
version: "1.3.1"
ffi:
dependency: transitive
description:
name: ffi
url: "https://pub.dartlang.org"
source: hosted
version: "2.0.0"
version: "2.0.1"
file:
dependency: transitive
description:
name: file
url: "https://pub.dartlang.org"
source: hosted
version: "6.1.2"
version: "6.1.4"
flutter:
dependency: "direct main"
description: flutter
@ -134,30 +127,30 @@ packages:
name: matcher
url: "https://pub.dartlang.org"
source: hosted
version: "0.12.11"
version: "0.12.12"
material_color_utilities:
dependency: transitive
description:
name: material_color_utilities
url: "https://pub.dartlang.org"
source: hosted
version: "0.1.4"
version: "0.1.5"
meta:
dependency: transitive
description:
name: meta
url: "https://pub.dartlang.org"
source: hosted
version: "1.7.0"
version: "1.8.0"
path:
dependency: transitive
dependency: "direct main"
description:
name: path
url: "https://pub.dartlang.org"
source: hosted
version: "1.8.1"
version: "1.8.2"
path_provider:
dependency: transitive
dependency: "direct main"
description:
name: path_provider
url: "https://pub.dartlang.org"
@ -169,14 +162,14 @@ packages:
name: path_provider_android
url: "https://pub.dartlang.org"
source: hosted
version: "2.0.14"
version: "2.0.20"
path_provider_ios:
dependency: transitive
description:
name: path_provider_ios
url: "https://pub.dartlang.org"
source: hosted
version: "2.0.10"
version: "2.0.11"
path_provider_linux:
dependency: transitive
description:
@ -204,7 +197,7 @@ packages:
name: path_provider_windows
url: "https://pub.dartlang.org"
source: hosted
version: "2.1.0"
version: "2.1.3"
platform:
dependency: transitive
description:
@ -232,7 +225,7 @@ packages:
name: rxdart
url: "https://pub.dartlang.org"
source: hosted
version: "0.27.4"
version: "0.27.5"
sky_engine:
dependency: transitive
description: flutter
@ -244,7 +237,7 @@ packages:
name: source_span
url: "https://pub.dartlang.org"
source: hosted
version: "1.8.2"
version: "1.9.0"
stack_trace:
dependency: transitive
description:
@ -265,21 +258,21 @@ packages:
name: string_scanner
url: "https://pub.dartlang.org"
source: hosted
version: "1.1.0"
version: "1.1.1"
term_glyph:
dependency: transitive
description:
name: term_glyph
url: "https://pub.dartlang.org"
source: hosted
version: "1.2.0"
version: "1.2.1"
test_api:
dependency: transitive
description:
name: test_api
url: "https://pub.dartlang.org"
source: hosted
version: "0.4.9"
version: "0.4.12"
vector_math:
dependency: transitive
description:
@ -300,14 +293,14 @@ packages:
name: win32
url: "https://pub.dartlang.org"
source: hosted
version: "2.7.0"
version: "3.0.0"
xdg_directories:
dependency: transitive
description:
name: xdg_directories
url: "https://pub.dartlang.org"
source: hosted
version: "0.2.0+1"
version: "0.2.0+2"
sdks:
dart: ">=2.17.0 <3.0.0"
flutter: ">=3.0.0"
@ -34,6 +34,8 @@ dependencies:
cupertino_icons: ^1.0.2
loggy: ^2.0.1+1
flutter_loggy: ^2.0.1
path_provider: ^2.0.11
path: ^1.8.1

dev_dependencies:
flutter_test:
@ -741,7 +741,6 @@ class VeilidConfigNetwork {
bool upnp;
bool natpmp;
bool detectAddressChanges;
bool enableLocalPeerScope;
int restrictedNatRetries;
VeilidConfigTLS tls;
VeilidConfigApplication application;
@ -767,7 +766,6 @@ class VeilidConfigNetwork {
required this.upnp,
required this.natpmp,
required this.detectAddressChanges,
required this.enableLocalPeerScope,
required this.restrictedNatRetries,
required this.tls,
required this.application,
@ -795,7 +793,6 @@ class VeilidConfigNetwork {
'upnp': upnp,
'natpmp': natpmp,
'detect_address_changes': detectAddressChanges,
'enable_local_peer_scope': enableLocalPeerScope,
'restricted_nat_retries': restrictedNatRetries,
'tls': tls.json,
'application': application.json,
@ -826,7 +823,6 @@ class VeilidConfigNetwork {
upnp = json['upnp'],
natpmp = json['natpmp'],
detectAddressChanges = json['detect_address_changes'],
enableLocalPeerScope = json['enable_local_peer_scope'],
restrictedNatRetries = json['restricted_nat_retries'],
tls = VeilidConfigTLS.fromJson(json['tls']),
application = VeilidConfigApplication.fromJson(json['application']),
@ -991,6 +987,243 @@ class VeilidConfig {
network = VeilidConfigNetwork.fromJson(json['network']);
}

////////////

class LatencyStats {
BigInt fastest;
BigInt average;
BigInt slowest;

LatencyStats({
required this.fastest,
required this.average,
required this.slowest,
});

Map<String, dynamic> get json {
return {
'fastest': fastest.toString(),
'average': average.toString(),
'slowest': slowest.toString(),
};
}

LatencyStats.fromJson(Map<String, dynamic> json)
: fastest = BigInt.parse(json['fastest']),
average = BigInt.parse(json['average']),
slowest = BigInt.parse(json['slowest']);
}

////////////

class TransferStats {
BigInt total;
BigInt maximum;
BigInt average;
BigInt minimum;

TransferStats({
required this.total,
required this.maximum,
required this.average,
required this.minimum,
});

Map<String, dynamic> get json {
return {
'total': total.toString(),
'maximum': maximum.toString(),
'average': average.toString(),
'minimum': minimum.toString(),
};
}

TransferStats.fromJson(Map<String, dynamic> json)
: total = BigInt.parse(json['total']),
maximum = BigInt.parse(json['maximum']),
average = BigInt.parse(json['average']),
minimum = BigInt.parse(json['minimum']);
}

////////////

class TransferStatsDownUp {
TransferStats down;
TransferStats up;

TransferStatsDownUp({
required this.down,
required this.up,
});

Map<String, dynamic> get json {
return {
'down': down.json,
'up': up.json,
};
}

TransferStatsDownUp.fromJson(Map<String, dynamic> json)
: down = TransferStats.fromJson(json['down']),
up = TransferStats.fromJson(json['up']);
}

////////////

class RPCStats {
int messagesSent;
int messagesRcvd;
int questionsInFlight;
BigInt? lastQuestion;
BigInt? lastSeenTs;
BigInt? firstConsecutiveSeenTs;
int recentLostAnswers;
int failedToSend;

RPCStats({
required this.messagesSent,
required this.messagesRcvd,
required this.questionsInFlight,
required this.lastQuestion,
required this.lastSeenTs,
required this.firstConsecutiveSeenTs,
required this.recentLostAnswers,
required this.failedToSend,
});

Map<String, dynamic> get json {
return {
'messages_sent': messagesSent,
'messages_rcvd': messagesRcvd,
'questions_in_flight': questionsInFlight,
'last_question': lastQuestion?.toString(),
'last_seen_ts': lastSeenTs?.toString(),
'first_consecutive_seen_ts': firstConsecutiveSeenTs?.toString(),
'recent_lost_answers': recentLostAnswers,
'failed_to_send': failedToSend,
};
}

RPCStats.fromJson(Map<String, dynamic> json)
: messagesSent = json['messages_sent'],
messagesRcvd = json['messages_rcvd'],
questionsInFlight = json['questions_in_flight'],
lastQuestion = json['last_question'] != null
? BigInt.parse(json['last_question'])
: null,
lastSeenTs = json['last_seen_ts'] != null
? BigInt.parse(json['last_seen_ts'])
: null,
firstConsecutiveSeenTs = json['first_consecutive_seen_ts'] != null
? BigInt.parse(json['first_consecutive_seen_ts'])
: null,
recentLostAnswers = json['recent_lost_answers'],
failedToSend = json['failed_to_send'];
}

////////////

class PeerStats {
BigInt timeAdded;
RPCStats rpcStats;
LatencyStats? latency;
TransferStatsDownUp transfer;

PeerStats({
required this.timeAdded,
required this.rpcStats,
required this.latency,
required this.transfer,
});

Map<String, dynamic> get json {
return {
'time_added': timeAdded.toString(),
'rpc_stats': rpcStats.json,
'latency': latency?.json,
'transfer': transfer.json,
};
}

PeerStats.fromJson(Map<String, dynamic> json)
: timeAdded = BigInt.parse(json['time_added']),
rpcStats = RPCStats.fromJson(json['rpc_stats']),
latency = json['latency'] != null
? LatencyStats.fromJson(json['latency'])
: null,
transfer = TransferStatsDownUp.fromJson(json['transfer']);
}

////////////

class PeerTableData {
String nodeId;
PeerAddress peerAddress;
PeerStats peerStats;

PeerTableData({
required this.nodeId,
required this.peerAddress,
required this.peerStats,
});

Map<String, dynamic> get json {
return {
'node_id': nodeId,
'peer_address': peerAddress.json,
'peer_stats': peerStats.json,
};
}

PeerTableData.fromJson(Map<String, dynamic> json)
: nodeId = json['node_id'],
peerAddress = PeerAddress.fromJson(json['peer_address']),
peerStats = PeerStats.fromJson(json['peer_stats']);
}

//////////////////////////////////////
/// AttachmentState

enum ProtocolType {
udp,
tcp,
ws,
wss,
}

extension ProtocolTypeExt on ProtocolType {
String get json {
return name.toUpperCase();
}
}

ProtocolType protocolTypeFromJson(String j) {
return ProtocolType.values.byName(j.toLowerCase());
}

////////////

class PeerAddress {
ProtocolType protocolType;
String socketAddress;

PeerAddress({
required this.protocolType,
required this.socketAddress,
});

Map<String, dynamic> get json {
return {
'protocol_type': protocolType.json,
'socket_address': socketAddress,
};
}

PeerAddress.fromJson(Map<String, dynamic> json)
: protocolType = protocolTypeFromJson(json['protocol_type']),
socketAddress = json['socket_address'];
}

//////////////////////////////////////
/// VeilidUpdate
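A small sketch of how these new serializable stats types round-trip through their JSON form (illustrative only; the 'package:veilid/veilid.dart' import path and the literal values are assumptions, not part of the diff):

import 'package:veilid/veilid.dart';

void main() {
  // LatencyStats serializes its BigInt fields as decimal strings.
  final latency = LatencyStats(
    fastest: BigInt.from(1200),
    average: BigInt.from(3400),
    slowest: BigInt.from(9800),
  );
  final decoded = LatencyStats.fromJson(latency.json);
  assert(decoded.average == BigInt.from(3400));

  // PeerAddress uses the ProtocolType extension, which upper-cases the name.
  final addr = PeerAddress(
    protocolType: ProtocolType.udp,
    socketAddress: '203.0.113.10:5150',
  );
  assert(addr.json['protocol_type'] == 'UDP');
}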
@ -1000,16 +1233,18 @@ abstract class VeilidUpdate {
case "Log":
{
return VeilidUpdateLog(
veilidLogLevelFromJson(json["log_level"]), json["message"]);
logLevel: veilidLogLevelFromJson(json["log_level"]),
message: json["message"],
backtrace: json["backtrace"]);
}
case "Attachment":
{
return VeilidUpdateAttachment(attachmentStateFromJson(json["state"]));
return VeilidUpdateAttachment(
state: VeilidStateAttachment.fromJson(json));
}
case "Network":
{
return VeilidUpdateNetwork(
json["started"], json["bps_up"], json["bps_down"]);
return VeilidUpdateNetwork(state: VeilidStateNetwork.fromJson(json));
}
default:
{
@ -1024,8 +1259,13 @@ abstract class VeilidUpdate {
class VeilidUpdateLog implements VeilidUpdate {
final VeilidLogLevel logLevel;
final String message;
final String? backtrace;
//
VeilidUpdateLog(this.logLevel, this.message);
VeilidUpdateLog({
required this.logLevel,
required this.message,
required this.backtrace,
});

@override
Map<String, dynamic> get json {
@ -1033,39 +1273,34 @@ class VeilidUpdateLog implements VeilidUpdate {
'kind': "Log",
'log_level': logLevel.json,
'message': message,
'backtrace': backtrace
};
}
}

class VeilidUpdateAttachment implements VeilidUpdate {
final AttachmentState state;
final VeilidStateAttachment state;
//
VeilidUpdateAttachment(this.state);
VeilidUpdateAttachment({required this.state});

@override
Map<String, dynamic> get json {
return {
'kind': "Attachment",
'state': state.json,
};
var jsonRep = state.json;
jsonRep['kind'] = "Attachment";
return jsonRep;
}
}

class VeilidUpdateNetwork implements VeilidUpdate {
final bool started;
final int bpsDown;
final int bpsUp;
final VeilidStateNetwork state;
//
VeilidUpdateNetwork(this.started, this.bpsDown, this.bpsUp);
VeilidUpdateNetwork({required this.state});

@override
Map<String, dynamic> get json {
return {
'kind': "Network",
'started': started,
'bps_down': bpsDown,
'bps_up': bpsUp
};
var jsonRep = state.json;
jsonRep['kind'] = "Network";
return jsonRep;
}
}

@ -1079,6 +1314,12 @@ class VeilidStateAttachment {

VeilidStateAttachment.fromJson(Map<String, dynamic> json)
: state = attachmentStateFromJson(json['state']);

Map<String, dynamic> get json {
return {
'state': state.json,
};
}
}

//////////////////////////////////////
@ -1086,11 +1327,31 @@ class VeilidStateAttachment {

class VeilidStateNetwork {
final bool started;
final BigInt bpsDown;
final BigInt bpsUp;
final List<PeerTableData> peers;

VeilidStateNetwork(this.started);
VeilidStateNetwork(
{required this.started,
required this.bpsDown,
required this.bpsUp,
required this.peers});

VeilidStateNetwork.fromJson(Map<String, dynamic> json)
: started = json['started'];
: started = json['started'],
bpsDown = BigInt.parse(json['bps_down']),
bpsUp = BigInt.parse(json['bps_up']),
peers = List<PeerTableData>.from(
json['peers'].map((j) => PeerTableData.fromJson(j)));

Map<String, dynamic> get json {
return {
'started': started,
'bps_down': bpsDown.toString(),
'bps_up': bpsUp.toString(),
'peers': peers.map((p) => p.json).toList(),
};
}
}

//////////////////////////////////////
@ -1100,11 +1361,13 @@ class VeilidState {
final VeilidStateAttachment attachment;
final VeilidStateNetwork network;

VeilidState(this.attachment, this.network);

VeilidState.fromJson(Map<String, dynamic> json)
: attachment = VeilidStateAttachment.fromJson(json['attachment']),
network = VeilidStateNetwork.fromJson(json['network']);

Map<String, dynamic> get json {
return {'attachment': attachment.json, 'network': network.json};
}
}

//////////////////////////////////////
@ -1303,8 +1566,10 @@ abstract class Veilid {

void initializeVeilidCore(Map<String, dynamic> platformConfigJson);
void changeLogLevel(String layer, VeilidConfigLogLevel logLevel);
Stream<VeilidUpdate> startupVeilidCore(VeilidConfig config);
Future<Stream<VeilidUpdate>> startupVeilidCore(VeilidConfig config);
Future<VeilidState> getVeilidState();
Future<void> attach();
Future<void> detach();
Future<void> shutdownVeilidCore();
Future<String> debug(String command);
String veilidVersionString();
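Since VeilidUpdateAttachment and VeilidUpdateNetwork now wrap full state objects rather than loose fields, a consumer branches on the update type roughly like this (a sketch under the same assumed import path, not part of the diff):

import 'package:veilid/veilid.dart';

void handleUpdate(VeilidUpdate update) {
  if (update is VeilidUpdateAttachment) {
    // The attachment update carries a VeilidStateAttachment.
    print('attachment: ${update.state.json}');
  } else if (update is VeilidUpdateNetwork) {
    // The network update carries a VeilidStateNetwork, including the peer table.
    final net = update.state;
    print('started=${net.started} peers=${net.peers.length} '
        'bps up=${net.bpsUp} down=${net.bpsDown}');
  } else if (update is VeilidUpdateLog) {
    print('[${update.logLevel.json}] ${update.message}');
  }
}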
@ -36,11 +36,17 @@ typedef _InitializeVeilidCoreDart = void Function(Pointer<Utf8>);
typedef _ChangeLogLevelC = Void Function(Pointer<Utf8>, Pointer<Utf8>);
typedef _ChangeLogLevelDart = void Function(Pointer<Utf8>, Pointer<Utf8>);
// fn startup_veilid_core(port: i64, config: FfiStr)
typedef _StartupVeilidCoreC = Void Function(Int64, Pointer<Utf8>);
typedef _StartupVeilidCoreDart = void Function(int, Pointer<Utf8>);
typedef _StartupVeilidCoreC = Void Function(Int64, Int64, Pointer<Utf8>);
typedef _StartupVeilidCoreDart = void Function(int, int, Pointer<Utf8>);
// fn get_veilid_state(port: i64)
typedef _GetVeilidStateC = Void Function(Int64);
typedef _GetVeilidStateDart = void Function(int);
// fn attach(port: i64)
typedef _AttachC = Void Function(Int64);
typedef _AttachDart = void Function(int);
// fn detach(port: i64)
typedef _DetachC = Void Function(Int64);
typedef _DetachDart = void Function(int);
// fn debug(port: i64, log_level: FfiStr)
typedef _DebugC = Void Function(Int64, Pointer<Utf8>);
typedef _DebugDart = void Function(int, Pointer<Utf8>);
@ -193,6 +199,51 @@ Future<void> processFutureVoid(Future<dynamic> future) {
});
}

Future<Stream<T>> processFutureStream<T>(
Stream<T> returnStream, Future<dynamic> future) {
return future.then((value) {
final list = value as List<dynamic>;
switch (list[0] as int) {
case messageOk:
{
if (list[1] != null) {
throw VeilidAPIExceptionInternal(
"Unexpected MESSAGE_OK value '${list[1]}' where null expected");
}
return returnStream;
}
case messageErr:
{
throw VeilidAPIExceptionInternal("Internal API Error: ${list[1]}");
}
case messageOkJson:
{
var ret = jsonDecode(list[1] as String);
if (ret != null) {
throw VeilidAPIExceptionInternal(
"Unexpected MESSAGE_OK_JSON value '$ret' where null expected");
}
return returnStream;
}
case messageErrJson:
{
throw VeilidAPIException.fromJson(jsonDecode(list[1] as String));
}
default:
{
throw VeilidAPIExceptionInternal(
"Unexpected async return message type: ${list[0]}");
}
}
}).catchError((e) {
// Wrap all other errors in VeilidAPIExceptionInternal
throw VeilidAPIExceptionInternal(e.toString());
}, test: (e) {
// Pass errors that are already VeilidAPIException through without wrapping
return e is! VeilidAPIException;
});
}

Stream<T> processStreamJson<T>(
T Function(Map<String, dynamic>) jsonConstructor, ReceivePort port) async* {
try {
@ -249,6 +300,8 @@ class VeilidFFI implements Veilid {
final _ChangeLogLevelDart _changeLogLevel;
final _StartupVeilidCoreDart _startupVeilidCore;
final _GetVeilidStateDart _getVeilidState;
final _AttachDart _attach;
final _DetachDart _detach;
final _ShutdownVeilidCoreDart _shutdownVeilidCore;
final _DebugDart _debug;
final _VeilidVersionStringDart _veilidVersionString;
@ -269,6 +322,8 @@ class VeilidFFI implements Veilid {
_getVeilidState =
dylib.lookupFunction<_GetVeilidStateC, _GetVeilidStateDart>(
'get_veilid_state'),
_attach = dylib.lookupFunction<_AttachC, _AttachDart>('attach'),
_detach = dylib.lookupFunction<_DetachC, _DetachDart>('detach'),
_shutdownVeilidCore =
dylib.lookupFunction<_ShutdownVeilidCoreC, _ShutdownVeilidCoreDart>(
'shutdown_veilid_core'),
@ -308,15 +363,20 @@ class VeilidFFI implements Veilid {
}

@override
Stream<VeilidUpdate> startupVeilidCore(VeilidConfig config) {
Future<Stream<VeilidUpdate>> startupVeilidCore(VeilidConfig config) {
var nativeConfig =
jsonEncode(config.json, toEncodable: veilidApiToEncodable)
.toNativeUtf8();
final recvStreamPort = ReceivePort("veilid_api_stream");
final sendStreamPort = recvStreamPort.sendPort;
final recvPort = ReceivePort("startup_veilid_core");
final sendPort = recvPort.sendPort;
_startupVeilidCore(sendPort.nativePort, nativeConfig);
_startupVeilidCore(
sendPort.nativePort, sendStreamPort.nativePort, nativeConfig);
malloc.free(nativeConfig);
return processStreamJson(VeilidUpdate.fromJson, recvPort);
return processFutureStream(
processStreamJson(VeilidUpdate.fromJson, recvStreamPort),
recvPort.first);
}

@override
@ -327,6 +387,22 @@ class VeilidFFI implements Veilid {
return processFutureJson(VeilidState.fromJson, recvPort.first);
}

@override
Future<void> attach() async {
final recvPort = ReceivePort("attach");
final sendPort = recvPort.sendPort;
_attach(sendPort.nativePort);
return processFutureVoid(recvPort.first);
}

@override
Future<void> detach() async {
final recvPort = ReceivePort("detach");
final sendPort = recvPort.sendPort;
_detach(sendPort.nativePort);
return processFutureVoid(recvPort.first);
}

@override
Future<void> shutdownVeilidCore() async {
final recvPort = ReceivePort("shutdown_veilid_core");
@ -35,7 +35,7 @@ class VeilidJS implements Veilid {
}

@override
Stream<VeilidUpdate> startupVeilidCore(VeilidConfig config) async* {
Future<Stream<VeilidUpdate>> startupVeilidCore(VeilidConfig config) async {
var streamController = StreamController<VeilidUpdate>();
updateCallback(String update) {
var updateJson = jsonDecode(update);
@ -51,7 +51,8 @@ class VeilidJS implements Veilid {
js.allowInterop(updateCallback),
jsonEncode(config.json, toEncodable: veilidApiToEncodable)
]));
yield* streamController.stream;

return streamController.stream;
}

@override
@ -60,6 +61,16 @@ class VeilidJS implements Veilid {
js_util.callMethod(wasm, "get_veilid_state", []))));
}

@override
Future<void> attach() async {
return _wrapApiPromise(js_util.callMethod(wasm, "attach", []));
}

@override
Future<void> detach() async {
return _wrapApiPromise(js_util.callMethod(wasm, "detach", []));
}

@override
Future<void> shutdownVeilidCore() {
return _wrapApiPromise(
@ -48,19 +48,6 @@ define_string_destructor!(free_string);
type APIResult<T> = Result<T, veilid_core::VeilidAPIError>;
const APIRESULT_VOID: APIResult<()> = APIResult::Ok(());

// Stream abort macro for simplified error handling
macro_rules! check_err_json {
($stream:expr, $ex:expr) => {
match $ex {
Ok(v) => v,
Err(e) => {
$stream.abort_json(e);
return;
}
}
};
}

/////////////////////////////////////////
// FFI-specific config

@ -253,25 +240,24 @@ pub extern "C" fn change_log_level(layer: FfiStr, log_level: FfiStr) {

#[no_mangle]
#[instrument]
pub extern "C" fn startup_veilid_core(port: i64, config: FfiStr) {
pub extern "C" fn startup_veilid_core(port: i64, stream_port: i64, config: FfiStr) {
let config = config.into_opt_string();
let stream = DartIsolateStream::new(port);
spawn(async move {
let stream = DartIsolateStream::new(stream_port);
DartIsolateWrapper::new(port).spawn_result_json(async move {
let config_json = match config {
Some(v) => v,
None => {
stream.abort_json(veilid_core::VeilidAPIError::MissingArgument {
let err = veilid_core::VeilidAPIError::MissingArgument {
context: "startup_veilid_core".to_owned(),
argument: "config".to_owned(),
});
return;
};
return APIResult::Err(err);
}
};

let mut api_lock = VEILID_API.lock().await;
if api_lock.is_some() {
stream.abort_json(veilid_core::VeilidAPIError::AlreadyInitialized);
return;
return APIResult::Err(veilid_core::VeilidAPIError::AlreadyInitialized);
}

let sink = stream.clone();
@ -287,9 +273,10 @@ pub extern "C" fn startup_veilid_core(port: i64, config: FfiStr) {
}
});

let res = veilid_core::api_startup_json(update_callback, config_json).await;
let veilid_api = check_err_json!(stream, res);
let veilid_api = veilid_core::api_startup_json(update_callback, config_json).await?;
*api_lock = Some(veilid_api);

APIRESULT_VOID
});
}

@ -302,6 +289,24 @@ pub extern "C" fn get_veilid_state(port: i64) {
});
}

#[no_mangle]
pub extern "C" fn attach(port: i64) {
DartIsolateWrapper::new(port).spawn_result_json(async move {
let veilid_api = get_veilid_api().await?;
veilid_api.attach().await?;
APIRESULT_VOID
});
}

#[no_mangle]
pub extern "C" fn detach(port: i64) {
DartIsolateWrapper::new(port).spawn_result_json(async move {
let veilid_api = get_veilid_api().await?;
veilid_api.detach().await?;
APIRESULT_VOID
});
}

#[no_mangle]
#[instrument]
pub extern "C" fn shutdown_veilid_core(port: i64) {
@ -20,7 +20,7 @@ const MESSAGE_ERR_JSON: i32 = 3;
//const MESSAGE_STREAM_ITEM: i32 = 4;
const MESSAGE_STREAM_ITEM_JSON: i32 = 5;
//const MESSAGE_STREAM_ABORT: i32 = 6;
const MESSAGE_STREAM_ABORT_JSON: i32 = 7;
//const MESSAGE_STREAM_ABORT_JSON: i32 = 7;
const MESSAGE_STREAM_CLOSE: i32 = 8;

impl DartIsolateWrapper {
@ -148,17 +148,17 @@ impl DartIsolateStream {
// }
// }

pub fn abort_json<E: Serialize + Debug>(self, error: E) -> bool {
let mut inner = self.inner.lock();
if let Some(isolate) = inner.isolate.take() {
isolate.post(vec![
MESSAGE_STREAM_ABORT_JSON.into_dart(),
veilid_core::serialize_json(error).into_dart(),
])
} else {
false
}
}
// pub fn abort_json<E: Serialize + Debug>(self, error: E) -> bool {
// let mut inner = self.inner.lock();
// if let Some(isolate) = inner.isolate.take() {
// isolate.post(vec![
// MESSAGE_STREAM_ABORT_JSON.into_dart(),
// veilid_core::serialize_json(error).into_dart(),
// ])
// } else {
// false
// }
// }

pub fn close(self) -> bool {
let mut inner = self.inner.lock();
@ -130,11 +130,7 @@ fn do_clap_matches(default_config_path: &OsStr) -> Result<clap::ArgMatches, clap
.value_name("BOOTSTRAP_NODE_LIST")
.help("Specify a list of bootstrap node dialinfos to use"),
)
.arg(
Arg::new("local")
.long("local")
.help("Enable local peer scope")
);
;

#[cfg(debug_assertions)]
let matches = matches.arg(
@ -218,9 +214,6 @@ pub fn process_command_line() -> EyreResult<(Settings, ArgMatches)> {
if matches.is_present("attach") {
settingsrw.auto_attach = !matches!(matches.value_of("attach"), Some("true"));
}
if matches.is_present("local") {
settingsrw.core.network.enable_local_peer_scope = true;
}
if matches.occurrences_of("delete-protected-store") != 0 {
settingsrw.core.protected_store.delete = true;
}
@ -99,7 +99,6 @@ core:
upnp: true
natpmp: false
detect_address_changes: true
enable_local_peer_scope: false
restricted_nat_retries: 0
tls:
certificate_path: '%CERTIFICATE_PATH%'
@ -589,7 +588,6 @@ pub struct Network {
pub upnp: bool,
pub natpmp: bool,
pub detect_address_changes: bool,
pub enable_local_peer_scope: bool,
pub restricted_nat_retries: u32,
pub tls: Tls,
pub application: Application,
@ -986,7 +984,6 @@ impl Settings {
set_config_value!(inner.core.network.upnp, value);
set_config_value!(inner.core.network.natpmp, value);
set_config_value!(inner.core.network.detect_address_changes, value);
set_config_value!(inner.core.network.enable_local_peer_scope, value);
set_config_value!(inner.core.network.restricted_nat_retries, value);
set_config_value!(inner.core.network.tls.certificate_path, value);
set_config_value!(inner.core.network.tls.private_key_path, value);
@ -1187,9 +1184,6 @@ impl Settings {
"network.detect_address_changes" => {
Ok(Box::new(inner.core.network.detect_address_changes))
}
"network.enable_local_peer_scope" => {
Ok(Box::new(inner.core.network.enable_local_peer_scope))
}
"network.restricted_nat_retries" => {
Ok(Box::new(inner.core.network.restricted_nat_retries))
}
@ -1513,7 +1507,6 @@ mod tests {
assert_eq!(s.core.network.upnp, true);
assert_eq!(s.core.network.natpmp, false);
assert_eq!(s.core.network.detect_address_changes, true);
assert_eq!(s.core.network.enable_local_peer_scope, false);
assert_eq!(s.core.network.restricted_nat_retries, 0u32);
//
assert_eq!(
@ -232,6 +232,24 @@ pub fn get_veilid_state() -> Promise {
})
}

#[wasm_bindgen()]
pub fn attach() -> Promise {
wrap_api_future(async move {
let veilid_api = get_veilid_api()?;
veilid_api.attach().await?;
APIRESULT_UNDEFINED
})
}

#[wasm_bindgen()]
pub fn detach() -> Promise {
wrap_api_future(async move {
let veilid_api = get_veilid_api()?;
veilid_api.detach().await?;
APIRESULT_UNDEFINED
})
}

#[wasm_bindgen()]
pub fn shutdown_veilid_core() -> Promise {
wrap_api_future(async move {