Mirror of https://gitlab.com/veilid/veilid.git

Merge branch 'dev' into 'main'

Latest from dev: Private Routing

See merge request veilid/veilid!12

Commit 2f3485e9b7
@@ -1,3 +1,6 @@
[build]
rustflags = ["--cfg", "tokio_unstable"]

[target.aarch64-unknown-linux-gnu]
linker = "aarch64-linux-gnu-gcc"
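For a one-off build outside this repository's .cargo configuration, the same cfg flag can be passed through the RUSTFLAGS environment variable. This is only an illustration of what the [build] rustflags entry above does project-wide; it is not part of the change itself.

# Equivalent ad-hoc invocation (assumes a working Rust toolchain is installed)
RUSTFLAGS="--cfg tokio_unstable" cargo build --release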
Cargo.lock (generated, 1544 lines changed): diff suppressed because it is too large.
Earthfile (42 lines changed)
@@ -7,7 +7,7 @@ FROM --platform amd64 ubuntu:16.04
# Install build prerequisites
deps-base:
RUN apt-get -y update
RUN apt-get install -y iproute2 curl build-essential cmake libssl-dev openssl file git pkg-config libdbus-1-dev libdbus-glib-1-dev libgirepository1.0-dev libcairo2-dev
RUN apt-get install -y iproute2 curl build-essential cmake libssl-dev openssl file git pkg-config libdbus-1-dev libdbus-glib-1-dev libgirepository1.0-dev libcairo2-dev checkinstall unzip

# Install Cap'n Proto
deps-capnp:
@@ -15,9 +15,15 @@ deps-capnp:
COPY scripts/earthly/install_capnproto.sh /
RUN /bin/bash /install_capnproto.sh; rm /install_capnproto.sh

# Install protoc
deps-protoc:
FROM +deps-capnp
COPY scripts/earthly/install_protoc.sh /
RUN /bin/bash /install_protoc.sh; rm /install_protoc.sh

# Install Rust
deps-rust:
FROM +deps-capnp
FROM +deps-protoc
ENV RUSTUP_HOME=/usr/local/rustup
ENV CARGO_HOME=/usr/local/cargo
ENV PATH=/usr/local/cargo/bin:$PATH
@@ -49,35 +55,43 @@ deps-android:
RUN curl -o /Android/cmdline-tools.zip https://dl.google.com/android/repository/commandlinetools-linux-7583922_latest.zip
RUN cd /Android; unzip /Android/cmdline-tools.zip
RUN yes | /Android/cmdline-tools/bin/sdkmanager --sdk_root=/Android/Sdk build-tools\;30.0.3 ndk\;22.0.7026061 cmake\;3.18.1 platform-tools platforms\;android-30

# Clean up the apt cache to save space
deps:
FROM +deps-android
RUN apt-get clean

code:
FROM +deps
# Just linux build not android
deps-linux:
FROM +deps-cross
RUN apt-get clean

# Code + Linux deps
code-linux:
FROM +deps-linux
COPY --dir .cargo external files scripts veilid-cli veilid-core veilid-server veilid-flutter veilid-wasm Cargo.lock Cargo.toml /veilid
WORKDIR /veilid

# Code + Linux + Android deps
code-android:
FROM +deps-android
COPY --dir .cargo external files scripts veilid-cli veilid-core veilid-server veilid-flutter veilid-wasm Cargo.lock Cargo.toml /veilid
WORKDIR /veilid

# Clippy only
clippy:
FROM +code
FROM +code-linux
RUN cargo clippy

# Build
build-linux-amd64:
FROM +code
FROM +code-linux
RUN cargo build --target x86_64-unknown-linux-gnu --release
SAVE ARTIFACT ./target/x86_64-unknown-linux-gnu AS LOCAL ./target/artifacts/x86_64-unknown-linux-gnu

build-linux-arm64:
FROM +code
FROM +code-linux
RUN cargo build --target aarch64-unknown-linux-gnu --release
SAVE ARTIFACT ./target/aarch64-unknown-linux-gnu AS LOCAL ./target/artifacts/aarch64-unknown-linux-gnu

build-android:
FROM +code
FROM +code-android
WORKDIR /veilid/veilid-core
ENV PATH=$PATH:/Android/Sdk/ndk/22.0.7026061/toolchains/llvm/prebuilt/linux-x86_64/bin/
RUN cargo build --target aarch64-linux-android --release
@@ -92,11 +106,11 @@ build-android:

# Unit tests
unit-tests-linux-amd64:
FROM +code
FROM +code-linux
RUN cargo test --target x86_64-unknown-linux-gnu --release

unit-tests-linux-arm64:
FROM +code
FROM +code-linux
RUN cargo test --target aarch64-unknown-linux-gnu --release

# Package
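The renamed Earthfile targets above would typically be driven with the earthly CLI. The commands below are only an illustration of how the targets in this diff fit together; the exact invocations are not part of this merge request.

earthly +clippy                   # lint against the code-linux image
earthly +build-linux-amd64        # artifacts saved to ./target/artifacts/x86_64-unknown-linux-gnu
earthly +unit-tests-linux-arm64   # run tests against the aarch64 cross target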
@@ -63,7 +63,9 @@ core:
max_timestamp_behind_ms: 10000
max_timestamp_ahead_ms: 10000
timeout_ms: 10000
max_route_hop_count: 7
max_route_hop_count: 4
default_route_hop_count: 1

dht:
resolve_node_timeout:
resolve_node_count: 20
@@ -228,7 +228,8 @@ rpc:
max_timestamp_behind_ms: 10000
max_timestamp_ahead_ms: 10000
timeout_ms: 10000
max_route_hop_count: 7
max_route_hop_count: 4
default_route_hop_count: 1
```

#### core:network:dht
external/cursive (vendored, 2 lines changed)
@@ -1 +1 @@
Subproject commit fea04c2f9bb8c4c9551ca6eb4f2cb1268551120f
Subproject commit f1504cf37a7021454020cda5cfba815755399794

external/cursive-flexi-logger-view (vendored, 2 lines changed)
@@ -1 +1 @@
Subproject commit fd560c499be0f34305e0d48aca7f1bc3d015a17f
Subproject commit effa60cea24e99f294865ed325ffc57612d72785

external/hashlink (vendored, 2 lines changed)
@@ -1 +1 @@
Subproject commit c8da3a58485c850f4029a58de99b1af83112ba8a
Subproject commit a089b448071ef36633947693b90023c67dc8485f
@@ -1,11 +1,28 @@
#!/bin/bash
mkdir /tmp/capnproto-install
cd /tmp/capnproto-install
curl -O https://capnproto.org/capnproto-c++-0.9.1.tar.gz
tar zxf capnproto-c++-0.9.1.tar.gz
cd capnproto-c++-0.9.1
./configure
pushd /tmp/capnproto-install
curl -O https://capnproto.org/capnproto-c++-0.10.2.tar.gz
tar zxf capnproto-c++-0.10.2.tar.gz
cd capnproto-c++-0.10.2
./configure --without-openssl
make -j6 check
if [ "$EUID" -ne 0 ]; then
if command -v checkinstall &> /dev/null; then
sudo checkinstall -y
cp *.deb ~
else
sudo make install

fi
popd
sudo rm -rf /tmp/capnproto-install
else
if command -v checkinstall &> /dev/null; then
checkinstall -y
cp *.deb ~
else
make install
cd /
fi
popd
rm -rf /tmp/capnproto-install
fi
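A quick way to confirm the script produced what the new Earthfile expects is to check the installed tool afterwards. The exact output wording is an assumption about capnp's CLI, not something the script verifies:

capnp --version    # should now report a 0.10.2 build
ls ~/*.deb         # the checkinstall path leaves a removable .deb in the invoking user's home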
scripts/earthly/install_protoc.sh (new executable file, 26 lines)
@@ -0,0 +1,26 @@
#!/bin/bash
VERSION=21.9

mkdir /tmp/protoc-install
pushd /tmp/protoc-install
curl -OL https://github.com/protocolbuffers/protobuf/releases/download/v$VERSION/protoc-$VERSION-linux-x86_64.zip
unzip protoc-$VERSION-linux-x86_64.zip
if [ "$EUID" -ne 0 ]; then
if command -v checkinstall &> /dev/null; then
sudo checkinstall --pkgversion=$VERSION -y cp -r bin include /usr/local/
cp *.deb ~
else
sudo make install
fi
popd
sudo rm -rf /tmp/protoc-install
else
if command -v checkinstall &> /dev/null; then
checkinstall --pkgversion=$VERSION -y cp -r bin include /usr/local/
cp *.deb ~
else
make install
fi
popd
rm -rf /tmp/protoc-install
fi
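Both install scripts lean on the same checkinstall idea: wrap the raw install command so the files it copies are tracked in a .deb that can be removed cleanly later. A minimal sketch of that pattern, with an illustrative package name that does not come from these scripts:

sudo checkinstall --pkgname=my-local-tool --pkgversion=1.0 -y cp -r bin include /usr/local/
sudo dpkg -r my-local-tool    # uninstalls the tracked files later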
@@ -74,7 +74,12 @@ fi
rustup target add aarch64-linux-android armv7-linux-androideabi i686-linux-android x86_64-linux-android wasm32-unknown-unknown

# install cargo packages
cargo install wasm-bindgen-cli
cargo install wasm-bindgen-cli wasm-pack

# Ensure packages are installed
sudo apt-get install libc6-dev-i386 libc6:i386 libncurses5:i386 libstdc++6:i386 lib32z1 libbz2-1.0:i386 openjdk-11-jdk llvm wabt capnproto
sudo apt-get install libc6-dev-i386 libc6:i386 libncurses5:i386 libstdc++6:i386 lib32z1 libbz2-1.0:i386 openjdk-11-jdk llvm wabt checkinstall

# Install capnproto using the same mechanism as our earthly build
$SCRIPTDIR/scripts/earthly/install_capnproto.sh
# Install protoc using the same mechanism as our earthly build
$SCRIPTDIR/scripts/earthly/install_protoc.sh
@@ -90,13 +90,7 @@ fi
rustup target add aarch64-apple-darwin aarch64-apple-ios x86_64-apple-darwin x86_64-apple-ios wasm32-unknown-unknown aarch64-linux-android armv7-linux-androideabi i686-linux-android x86_64-linux-android

# install cargo packages
cargo install wasm-bindgen-cli

# install bitcode compatible ios toolchain
# echo Manual Step:
# echo install +ios-arm64-1.57.0 toolchain for bitcode from https://github.com/getditto/rust-bitcode/releases/latest and unzip
# echo xattr -d -r com.apple.quarantine .
# echo ./install.sh
cargo install wasm-bindgen-cli wasm-pack

# ensure we have command line tools
xcode-select --install
@@ -114,5 +108,5 @@ if [ "$BREW_USER" == "" ]; then
BREW_USER=`whoami`
fi
fi
sudo -H -u $BREW_USER brew install capnp cmake wabt llvm
sudo -H -u $BREW_USER brew install capnp cmake wabt llvm protobuf
@@ -42,7 +42,9 @@ bugsalot = "^0"
flexi_logger = { version = "^0", features = ["use_chrono_for_offset"] }
thiserror = "^1"
crossbeam-channel = "^0"
hex = "^0"
veilid-core = { path = "../veilid-core", default_features = false }
json = "^0"

[dev-dependencies]
serial_test = "^0"
@ -3,6 +3,7 @@ use crate::tools::*;
|
||||
use crate::veilid_client_capnp::*;
|
||||
use capnp::capability::Promise;
|
||||
use capnp_rpc::{pry, rpc_twoparty_capnp, twoparty, Disconnector, RpcSystem};
|
||||
use futures::future::FutureExt;
|
||||
use serde::de::DeserializeOwned;
|
||||
use std::cell::RefCell;
|
||||
use std::net::SocketAddr;
|
||||
@ -76,12 +77,21 @@ impl veilid_client::Server for VeilidClientImpl {
|
||||
VeilidUpdate::Log(log) => {
|
||||
self.comproc.update_log(log);
|
||||
}
|
||||
VeilidUpdate::AppMessage(msg) => {
|
||||
self.comproc.update_app_message(msg);
|
||||
}
|
||||
VeilidUpdate::AppCall(call) => {
|
||||
self.comproc.update_app_call(call);
|
||||
}
|
||||
VeilidUpdate::Attachment(attachment) => {
|
||||
self.comproc.update_attachment(attachment);
|
||||
}
|
||||
VeilidUpdate::Network(network) => {
|
||||
self.comproc.update_network_status(network);
|
||||
}
|
||||
VeilidUpdate::Config(config) => {
|
||||
self.comproc.update_config(config);
|
||||
}
|
||||
VeilidUpdate::Shutdown => self.comproc.update_shutdown(),
|
||||
}
|
||||
|
||||
@ -94,7 +104,9 @@ struct ClientApiConnectionInner {
|
||||
connect_addr: Option<SocketAddr>,
|
||||
disconnector: Option<Disconnector<rpc_twoparty_capnp::Side>>,
|
||||
server: Option<Rc<RefCell<veilid_server::Client>>>,
|
||||
server_settings: Option<String>,
|
||||
disconnect_requested: bool,
|
||||
cancel_eventual: Eventual,
|
||||
}
|
||||
|
||||
type Handle<T> = Rc<RefCell<T>>;
|
||||
@ -112,10 +124,21 @@ impl ClientApiConnection {
|
||||
connect_addr: None,
|
||||
disconnector: None,
|
||||
server: None,
|
||||
server_settings: None,
|
||||
disconnect_requested: false,
|
||||
cancel_eventual: Eventual::new(),
|
||||
})),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn cancel(&self) {
|
||||
let eventual = {
|
||||
let inner = self.inner.borrow();
|
||||
inner.cancel_eventual.clone()
|
||||
};
|
||||
eventual.resolve(); // don't need to await this
|
||||
}
|
||||
|
||||
async fn process_veilid_state<'a>(
|
||||
&'a mut self,
|
||||
veilid_state: VeilidState,
|
||||
@ -123,7 +146,7 @@ impl ClientApiConnection {
|
||||
let mut inner = self.inner.borrow_mut();
|
||||
inner.comproc.update_attachment(veilid_state.attachment);
|
||||
inner.comproc.update_network_status(veilid_state.network);
|
||||
|
||||
inner.comproc.update_config(veilid_state.config);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
@ -191,6 +214,13 @@ impl ClientApiConnection {
|
||||
.map_err(|e| format!("failed to get deserialize veilid state: {}", e))?;
|
||||
self.process_veilid_state(veilid_state).await?;
|
||||
|
||||
// Save server settings
|
||||
let server_settings = response
|
||||
.get_settings()
|
||||
.map_err(|e| format!("failed to get initial veilid server settings: {}", e))?
|
||||
.to_owned();
|
||||
self.inner.borrow_mut().server_settings = Some(server_settings.clone());
|
||||
|
||||
// Don't drop the registration, doing so will remove the client
|
||||
// object mapping from the server which we need for the update backchannel
|
||||
|
||||
@ -201,9 +231,10 @@ impl ClientApiConnection {
|
||||
res.map_err(|e| format!("client RPC system error: {}", e))
|
||||
}
|
||||
|
||||
async fn handle_connection(&mut self) -> Result<(), String> {
|
||||
async fn handle_connection(&mut self, connect_addr: SocketAddr) -> Result<(), String> {
|
||||
trace!("ClientApiConnection::handle_connection");
|
||||
let connect_addr = self.inner.borrow().connect_addr.unwrap();
|
||||
|
||||
self.inner.borrow_mut().connect_addr = Some(connect_addr);
|
||||
// Connect the TCP socket
|
||||
let stream = TcpStream::connect(connect_addr)
|
||||
.await
|
||||
@ -245,9 +276,11 @@ impl ClientApiConnection {
|
||||
// Drop the server and disconnector too (if we still have it)
|
||||
let mut inner = self.inner.borrow_mut();
|
||||
let disconnect_requested = inner.disconnect_requested;
|
||||
inner.server_settings = None;
|
||||
inner.server = None;
|
||||
inner.disconnector = None;
|
||||
inner.disconnect_requested = false;
|
||||
inner.connect_addr = None;
|
||||
|
||||
if !disconnect_requested {
|
||||
// Connection lost
|
||||
@ -258,6 +291,34 @@ impl ClientApiConnection {
|
||||
}
|
||||
}
|
||||
|
||||
pub fn cancellable<T>(&mut self, p: Promise<T, capnp::Error>) -> Promise<T, capnp::Error>
|
||||
where
|
||||
T: 'static,
|
||||
{
|
||||
let (mut cancel_instance, cancel_eventual) = {
|
||||
let inner = self.inner.borrow();
|
||||
(
|
||||
inner.cancel_eventual.instance_empty().fuse(),
|
||||
inner.cancel_eventual.clone(),
|
||||
)
|
||||
};
|
||||
let mut p = p.fuse();
|
||||
|
||||
Promise::from_future(async move {
|
||||
let out = select! {
|
||||
a = p => {
|
||||
a
|
||||
},
|
||||
_ = cancel_instance => {
|
||||
Err(capnp::Error::failed("cancelled".into()))
|
||||
}
|
||||
};
|
||||
drop(cancel_instance);
|
||||
cancel_eventual.reset();
|
||||
out
|
||||
})
|
||||
}
|
||||
|
||||
pub async fn server_attach(&mut self) -> Result<(), String> {
|
||||
trace!("ClientApiConnection::server_attach");
|
||||
let server = {
|
||||
@ -269,7 +330,10 @@ impl ClientApiConnection {
|
||||
.clone()
|
||||
};
|
||||
let request = server.borrow().attach_request();
|
||||
let response = request.send().promise.await.map_err(map_to_string)?;
|
||||
let response = self
|
||||
.cancellable(request.send().promise)
|
||||
.await
|
||||
.map_err(map_to_string)?;
|
||||
let reader = response
|
||||
.get()
|
||||
.map_err(map_to_string)?
|
||||
@ -290,7 +354,10 @@ impl ClientApiConnection {
|
||||
.clone()
|
||||
};
|
||||
let request = server.borrow().detach_request();
|
||||
let response = request.send().promise.await.map_err(map_to_string)?;
|
||||
let response = self
|
||||
.cancellable(request.send().promise)
|
||||
.await
|
||||
.map_err(map_to_string)?;
|
||||
let reader = response
|
||||
.get()
|
||||
.map_err(map_to_string)?
|
||||
@ -311,7 +378,10 @@ impl ClientApiConnection {
|
||||
.clone()
|
||||
};
|
||||
let request = server.borrow().shutdown_request();
|
||||
let response = request.send().promise.await.map_err(map_to_string)?;
|
||||
let response = self
|
||||
.cancellable(request.send().promise)
|
||||
.await
|
||||
.map_err(map_to_string)?;
|
||||
response.get().map(drop).map_err(map_to_string)
|
||||
}
|
||||
|
||||
@ -327,7 +397,10 @@ impl ClientApiConnection {
|
||||
};
|
||||
let mut request = server.borrow().debug_request();
|
||||
request.get().set_command(&what);
|
||||
let response = request.send().promise.await.map_err(map_to_string)?;
|
||||
let response = self
|
||||
.cancellable(request.send().promise)
|
||||
.await
|
||||
.map_err(map_to_string)?;
|
||||
let reader = response
|
||||
.get()
|
||||
.map_err(map_to_string)?
|
||||
@ -355,7 +428,36 @@ impl ClientApiConnection {
|
||||
request.get().set_layer(&layer);
|
||||
let log_level_json = veilid_core::serialize_json(&log_level);
|
||||
request.get().set_log_level(&log_level_json);
|
||||
let response = request.send().promise.await.map_err(map_to_string)?;
|
||||
let response = self
|
||||
.cancellable(request.send().promise)
|
||||
.await
|
||||
.map_err(map_to_string)?;
|
||||
let reader = response
|
||||
.get()
|
||||
.map_err(map_to_string)?
|
||||
.get_result()
|
||||
.map_err(map_to_string)?;
|
||||
let res: Result<(), VeilidAPIError> = decode_api_result(&reader);
|
||||
res.map_err(map_to_string)
|
||||
}
|
||||
|
||||
pub async fn server_appcall_reply(&mut self, id: u64, msg: Vec<u8>) -> Result<(), String> {
|
||||
trace!("ClientApiConnection::appcall_reply");
|
||||
let server = {
|
||||
let inner = self.inner.borrow();
|
||||
inner
|
||||
.server
|
||||
.as_ref()
|
||||
.ok_or_else(|| "Not connected, ignoring change_log_level request".to_owned())?
|
||||
.clone()
|
||||
};
|
||||
let mut request = server.borrow().app_call_reply_request();
|
||||
request.get().set_id(id);
|
||||
request.get().set_message(&msg);
|
||||
let response = self
|
||||
.cancellable(request.send().promise)
|
||||
.await
|
||||
.map_err(map_to_string)?;
|
||||
let reader = response
|
||||
.get()
|
||||
.map_err(map_to_string)?
|
||||
@ -369,9 +471,7 @@ impl ClientApiConnection {
|
||||
pub async fn connect(&mut self, connect_addr: SocketAddr) -> Result<(), String> {
|
||||
trace!("ClientApiConnection::connect");
|
||||
// Save the address to connect to
|
||||
self.inner.borrow_mut().connect_addr = Some(connect_addr);
|
||||
|
||||
self.handle_connection().await
|
||||
self.handle_connection(connect_addr).await
|
||||
}
|
||||
|
||||
// End Client API connection
|
||||
@ -382,7 +482,6 @@ impl ClientApiConnection {
|
||||
Some(d) => {
|
||||
self.inner.borrow_mut().disconnect_requested = true;
|
||||
d.await.unwrap();
|
||||
self.inner.borrow_mut().connect_addr = None;
|
||||
}
|
||||
None => {
|
||||
debug!("disconnector doesn't exist");
|
||||
|
@ -49,6 +49,7 @@ struct CommandProcessorInner {
|
||||
autoreconnect: bool,
|
||||
server_addr: Option<SocketAddr>,
|
||||
connection_waker: Eventual,
|
||||
last_call_id: Option<u64>,
|
||||
}
|
||||
|
||||
type Handle<T> = Rc<RefCell<T>>;
|
||||
@ -70,6 +71,7 @@ impl CommandProcessor {
|
||||
autoreconnect: settings.autoreconnect,
|
||||
server_addr: None,
|
||||
connection_waker: Eventual::new(),
|
||||
last_call_id: None,
|
||||
})),
|
||||
}
|
||||
}
|
||||
@ -100,6 +102,12 @@ impl CommandProcessor {
|
||||
}
|
||||
}
|
||||
|
||||
pub fn cancel_command(&self) {
|
||||
trace!("CommandProcessor::cancel_command");
|
||||
let capi = self.capi();
|
||||
capi.cancel();
|
||||
}
|
||||
|
||||
pub fn cmd_help(&self, _rest: Option<String>, callback: UICallback) -> Result<(), String> {
|
||||
trace!("CommandProcessor::cmd_help");
|
||||
self.ui().add_node_event(
|
||||
@ -111,6 +119,7 @@ attach - attach the server to the Veilid network
|
||||
detach - detach the server from the Veilid network
|
||||
debug - send a debugging command to the Veilid server
|
||||
change_log_level - change the log level for a tracing layer
|
||||
reply - reply to an AppCall not handled directly by the server
|
||||
"#
|
||||
.to_owned(),
|
||||
);
|
||||
@ -225,6 +234,66 @@ change_log_level - change the log level for a tracing layer
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn cmd_reply(&self, rest: Option<String>, callback: UICallback) -> Result<(), String> {
|
||||
trace!("CommandProcessor::cmd_reply");
|
||||
|
||||
let mut capi = self.capi();
|
||||
let ui = self.ui();
|
||||
let some_last_id = self.inner_mut().last_call_id.take();
|
||||
spawn_detached_local(async move {
|
||||
let (first, second) = Self::word_split(&rest.clone().unwrap_or_default());
|
||||
let (id, msg) = if let Some(second) = second {
|
||||
let id = match u64::from_str(&first) {
|
||||
Err(e) => {
|
||||
ui.add_node_event(format!("invalid appcall id: {}", e));
|
||||
ui.send_callback(callback);
|
||||
return;
|
||||
}
|
||||
Ok(v) => v,
|
||||
};
|
||||
(id, second)
|
||||
} else {
|
||||
let id = match some_last_id {
|
||||
None => {
|
||||
ui.add_node_event("must specify last call id".to_owned());
|
||||
ui.send_callback(callback);
|
||||
return;
|
||||
}
|
||||
Some(v) => v,
|
||||
};
|
||||
(id, rest.unwrap_or_default())
|
||||
};
|
||||
let msg = if msg[0..1] == "#".to_owned() {
|
||||
match hex::decode(msg[1..].as_bytes().to_vec()) {
|
||||
Err(e) => {
|
||||
ui.add_node_event(format!("invalid hex message: {}", e));
|
||||
ui.send_callback(callback);
|
||||
return;
|
||||
}
|
||||
Ok(v) => v,
|
||||
}
|
||||
} else {
|
||||
msg[1..].as_bytes().to_vec()
|
||||
};
|
||||
let msglen = msg.len();
|
||||
match capi.server_appcall_reply(id, msg).await {
|
||||
Ok(()) => {
|
||||
ui.add_node_event(format!("reply sent to {} : {} bytes", id, msglen));
|
||||
ui.send_callback(callback);
|
||||
return;
|
||||
}
|
||||
Err(e) => {
|
||||
ui.display_string_dialog(
|
||||
"Server command 'appcall_reply' failed",
|
||||
e.to_string(),
|
||||
callback,
|
||||
);
|
||||
}
|
||||
}
|
||||
});
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn run_command(&self, command_line: &str, callback: UICallback) -> Result<(), String> {
|
||||
//
|
||||
let (cmd, rest) = Self::word_split(command_line);
|
||||
@ -238,6 +307,7 @@ change_log_level - change the log level for a tracing layer
|
||||
"detach" => self.cmd_detach(callback),
|
||||
"debug" => self.cmd_debug(rest, callback),
|
||||
"change_log_level" => self.cmd_change_log_level(rest, callback),
|
||||
"reply" => self.cmd_reply(rest, callback),
|
||||
_ => {
|
||||
let ui = self.ui();
|
||||
ui.send_callback(callback);
|
||||
@ -318,6 +388,7 @@ change_log_level - change the log level for a tracing layer
|
||||
// called by client_api_connection
|
||||
// calls into ui
|
||||
////////////////////////////////////////////
|
||||
|
||||
pub fn update_attachment(&mut self, attachment: veilid_core::VeilidStateAttachment) {
|
||||
self.inner_mut().ui.set_attachment_state(attachment.state);
|
||||
}
|
||||
@ -330,8 +401,11 @@ change_log_level - change the log level for a tracing layer
|
||||
network.peers,
|
||||
);
|
||||
}
|
||||
pub fn update_config(&mut self, config: veilid_core::VeilidStateConfig) {
|
||||
self.inner_mut().ui.set_config(config.config)
|
||||
}
|
||||
|
||||
pub fn update_log(&mut self, log: veilid_core::VeilidStateLog) {
|
||||
pub fn update_log(&mut self, log: veilid_core::VeilidLog) {
|
||||
self.inner().ui.add_node_event(format!(
|
||||
"{}: {}{}",
|
||||
log.log_level,
|
||||
@ -344,6 +418,49 @@ change_log_level - change the log level for a tracing layer
|
||||
));
|
||||
}
|
||||
|
||||
pub fn update_app_message(&mut self, msg: veilid_core::VeilidAppMessage) {
|
||||
// check is message body is ascii printable
|
||||
let mut printable = true;
|
||||
for c in &msg.message {
|
||||
if *c < 32 || *c > 126 {
|
||||
printable = false;
|
||||
}
|
||||
}
|
||||
|
||||
let strmsg = if printable {
|
||||
String::from_utf8_lossy(&msg.message).to_string()
|
||||
} else {
|
||||
hex::encode(&msg.message)
|
||||
};
|
||||
|
||||
self.inner()
|
||||
.ui
|
||||
.add_node_event(format!("AppMessage ({:?}): {}", msg.sender, strmsg));
|
||||
}
|
||||
|
||||
pub fn update_app_call(&mut self, call: veilid_core::VeilidAppCall) {
|
||||
// check is message body is ascii printable
|
||||
let mut printable = true;
|
||||
for c in &call.message {
|
||||
if *c < 32 || *c > 126 {
|
||||
printable = false;
|
||||
}
|
||||
}
|
||||
|
||||
let strmsg = if printable {
|
||||
String::from_utf8_lossy(&call.message).to_string()
|
||||
} else {
|
||||
format!("#{}", hex::encode(&call.message))
|
||||
};
|
||||
|
||||
self.inner().ui.add_node_event(format!(
|
||||
"AppCall ({:?}) id = {:016x} : {}",
|
||||
call.sender, call.id, strmsg
|
||||
));
|
||||
|
||||
self.inner_mut().last_call_id = Some(call.id);
|
||||
}
|
||||
|
||||
pub fn update_shutdown(&mut self) {
|
||||
// Do nothing with this, we'll process shutdown when rpc connection closes
|
||||
}
|
||||
@ -381,7 +498,6 @@ change_log_level - change the log level for a tracing layer
|
||||
// calls into client_api_connection
|
||||
////////////////////////////////////////////
|
||||
pub fn attach(&mut self) {
|
||||
trace!("CommandProcessor::attach");
|
||||
let mut capi = self.capi();
|
||||
|
||||
spawn_detached_local(async move {
|
||||
@ -392,7 +508,6 @@ change_log_level - change the log level for a tracing layer
|
||||
}
|
||||
|
||||
pub fn detach(&mut self) {
|
||||
trace!("CommandProcessor::detach");
|
||||
let mut capi = self.capi();
|
||||
|
||||
spawn_detached_local(async move {
|
||||
|
@ -55,6 +55,7 @@ struct UIState {
|
||||
network_down_up: Dirty<(f32, f32)>,
|
||||
connection_state: Dirty<ConnectionState>,
|
||||
peers_state: Dirty<Vec<PeerTableData>>,
|
||||
node_id: Dirty<String>,
|
||||
}
|
||||
|
||||
impl UIState {
|
||||
@ -65,6 +66,7 @@ impl UIState {
|
||||
network_down_up: Dirty::new((0.0, 0.0)),
|
||||
connection_state: Dirty::new(ConnectionState::Disconnected),
|
||||
peers_state: Dirty::new(Vec::new()),
|
||||
node_id: Dirty::new("".to_owned()),
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -213,7 +215,13 @@ impl UI {
|
||||
UI::setup_quit_handler(s);
|
||||
});
|
||||
}
|
||||
|
||||
fn clear_handler(siv: &mut Cursive) {
|
||||
cursive_flexi_logger_view::clear_log();
|
||||
UI::update_cb(siv);
|
||||
}
|
||||
fn node_events_panel(s: &mut Cursive) -> ViewRef<Panel<ScrollView<FlexiLoggerView>>> {
|
||||
s.find_name("node-events-panel").unwrap()
|
||||
}
|
||||
fn command_line(s: &mut Cursive) -> ViewRef<EditView> {
|
||||
s.find_name("command-line").unwrap()
|
||||
}
|
||||
@ -306,11 +314,18 @@ impl UI {
|
||||
fn run_command(s: &mut Cursive, text: &str) -> Result<(), String> {
|
||||
// disable ui
|
||||
Self::enable_command_ui(s, false);
|
||||
|
||||
// run command
|
||||
s.set_global_callback(cursive::event::Event::Key(Key::Esc), |s| {
|
||||
let cmdproc = Self::command_processor(s);
|
||||
cmdproc.cancel_command();
|
||||
});
|
||||
|
||||
let cmdproc = Self::command_processor(s);
|
||||
cmdproc.run_command(
|
||||
text,
|
||||
Box::new(|s| {
|
||||
s.set_global_callback(cursive::event::Event::Key(Key::Esc), UI::quit_handler);
|
||||
Self::enable_command_ui(s, true);
|
||||
}),
|
||||
)
|
||||
@ -565,6 +580,12 @@ impl UI {
|
||||
}
|
||||
}
|
||||
|
||||
fn refresh_main_titlebar(s: &mut Cursive) {
|
||||
let mut main_window = UI::node_events_panel(s);
|
||||
let inner = Self::inner_mut(s);
|
||||
main_window.set_title(format!("Node: {}", inner.ui_state.node_id.get()));
|
||||
}
|
||||
|
||||
fn refresh_statusbar(s: &mut Cursive) {
|
||||
let mut statusbar = UI::status_bar(s);
|
||||
|
||||
@ -627,6 +648,7 @@ impl UI {
|
||||
let mut refresh_button_attach = false;
|
||||
let mut refresh_connection_dialog = false;
|
||||
let mut refresh_peers = false;
|
||||
let mut refresh_main_titlebar = false;
|
||||
if inner.ui_state.attachment_state.take_dirty() {
|
||||
refresh_statusbar = true;
|
||||
refresh_button_attach = true;
|
||||
@ -647,6 +669,9 @@ impl UI {
|
||||
if inner.ui_state.peers_state.take_dirty() {
|
||||
refresh_peers = true;
|
||||
}
|
||||
if inner.ui_state.node_id.take_dirty() {
|
||||
refresh_main_titlebar = true;
|
||||
}
|
||||
|
||||
drop(inner);
|
||||
|
||||
@ -662,6 +687,9 @@ impl UI {
|
||||
if refresh_peers {
|
||||
Self::refresh_peers(s);
|
||||
}
|
||||
if refresh_main_titlebar {
|
||||
Self::refresh_main_titlebar(s);
|
||||
}
|
||||
}
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////
|
||||
@ -709,13 +737,11 @@ impl UI {
|
||||
|
||||
// Create layouts
|
||||
|
||||
let node_events_view = Panel::new(
|
||||
FlexiLoggerView::new_scrollable()
|
||||
.with_name("node-events")
|
||||
.full_screen(),
|
||||
)
|
||||
let node_events_view = Panel::new(FlexiLoggerView::new_scrollable())
|
||||
.title_position(HAlign::Left)
|
||||
.title("Node Events");
|
||||
.title("Node Events")
|
||||
.with_name("node-events-panel")
|
||||
.full_screen();
|
||||
|
||||
let peers_table_view = PeersTableView::new()
|
||||
.column(PeerTableColumn::NodeId, "Node Id", |c| c.width(43))
|
||||
@ -794,6 +820,7 @@ impl UI {
|
||||
|
||||
UI::setup_colors(&mut siv, &mut inner, settings);
|
||||
UI::setup_quit_handler(&mut siv);
|
||||
siv.set_global_callback(cursive::event::Event::CtrlChar('k'), UI::clear_handler);
|
||||
|
||||
drop(inner);
|
||||
drop(siv);
|
||||
@ -832,6 +859,16 @@ impl UI {
|
||||
inner.ui_state.peers_state.set(peers);
|
||||
let _ = inner.cb_sink.send(Box::new(UI::update_cb));
|
||||
}
|
||||
pub fn set_config(&mut self, config: VeilidConfigInner) {
|
||||
let mut inner = self.inner.borrow_mut();
|
||||
inner.ui_state.node_id.set(
|
||||
config
|
||||
.network
|
||||
.node_id
|
||||
.map(|x| x.encode())
|
||||
.unwrap_or("<unknown>".to_owned()),
|
||||
);
|
||||
}
|
||||
pub fn set_connection_state(&mut self, state: ConnectionState) {
|
||||
let mut inner = self.inner.borrow_mut();
|
||||
inner.ui_state.connection_state.set(state);
|
||||
|
@ -58,7 +58,11 @@ digest = "0.9.0"
|
||||
rtnetlink = { version = "^0", default-features = false, optional = true }
|
||||
async-std-resolver = { version = "^0", optional = true }
|
||||
trust-dns-resolver = { version = "^0", optional = true }
|
||||
|
||||
keyvaluedb = { path = "../external/keyvaluedb/keyvaluedb" }
|
||||
serde_bytes = { version = "^0" }
|
||||
#rkyv = { version = "^0", default_features = false, features = ["std", "alloc", "strict", "size_64", "validation"] }
|
||||
rkyv = { git = "https://github.com/crioux/rkyv.git", branch = "issue_326", default_features = false, features = ["std", "alloc", "strict", "size_64", "validation"] }
|
||||
bytecheck = "^0"
|
||||
|
||||
# Dependencies for native builds only
|
||||
# Linux, Windows, Mac, iOS, Android
|
||||
@ -72,7 +76,6 @@ async-tungstenite = { version = "^0", features = ["async-tls"] }
|
||||
maplit = "^1"
|
||||
config = { version = "^0", features = ["yaml"] }
|
||||
keyring-manager = { path = "../external/keyring-manager" }
|
||||
lru = "^0"
|
||||
async-tls = "^0.11"
|
||||
igd = { path = "../external/rust-igd" }
|
||||
webpki = "^0"
|
||||
@ -83,7 +86,6 @@ futures-util = { version = "^0", default-features = false, features = ["async-aw
|
||||
keyvaluedb-sqlite = { path = "../external/keyvaluedb/keyvaluedb-sqlite" }
|
||||
data-encoding = { version = "^2" }
|
||||
serde = { version = "^1", features = ["derive" ] }
|
||||
serde_cbor = { version = "^0" }
|
||||
serde_json = { version = "^1" }
|
||||
socket2 = "^0"
|
||||
bugsalot = "^0"
|
||||
@ -96,13 +98,10 @@ nix = "^0"
|
||||
wasm-bindgen = "^0"
|
||||
js-sys = "^0"
|
||||
wasm-bindgen-futures = "^0"
|
||||
hashbrown = "^0"
|
||||
lru = {version = "^0", features = ["hashbrown"] }
|
||||
no-std-net = { path = "../external/no-std-net", features = ["serde"] }
|
||||
keyvaluedb-web = { path = "../external/keyvaluedb/keyvaluedb-web" }
|
||||
data-encoding = { version = "^2", default_features = false, features = ["alloc"] }
|
||||
serde = { version = "^1", default-features = false, features = ["derive", "alloc"] }
|
||||
serde_cbor = { version = "^0", default-features = false, features = ["alloc"] }
|
||||
serde_json = { version = "^1", default-features = false, features = ["alloc"] }
|
||||
getrandom = { version = "^0", features = ["js"] }
|
||||
ws_stream_wasm = "^0"
|
||||
|
@ -3,14 +3,14 @@
|
||||
# IDs And Hashes
|
||||
##############################
|
||||
|
||||
struct Curve25519PublicKey {
|
||||
struct Key256 @0xdde44e3286f6a90d {
|
||||
u0 @0 :UInt64;
|
||||
u1 @1 :UInt64;
|
||||
u2 @2 :UInt64;
|
||||
u3 @3 :UInt64;
|
||||
}
|
||||
|
||||
struct Ed25519Signature {
|
||||
struct Signature512 @0x806749043a129c12 {
|
||||
u0 @0 :UInt64;
|
||||
u1 @1 :UInt64;
|
||||
u2 @2 :UInt64;
|
||||
@ -21,79 +21,72 @@ struct Ed25519Signature {
|
||||
u7 @7 :UInt64;
|
||||
}
|
||||
|
||||
struct XChaCha20Poly1305Nonce {
|
||||
struct Nonce24 @0xb6260db25d8d7dfc {
|
||||
u0 @0 :UInt64;
|
||||
u1 @1 :UInt64;
|
||||
u2 @2 :UInt64;
|
||||
}
|
||||
|
||||
struct BLAKE3Hash {
|
||||
u0 @0 :UInt64;
|
||||
u1 @1 :UInt64;
|
||||
u2 @2 :UInt64;
|
||||
u3 @3 :UInt64;
|
||||
}
|
||||
|
||||
using NodeID = Curve25519PublicKey;
|
||||
using RoutePublicKey = Curve25519PublicKey;
|
||||
using ValueID = Curve25519PublicKey;
|
||||
using Nonce = XChaCha20Poly1305Nonce;
|
||||
using Signature = Ed25519Signature;
|
||||
using BlockID = BLAKE3Hash;
|
||||
using NodeID = Key256;
|
||||
using RoutePublicKey = Key256;
|
||||
using ValueID = Key256;
|
||||
using Nonce = Nonce24;
|
||||
using Signature = Signature512;
|
||||
using BlockID = Key256;
|
||||
using TunnelID = UInt64;
|
||||
|
||||
# Node Dial Info
|
||||
################################################################
|
||||
|
||||
struct AddressIPV4 {
|
||||
struct AddressIPV4 @0xdb8769881266a6a0 {
|
||||
addr @0 :UInt32; # Address in big endian format
|
||||
}
|
||||
|
||||
struct AddressIPV6 {
|
||||
struct AddressIPV6 @0xb35d6e6011dc5c20 {
|
||||
addr0 @0 :UInt32; # \
|
||||
addr1 @1 :UInt32; # \ Address in big
|
||||
addr2 @2 :UInt32; # / endian format
|
||||
addr3 @3 :UInt32; # /
|
||||
}
|
||||
|
||||
struct Address {
|
||||
struct Address @0x812706e9e57d108b {
|
||||
union {
|
||||
ipv4 @0 :AddressIPV4;
|
||||
ipv6 @1 :AddressIPV6;
|
||||
}
|
||||
}
|
||||
|
||||
struct SocketAddress {
|
||||
struct SocketAddress @0x82df4272f4dd3a62 {
|
||||
address @0 :Address;
|
||||
port @1 :UInt16;
|
||||
}
|
||||
|
||||
enum ProtocolKind {
|
||||
enum ProtocolKind @0xde0bf5787c067d5a {
|
||||
udp @0;
|
||||
ws @1;
|
||||
wss @2;
|
||||
tcp @3;
|
||||
}
|
||||
|
||||
struct DialInfoUDP {
|
||||
struct DialInfoUDP @0xbb38a8b8b7024a7c {
|
||||
socketAddress @0 :SocketAddress;
|
||||
}
|
||||
|
||||
struct DialInfoTCP {
|
||||
struct DialInfoTCP @0x9e0a9371b9a9f7fc {
|
||||
socketAddress @0 :SocketAddress;
|
||||
}
|
||||
|
||||
struct DialInfoWS {
|
||||
struct DialInfoWS @0xd7795f7a92ab15b0 {
|
||||
socketAddress @0 :SocketAddress;
|
||||
request @1 :Text;
|
||||
}
|
||||
|
||||
struct DialInfoWSS {
|
||||
struct DialInfoWSS @0xe639faa41b7d7b04 {
|
||||
socketAddress @0 :SocketAddress;
|
||||
request @1 :Text;
|
||||
}
|
||||
|
||||
struct DialInfo {
|
||||
struct DialInfo @0xe1cd1c39fc2defdf {
|
||||
union {
|
||||
udp @0 :DialInfoUDP;
|
||||
tcp @1 :DialInfoTCP;
|
||||
@ -102,20 +95,15 @@ struct DialInfo {
|
||||
}
|
||||
}
|
||||
|
||||
struct NodeDialInfo {
|
||||
nodeId @0 :NodeID; # node id
|
||||
dialInfo @1 :DialInfo; # how to get to the node
|
||||
}
|
||||
|
||||
# Signals
|
||||
##############################
|
||||
|
||||
struct SignalInfoHolePunch {
|
||||
struct SignalInfoHolePunch @0xeeb9ab6861890c9a {
|
||||
receipt @0 :Data; # receipt to return with hole punch
|
||||
peerInfo @1 :PeerInfo; # peer info of the signal sender for hole punch attempt
|
||||
}
|
||||
|
||||
struct SignalInfoReverseConnect {
|
||||
struct SignalInfoReverseConnect @0xd9ebd3bd0d46e013 {
|
||||
receipt @0 :Data; # receipt to return with reverse connect
|
||||
peerInfo @1 :PeerInfo; # peer info of the signal sender for reverse connect attempt
|
||||
}
|
||||
@ -123,29 +111,38 @@ struct SignalInfoReverseConnect {
|
||||
# Private Routes
|
||||
##############################
|
||||
|
||||
struct RouteHopData {
|
||||
struct RouteHopData @0x8ce231f9d1b7adf2 {
|
||||
nonce @0 :Nonce; # nonce for encrypted blob
|
||||
blob @1 :Data; # encrypted blob with ENC(nonce,DH(PK,SK))
|
||||
# can be one of:
|
||||
# if more hops remain in this route: RouteHop (0 byte appended as key)
|
||||
# if end of safety route and starting private route: PrivateRoute (1 byte appended as key)
|
||||
# if this is a safety route RouteHopData, there is a single byte tag appended to the end of the encrypted blob
|
||||
# it can be one of:
|
||||
# if more hops remain in this route: RouteHop (0 byte appended as tag)
|
||||
# if end of safety route and starting private route: PrivateRoute (1 byte appended as tag)
|
||||
# if this is a private route RouteHopData, only can decode to RouteHop, no tag is appended
|
||||
}
|
||||
|
||||
struct RouteHop {
|
||||
dialInfo @0 :NodeDialInfo; # dial info for this hop
|
||||
nextHop @1 :RouteHopData; # Optional: next hop in encrypted blob
|
||||
# Null means no next hop, at destination (only used in private route, safety routes must enclose a stub private route)
|
||||
struct RouteHop @0xf8f672d75cce0c3b {
|
||||
node :union {
|
||||
nodeId @0 :NodeID; # node id only for established routes
|
||||
peerInfo @1 :PeerInfo; # full peer info for this hop to establish the route
|
||||
}
|
||||
nextHop @2 :RouteHopData; # optional: If this the end of a private route, this field will not exist
|
||||
# if this is a safety route routehop, this field is not optional and must exist
|
||||
}
|
||||
|
||||
struct PrivateRoute {
|
||||
struct PrivateRoute @0x8a83fccb0851e776 {
|
||||
publicKey @0 :RoutePublicKey; # private route public key (unique per private route)
|
||||
hopCount @1 :UInt8; # Count of hops left in the private route
|
||||
firstHop @2 :RouteHop; # Optional: first hop in the private route
|
||||
hopCount @1 :UInt8; # Count of hops left in the private route (for timeout calculation purposes only)
|
||||
hops :union {
|
||||
firstHop @2 :RouteHop; # first hop of a private route is unencrypted (hopcount > 0)
|
||||
data @3 :RouteHopData; # private route has more hops (hopcount > 0 && hopcount < total_hopcount)
|
||||
empty @4 :Void; # private route has ended (hopcount = 0)
|
||||
}
|
||||
}
|
||||
|
||||
struct SafetyRoute {
|
||||
struct SafetyRoute @0xf554734d07cb5d59 {
|
||||
publicKey @0 :RoutePublicKey; # safety route public key (unique per safety route)
|
||||
hopCount @1 :UInt8; # Count of hops left in the safety route
|
||||
hopCount @1 :UInt8; # Count of hops left in the safety route (for timeout calculation purposes only)
|
||||
hops :union {
|
||||
data @2 :RouteHopData; # safety route has more hops
|
||||
private @3 :PrivateRoute; # safety route has ended and private route follows
|
||||
@ -157,7 +154,7 @@ struct SafetyRoute {
|
||||
|
||||
using ValueSeqNum = UInt32; # sequence numbers for values
|
||||
|
||||
struct ValueKey {
|
||||
struct ValueKey @0xe64b0992c21a0736 {
|
||||
publicKey @0 :ValueID; # the location of the value
|
||||
subkey @1 :Text; # the name of the subkey (or empty if the whole key)
|
||||
}
|
||||
@ -167,21 +164,22 @@ struct ValueKey {
|
||||
# seq @1 :ValueSeqNum; # the sequence number of the value subkey
|
||||
# }
|
||||
|
||||
struct ValueData {
|
||||
data @0 :Data; # value or subvalue contents in CBOR format
|
||||
struct ValueData @0xb4b7416f169f2a3d {
|
||||
data @0 :Data; # value or subvalue contents
|
||||
seq @1 :ValueSeqNum; # sequence number of value
|
||||
}
|
||||
|
||||
# Operations
|
||||
##############################
|
||||
|
||||
enum NetworkClass {
|
||||
inboundCapable @0; # I = Inbound capable without relay, may require signal
|
||||
outboundOnly @1; # O = Outbound only, inbound relay required except with reverse connect signal
|
||||
webApp @2; # W = PWA, outbound relay is required in most cases
|
||||
enum NetworkClass @0x8cebfc2a6230717f {
|
||||
invalid @0; # X = Invalid network class, network is not yet set up
|
||||
inboundCapable @1; # I = Inbound capable without relay, may require signal
|
||||
outboundOnly @2; # O = Outbound only, inbound relay required except with reverse connect signal
|
||||
webApp @3; # W = PWA, outbound relay is required in most cases
|
||||
}
|
||||
|
||||
enum DialInfoClass {
|
||||
enum DialInfoClass @0x880005edfdd38b1e {
|
||||
direct @0; # D = Directly reachable with public IP and no firewall, with statically configured port
|
||||
mapped @1; # M = Directly reachable with via portmap behind any NAT or firewalled with dynamically negotiated port
|
||||
fullConeNAT @2; # F = Directly reachable device without portmap behind full-cone NAT
|
||||
@ -190,12 +188,12 @@ enum DialInfoClass {
|
||||
portRestrictedNAT @5; # P = Device without portmap behind address-and-port restricted NAT
|
||||
}
|
||||
|
||||
struct DialInfoDetail {
|
||||
struct DialInfoDetail @0x96423aa1d67b74d8 {
|
||||
dialInfo @0 :DialInfo;
|
||||
class @1 :DialInfoClass;
|
||||
}
|
||||
|
||||
struct PublicInternetNodeStatus {
|
||||
struct PublicInternetNodeStatus @0x9c9d7f1f12eb088f {
|
||||
willRoute @0 :Bool;
|
||||
willTunnel @1 :Bool;
|
||||
willSignal @2 :Bool;
|
||||
@ -203,225 +201,253 @@ struct PublicInternetNodeStatus {
|
||||
willValidateDialInfo @4 :Bool;
|
||||
}
|
||||
|
||||
struct LocalNetworkNodeStatus {
|
||||
struct LocalNetworkNodeStatus @0x957f5bfed2d0b5a5 {
|
||||
willRelay @0 :Bool;
|
||||
willValidateDialInfo @1 :Bool;
|
||||
}
|
||||
|
||||
struct NodeStatus {
|
||||
struct NodeStatus @0xd36b9e7a3bf3330d {
|
||||
union {
|
||||
publicInternet @0 :PublicInternetNodeStatus;
|
||||
localNetwork @1 :LocalNetworkNodeStatus;
|
||||
}
|
||||
}
|
||||
|
||||
struct ProtocolTypeSet {
|
||||
struct ProtocolTypeSet @0x82f12f55a1b73326 {
|
||||
udp @0 :Bool;
|
||||
tcp @1 :Bool;
|
||||
ws @2 :Bool;
|
||||
wss @3 :Bool;
|
||||
}
|
||||
|
||||
struct AddressTypeSet {
|
||||
struct AddressTypeSet @0x9f52d5430d349e6b {
|
||||
ipv4 @0 :Bool;
|
||||
ipv6 @1 :Bool;
|
||||
}
|
||||
|
||||
struct NodeInfo {
|
||||
struct SenderInfo @0x8a4464fab4b1d101 {
|
||||
socketAddress @0 :SocketAddress; # socket address that for the sending peer
|
||||
}
|
||||
|
||||
struct NodeInfo @0xe125d847e3f9f419 {
|
||||
networkClass @0 :NetworkClass; # network class of this node
|
||||
outboundProtocols @1 :ProtocolTypeSet; # protocols that can go outbound
|
||||
addressTypes @2 :AddressTypeSet; # address types supported
|
||||
minVersion @3 :UInt8; # minimum protocol version for rpc
|
||||
maxVersion @4 :UInt8; # maximum protocol version for rpc
|
||||
dialInfoDetailList @5 :List(DialInfoDetail); # inbound dial info details for this node
|
||||
relayPeerInfo @6 :PeerInfo; # (optional) relay peer info for this node
|
||||
}
|
||||
|
||||
struct SignedNodeInfo {
|
||||
struct SignedDirectNodeInfo @0xe0e7ea3e893a3dd7 {
|
||||
nodeInfo @0 :NodeInfo; # node info
|
||||
signature @1 :Signature; # signature
|
||||
timestamp @2 :UInt64; # when signed node info was generated
|
||||
timestamp @1 :UInt64; # when signed node info was generated
|
||||
signature @2 :Signature; # signature
|
||||
}
|
||||
|
||||
struct SenderInfo {
|
||||
socketAddress @0 :SocketAddress; # socket address was available for peer
|
||||
struct SignedRelayedNodeInfo @0xb39e8428ccd87cbb {
|
||||
nodeInfo @0 :NodeInfo; # node info
|
||||
relayId @1 :NodeID; # node id for relay
|
||||
relayInfo @2 :SignedDirectNodeInfo; # signed node info for relay
|
||||
timestamp @3 :UInt64; # when signed node info was generated
|
||||
signature @4 :Signature; # signature
|
||||
}
|
||||
|
||||
struct PeerInfo {
|
||||
struct SignedNodeInfo @0xd2478ce5f593406a {
|
||||
union {
|
||||
direct @0 :SignedDirectNodeInfo; # node info for nodes reachable without a relay
|
||||
relayed @1 :SignedRelayedNodeInfo; # node info for nodes requiring a relay
|
||||
}
|
||||
}
|
||||
|
||||
struct PeerInfo @0xfe2d722d5d3c4bcb {
|
||||
nodeId @0 :NodeID; # node id for 'closer peer'
|
||||
signedNodeInfo @1 :SignedNodeInfo; # signed node info for 'closer peer'
|
||||
}
|
||||
|
||||
struct RoutedOperation {
|
||||
signatures @0 :List(Signature); # signatures from nodes that have handled the private route
|
||||
nonce @1 :Nonce; # nonce Xmsg
|
||||
data @2 :Data; # Operation encrypted with ENC(Xmsg,DH(PKapr,SKbsr))
|
||||
struct RoutedOperation @0xcbcb8535b839e9dd {
|
||||
version @0 :UInt8; # crypto version in use for the data
|
||||
signatures @1 :List(Signature); # signatures from nodes that have handled the private route
|
||||
nonce @2 :Nonce; # nonce Xmsg
|
||||
data @3 :Data; # operation encrypted with ENC(Xmsg,DH(PKapr,SKbsr))
|
||||
}
|
||||
|
||||
struct OperationStatusQ {
|
||||
nodeStatus @0 :NodeStatus; # node status update about the statusq sender
|
||||
struct OperationStatusQ @0x865d80cea70d884a {
|
||||
nodeStatus @0 :NodeStatus; # Optional: node status update about the statusq sender
|
||||
}
|
||||
|
||||
struct OperationStatusA {
|
||||
nodeStatus @0 :NodeStatus; # returned node status
|
||||
senderInfo @1 :SenderInfo; # info about StatusQ sender from the perspective of the replier
|
||||
struct OperationStatusA @0xb306f407fa812a55 {
|
||||
nodeStatus @0 :NodeStatus; # Optional: returned node status
|
||||
senderInfo @1 :SenderInfo; # Optional: info about StatusQ sender from the perspective of the replier
|
||||
}
|
||||
|
||||
struct OperationValidateDialInfo {
|
||||
struct OperationValidateDialInfo @0xbc716ad7d5d060c8 {
|
||||
dialInfo @0 :DialInfo; # dial info to use for the receipt
|
||||
receipt @1 :Data; # receipt to return to dial info to prove it is reachable
|
||||
redirect @2 :Bool; # request a different node do the validate
|
||||
}
|
||||
|
||||
struct OperationReturnReceipt {
|
||||
struct OperationReturnReceipt @0xeb0fb5b5a9160eeb {
|
||||
receipt @0 :Data; # receipt being returned to its origin
|
||||
}
|
||||
|
||||
struct OperationFindNodeQ {
|
||||
struct OperationFindNodeQ @0xfdef788fe9623bcd {
|
||||
nodeId @0 :NodeID; # node id to locate
|
||||
}
|
||||
|
||||
struct OperationFindNodeA {
|
||||
struct OperationFindNodeA @0xa84cf2fb40c77089 {
|
||||
peers @0 :List(PeerInfo); # returned 'closer peer' information
|
||||
}
|
||||
|
||||
struct OperationRoute {
|
||||
struct OperationRoute @0x96741859ce6ac7dd {
|
||||
safetyRoute @0 :SafetyRoute; # Where this should go
|
||||
operation @1 :RoutedOperation; # The operation to be routed
|
||||
}
|
||||
|
||||
struct OperationNodeInfoUpdate {
|
||||
struct OperationNodeInfoUpdate @0xc9647b32a48b66ce {
|
||||
signedNodeInfo @0 :SignedNodeInfo; # Our signed node info
|
||||
}
|
||||
|
||||
struct OperationGetValueQ {
|
||||
|
||||
struct OperationAppCallQ @0xade67b9f09784507 {
|
||||
message @0 :Data; # Opaque request to application
|
||||
}
|
||||
|
||||
struct OperationAppCallA @0xf7c797ac85f214b8 {
|
||||
message @0 :Data; # Opaque response from application
|
||||
}
|
||||
|
||||
struct OperationAppMessage @0x9baf542d81b411f5 {
|
||||
message @0 :Data; # Opaque message to application
|
||||
}
|
||||
|
||||
struct OperationGetValueQ @0xf88a5b6da5eda5d0 {
|
||||
key @0 :ValueKey; # key for value to get
|
||||
}
|
||||
|
||||
struct OperationGetValueA {
|
||||
struct OperationGetValueA @0xd896bb46f2e0249f {
|
||||
union {
|
||||
data @0 :ValueData; # the value if successful
|
||||
peers @1 :List(PeerInfo); # returned 'closer peer' information if not successful
|
||||
}
|
||||
}
|
||||
|
||||
struct OperationSetValueQ {
|
||||
struct OperationSetValueQ @0xbac06191ff8bdbc5 {
|
||||
key @0 :ValueKey; # key for value to update
|
||||
value @1 :ValueData; # value or subvalue contents in CBOR format (older or equal seq number gets dropped)
|
||||
value @1 :ValueData; # value or subvalue contents (older or equal seq number gets dropped)
|
||||
}
|
||||
|
||||
struct OperationSetValueA {
|
||||
struct OperationSetValueA @0x9378d0732dc95be2 {
|
||||
union {
|
||||
data @0 :ValueData; # the new value if successful, may be a different value than what was set if the seq number was lower or equal
|
||||
peers @1 :List(PeerInfo); # returned 'closer peer' information if not successful
|
||||
}
|
||||
}
|
||||
|
||||
struct OperationWatchValueQ {
|
||||
struct OperationWatchValueQ @0xf9a5a6c547b9b228 {
|
||||
key @0 :ValueKey; # key for value to watch
|
||||
}
|
||||
|
||||
struct OperationWatchValueA {
|
||||
struct OperationWatchValueA @0xa726cab7064ba893 {
|
||||
expiration @0 :UInt64; # timestamp when this watch will expire in usec since epoch (0 if watch failed)
|
||||
peers @1 :List(PeerInfo); # returned list of other nodes to ask that could propagate watches
|
||||
}
|
||||
|
||||
struct OperationValueChanged {
|
||||
struct OperationValueChanged @0xd1c59ebdd8cc1bf6 {
|
||||
key @0 :ValueKey; # key for value that changed
|
||||
value @1 :ValueData; # value or subvalue contents in CBOR format with sequence number
|
||||
value @1 :ValueData; # value or subvalue contents with sequence number
|
||||
}
|
||||
|
||||
struct OperationSupplyBlockQ {
|
||||
struct OperationSupplyBlockQ @0xadbf4c542d749971 {
|
||||
blockId @0 :BlockID; # hash of the block we can supply
|
||||
}
|
||||
|
||||
struct OperationSupplyBlockA {
|
||||
struct OperationSupplyBlockA @0xf003822e83b5c0d7 {
|
||||
union {
|
||||
expiration @0 :UInt64; # when the block supplier entry will need to be refreshed
|
||||
peers @1 :List(PeerInfo); # returned 'closer peer' information if not successful
|
||||
}
|
||||
}
|
||||
|
||||
struct OperationFindBlockQ {
|
||||
struct OperationFindBlockQ @0xaf4353ff004c7156 {
|
||||
blockId @0 :BlockID; # hash of the block to locate
|
||||
}
|
||||
|
||||
struct OperationFindBlockA {
|
||||
struct OperationFindBlockA @0xc51455bc4915465d {
|
||||
data @0 :Data; # Optional: the actual block data if we have that block ourselves
|
||||
# null if we don't have a block to return
|
||||
suppliers @1 :List(PeerInfo); # returned list of suppliers if we have them
|
||||
peers @2 :List(PeerInfo); # returned 'closer peer' information
|
||||
}
|
||||
|
||||
struct OperationSignal {
|
||||
struct OperationSignal @0xd4f94f2a5d207e49 {
|
||||
union {
|
||||
holePunch @0 :SignalInfoHolePunch;
|
||||
reverseConnect @1 :SignalInfoReverseConnect;
|
||||
}
|
||||
}
|
||||
|
||||
enum TunnelEndpointMode {
|
||||
enum TunnelEndpointMode @0xef06f4c29beb7458 {
|
||||
raw @0; # raw tunnel
|
||||
turn @1; # turn tunnel
|
||||
}
|
||||
|
||||
enum TunnelError {
|
||||
enum TunnelError @0xb82c6bfb1ec38c7c {
|
||||
badId @0; # Tunnel ID was rejected
|
||||
noEndpoint @1; # Endpoint was unreachable
|
||||
rejectedMode @2; # Endpoint couldn't provide mode
|
||||
noCapacity @3; # Endpoint is full
|
||||
}
|
||||
|
||||
struct TunnelEndpoint {
|
||||
struct TunnelEndpoint @0xc2602aa983cc337d {
|
||||
mode @0 :TunnelEndpointMode; # what kind of endpoint this is
|
||||
description @1 :Text; # endpoint description (TODO)
|
||||
}
|
||||
|
||||
struct FullTunnel {
|
||||
struct FullTunnel @0x9821c3dc75373f63 {
|
||||
id @0 :TunnelID; # tunnel id to use everywhere
|
||||
timeout @1 :UInt64; # duration from last data when this expires if no data is sent or received
|
||||
local @2 :TunnelEndpoint; # local endpoint
|
||||
remote @3 :TunnelEndpoint; # remote endpoint
|
||||
}
|
||||
|
||||
struct PartialTunnel {
|
||||
struct PartialTunnel @0x827a7ebc02be2fc8 {
|
||||
id @0 :TunnelID; # tunnel id to use everywhere
|
||||
timeout @1 :UInt64; # timestamp when this expires if not completed
|
||||
local @2 :TunnelEndpoint; # local endpoint
|
||||
}
|
||||
|
||||
struct OperationStartTunnelQ {
|
||||
struct OperationStartTunnelQ @0xa9c49afce44187af {
|
||||
id @0 :TunnelID; # tunnel id to use everywhere
|
||||
localMode @1 :TunnelEndpointMode; # what kind of local endpoint mode is being requested
|
||||
depth @2 :UInt8; # the number of nodes in the tunnel
|
||||
}
|
||||
|
||||
struct OperationStartTunnelA {
|
||||
struct OperationStartTunnelA @0x818162e4cc61bf1e {
|
||||
union {
|
||||
partial @0 :PartialTunnel; # the first half of the tunnel
|
||||
error @1 :TunnelError; # if we didn't start the tunnel, why not
|
||||
}
|
||||
}
|
||||
|
||||
struct OperationCompleteTunnelQ {
|
||||
struct OperationCompleteTunnelQ @0xe978594588eb950b {
|
||||
id @0 :TunnelID; # tunnel id to use everywhere
|
||||
localMode @1 :TunnelEndpointMode; # what kind of local endpoint mode is being requested
|
||||
depth @2 :UInt8; # the number of nodes in the tunnel
|
||||
endpoint @3 :TunnelEndpoint; # the remote endpoint to complete
|
||||
}
|
||||
|
||||
struct OperationCompleteTunnelA {
|
||||
struct OperationCompleteTunnelA @0x84090791bb765f2a {
|
||||
union {
|
||||
tunnel @0 :FullTunnel; # the tunnel description
|
||||
error @1 :TunnelError; # if we didn't complete the tunnel, why not
|
||||
}
|
||||
}
|
||||
|
||||
struct OperationCancelTunnelQ {
|
||||
struct OperationCancelTunnelQ @0xae2811ae0a003738 {
|
||||
id @0 :TunnelID; # the tunnel id to cancel
|
||||
}
|
||||
|
||||
struct OperationCancelTunnelA {
|
||||
struct OperationCancelTunnelA @0xbba23c992eff97bc {
|
||||
union {
|
||||
tunnel @0 :TunnelID; # the tunnel id that was cancelled
|
||||
error @1 :TunnelError; # if we couldn't cancel, why not
|
||||
@ -429,7 +455,7 @@ struct OperationCancelTunnelA {
|
||||
}
|
||||
|
||||
# Things that want an answer
|
||||
struct Question {
|
||||
struct Question @0xd8510bc33492ef70 {
|
||||
respondTo :union {
|
||||
sender @0 :Void; # sender
|
||||
privateRoute @1 :PrivateRoute; # embedded private route to be used for reply
|
||||
@ -445,16 +471,17 @@ struct Question {
|
||||
watchValueQ @6 :OperationWatchValueQ;
|
||||
supplyBlockQ @7 :OperationSupplyBlockQ;
|
||||
findBlockQ @8 :OperationFindBlockQ;
|
||||
appCallQ @9 :OperationAppCallQ;
|
||||
|
||||
# Tunnel operations
|
||||
startTunnelQ @9 :OperationStartTunnelQ;
|
||||
completeTunnelQ @10 :OperationCompleteTunnelQ;
|
||||
cancelTunnelQ @11 :OperationCancelTunnelQ;
|
||||
startTunnelQ @10 :OperationStartTunnelQ;
|
||||
completeTunnelQ @11 :OperationCompleteTunnelQ;
|
||||
cancelTunnelQ @12 :OperationCancelTunnelQ;
|
||||
}
|
||||
}
|
||||
|
||||
# Things that don't want an answer
|
||||
struct Statement {
|
||||
struct Statement @0x990e20828f404ae1 {
|
||||
detail :union {
|
||||
# Direct operations
|
||||
validateDialInfo @0 :OperationValidateDialInfo;
|
||||
@ -465,11 +492,12 @@ struct Statement {
|
||||
valueChanged @3 :OperationValueChanged;
|
||||
signal @4 :OperationSignal;
|
||||
returnReceipt @5 :OperationReturnReceipt;
|
||||
appMessage @6 :OperationAppMessage;
|
||||
}
|
||||
}
|
||||
|
||||
# Things that are answers
|
||||
struct Answer {
|
||||
struct Answer @0xacacb8b6988c1058 {
|
||||
detail :union {
|
||||
# Direct operations
|
||||
statusA @0 :OperationStatusA;
|
||||
@ -481,15 +509,16 @@ struct Answer {
|
||||
watchValueA @4 :OperationWatchValueA;
|
||||
supplyBlockA @5 :OperationSupplyBlockA;
|
||||
findBlockA @6 :OperationFindBlockA;
|
||||
appCallA @7 :OperationAppCallA;
|
||||
|
||||
# Tunnel operations
|
||||
startTunnelA @7 :OperationStartTunnelA;
|
||||
completeTunnelA @8 :OperationCompleteTunnelA;
|
||||
cancelTunnelA @9 :OperationCancelTunnelA;
|
||||
startTunnelA @8 :OperationStartTunnelA;
|
||||
completeTunnelA @9 :OperationCompleteTunnelA;
|
||||
cancelTunnelA @10 :OperationCancelTunnelA;
|
||||
}
|
||||
}
|
||||
|
||||
struct Operation {
|
||||
struct Operation @0xbf2811c435403c3b {
|
||||
opId @0 :UInt64; # Random RPC ID. Must be random to foil reply forgery attacks.
|
||||
senderNodeInfo @1 :SignedNodeInfo; # (optional) SignedNodeInfo for the sender to be cached by the receiver.
|
||||
kind :union {
|
||||
|
@@ -103,7 +103,7 @@ impl<S: Subscriber + for<'a> registry::LookupSpan<'a>> Layer<S> for ApiTracingLa
None
};

(inner.update_callback)(VeilidUpdate::Log(VeilidStateLog {
(inner.update_callback)(VeilidUpdate::Log(VeilidLog {
log_level,
message,
backtrace,
@ -1,15 +1,16 @@
|
||||
use crate::callback_state_machine::*;
|
||||
use crate::dht::Crypto;
|
||||
use crate::crypto::Crypto;
|
||||
use crate::network_manager::*;
|
||||
use crate::routing_table::*;
|
||||
use crate::xx::*;
|
||||
use crate::*;
|
||||
use core::convert::TryFrom;
|
||||
use core::fmt;
|
||||
use rkyv::{Archive as RkyvArchive, Deserialize as RkyvDeserialize, Serialize as RkyvSerialize};
|
||||
use serde::*;
|
||||
|
||||
state_machine! {
|
||||
derive(Debug, PartialEq, Eq, Clone, Copy, Serialize, Deserialize)
|
||||
derive(Debug, PartialEq, Eq, Clone, Copy, Serialize, Deserialize, RkyvArchive, RkyvSerialize, RkyvDeserialize,)
|
||||
pub Attachment(Detached)
|
||||
//---
|
||||
Detached(AttachRequested) => Attaching [StartAttachment],
|
||||
@ -102,48 +103,77 @@ impl TryFrom<String> for AttachmentState {
|
||||
}
|
||||
|
||||
pub struct AttachmentManagerInner {
|
||||
config: VeilidConfig,
|
||||
attachment_machine: CallbackStateMachine<Attachment>,
|
||||
network_manager: NetworkManager,
|
||||
maintain_peers: bool,
|
||||
attach_timestamp: Option<u64>,
|
||||
update_callback: Option<UpdateCallback>,
|
||||
attachment_maintainer_jh: Option<MustJoinHandle<()>>,
|
||||
}
|
||||
|
||||
pub struct AttachmentManagerUnlockedInner {
|
||||
config: VeilidConfig,
|
||||
network_manager: NetworkManager,
|
||||
}
|
||||
|
||||
#[derive(Clone)]
|
||||
pub struct AttachmentManager {
|
||||
inner: Arc<Mutex<AttachmentManagerInner>>,
|
||||
unlocked_inner: Arc<AttachmentManagerUnlockedInner>,
|
||||
}
|
||||
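The struct split above follows the pattern this merge applies in several managers: fields that never change after construction (the config handle, the NetworkManager) live in an `unlocked_inner` Arc that clones of the manager can read without locking, while the mutable attachment state stays behind `inner: Arc<Mutex<...>>`. A minimal sketch of the shape, with hypothetical field names and std's Mutex standing in for the crate's lock type:

use std::sync::{Arc, Mutex};

// Mutable state: only reachable through the mutex.
struct ManagerInner {
    counter: u64,
}

// Fixed at construction: cloned handles read it without taking a lock.
struct ManagerUnlockedInner {
    name: String,
}

#[derive(Clone)]
struct Manager {
    inner: Arc<Mutex<ManagerInner>>,
    unlocked_inner: Arc<ManagerUnlockedInner>,
}

impl Manager {
    fn name(&self) -> String {
        // Read-only configuration needs no lock.
        self.unlocked_inner.name.clone()
    }
    fn bump(&self) -> u64 {
        let mut inner = self.inner.lock().unwrap();
        inner.counter += 1;
        inner.counter
    }
}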
|
||||
impl AttachmentManager {
|
||||
fn new_inner(
|
||||
fn new_unlocked_inner(
|
||||
config: VeilidConfig,
|
||||
protected_store: ProtectedStore,
|
||||
table_store: TableStore,
|
||||
block_store: BlockStore,
|
||||
crypto: Crypto,
|
||||
) -> AttachmentManagerInner {
|
||||
AttachmentManagerInner {
|
||||
) -> AttachmentManagerUnlockedInner {
|
||||
AttachmentManagerUnlockedInner {
|
||||
config: config.clone(),
|
||||
network_manager: NetworkManager::new(
|
||||
config,
|
||||
protected_store,
|
||||
table_store,
|
||||
block_store,
|
||||
crypto,
|
||||
),
|
||||
}
|
||||
}
|
||||
fn new_inner() -> AttachmentManagerInner {
|
||||
AttachmentManagerInner {
|
||||
attachment_machine: CallbackStateMachine::new(),
|
||||
network_manager: NetworkManager::new(config, table_store, crypto),
|
||||
maintain_peers: false,
|
||||
attach_timestamp: None,
|
||||
update_callback: None,
|
||||
attachment_maintainer_jh: None,
|
||||
}
|
||||
}
|
||||
pub fn new(config: VeilidConfig, table_store: TableStore, crypto: Crypto) -> Self {
|
||||
pub fn new(
|
||||
config: VeilidConfig,
|
||||
protected_store: ProtectedStore,
|
||||
table_store: TableStore,
|
||||
block_store: BlockStore,
|
||||
crypto: Crypto,
|
||||
) -> Self {
|
||||
Self {
|
||||
inner: Arc::new(Mutex::new(Self::new_inner(config, table_store, crypto))),
|
||||
inner: Arc::new(Mutex::new(Self::new_inner())),
|
||||
unlocked_inner: Arc::new(Self::new_unlocked_inner(
|
||||
config,
|
||||
protected_store,
|
||||
table_store,
|
||||
block_store,
|
||||
crypto,
|
||||
)),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn config(&self) -> VeilidConfig {
|
||||
self.inner.lock().config.clone()
|
||||
self.unlocked_inner.config.clone()
|
||||
}
|
||||
|
||||
pub fn network_manager(&self) -> NetworkManager {
|
||||
self.inner.lock().network_manager.clone()
|
||||
self.unlocked_inner.network_manager.clone()
|
||||
}
|
||||
|
||||
pub fn is_attached(&self) -> bool {
|
||||
@ -202,9 +232,10 @@ impl AttachmentManager {
|
||||
AttachmentManager::translate_attachment_state(&inner.attachment_machine.state());
|
||||
|
||||
// get reliable peer count from routing table
|
||||
let routing_table = inner.network_manager.routing_table();
|
||||
let routing_table = self.network_manager().routing_table();
|
||||
let health = routing_table.get_routing_table_health();
|
||||
let routing_table_config = &inner.config.get().network.routing_table;
|
||||
let config = self.config();
|
||||
let routing_table_config = &config.get().network.routing_table;
|
||||
|
||||
let new_peer_state_input =
|
||||
AttachmentManager::translate_routing_table_health(health, routing_table_config);
|
||||
@ -223,11 +254,8 @@ impl AttachmentManager {
|
||||
#[instrument(level = "debug", skip(self))]
|
||||
async fn attachment_maintainer(self) {
|
||||
debug!("attachment starting");
|
||||
let netman = {
|
||||
let mut inner = self.inner.lock();
|
||||
inner.attach_timestamp = Some(intf::get_timestamp());
|
||||
inner.network_manager.clone()
|
||||
};
|
||||
self.inner.lock().attach_timestamp = Some(intf::get_timestamp());
|
||||
let netman = self.network_manager();
|
||||
|
||||
let mut restart;
|
||||
loop {
|
||||
@ -286,7 +314,7 @@ impl AttachmentManager {
|
||||
#[instrument(level = "debug", skip_all, err)]
|
||||
pub async fn init(&self, update_callback: UpdateCallback) -> EyreResult<()> {
|
||||
trace!("init");
|
||||
let network_manager = {
|
||||
{
|
||||
let mut inner = self.inner.lock();
|
||||
inner.update_callback = Some(update_callback.clone());
|
||||
let update_callback2 = update_callback.clone();
|
||||
@ -297,10 +325,9 @@ impl AttachmentManager {
|
||||
}))
|
||||
},
|
||||
));
|
||||
inner.network_manager.clone()
|
||||
};
|
||||
|
||||
network_manager.init(update_callback).await?;
|
||||
self.network_manager().init(update_callback).await?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
@ -309,30 +336,33 @@ impl AttachmentManager {
|
||||
pub async fn terminate(&self) {
|
||||
// Ensure we detached
|
||||
self.detach().await;
|
||||
let network_manager = {
|
||||
let inner = self.inner.lock();
|
||||
inner.network_manager.clone()
|
||||
};
|
||||
network_manager.terminate().await;
|
||||
let mut inner = self.inner.lock();
|
||||
inner.update_callback = None;
|
||||
self.network_manager().terminate().await;
|
||||
self.inner.lock().update_callback = None;
|
||||
}
|
||||
|
||||
#[instrument(level = "trace", skip(self))]
|
||||
fn attach(&self) {
|
||||
// Create long-running connection maintenance routine
|
||||
let this = self.clone();
|
||||
self.inner.lock().maintain_peers = true;
|
||||
self.inner.lock().attachment_maintainer_jh =
|
||||
Some(intf::spawn(this.attachment_maintainer()));
|
||||
let mut inner = self.inner.lock();
|
||||
if inner.attachment_maintainer_jh.is_some() {
|
||||
return;
|
||||
}
|
||||
inner.maintain_peers = true;
|
||||
inner.attachment_maintainer_jh = Some(intf::spawn(self.clone().attachment_maintainer()));
|
||||
}
|
||||
|
||||
#[instrument(level = "trace", skip(self))]
|
||||
async fn detach(&self) {
|
||||
let attachment_maintainer_jh = self.inner.lock().attachment_maintainer_jh.take();
|
||||
if let Some(jh) = attachment_maintainer_jh {
|
||||
let attachment_maintainer_jh = {
|
||||
let mut inner = self.inner.lock();
|
||||
let attachment_maintainer_jh = inner.attachment_maintainer_jh.take();
|
||||
if attachment_maintainer_jh.is_some() {
|
||||
// Terminate long-running connection maintenance routine
|
||||
self.inner.lock().maintain_peers = false;
|
||||
inner.maintain_peers = false;
|
||||
}
|
||||
attachment_maintainer_jh
|
||||
};
|
||||
if let Some(jh) = attachment_maintainer_jh {
|
||||
jh.await;
|
||||
}
|
||||
}
|
||||
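A note on the reworked detach() above: the join handle is taken out of the inner state while the mutex is held, but it is awaited only after the guard has been dropped. A sketch of that pattern in isolation, using tokio's JoinHandle purely for illustration (the code above uses the crate's MustJoinHandle):

// Sketch only: take the handle out under the lock, let the guard drop, then await.
// Awaiting while still holding the guard would stall every other user of the lock.
async fn stop(handle_slot: &std::sync::Mutex<Option<tokio::task::JoinHandle<()>>>) {
    let maybe_handle = {
        let mut guard = handle_slot.lock().unwrap();
        guard.take()
    }; // guard dropped here
    if let Some(handle) = maybe_handle {
        let _ = handle.await;
    }
}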
|
@ -1,6 +1,6 @@
|
||||
use crate::api_tracing_layer::*;
|
||||
use crate::attachment_manager::*;
|
||||
use crate::dht::Crypto;
|
||||
use crate::crypto::Crypto;
|
||||
use crate::veilid_api::*;
|
||||
use crate::veilid_config::*;
|
||||
use crate::xx::*;
|
||||
@ -103,7 +103,13 @@ impl ServicesContext {
|
||||
// Set up attachment manager
|
||||
trace!("init attachment manager");
|
||||
let update_callback = self.update_callback.clone();
|
||||
let attachment_manager = AttachmentManager::new(self.config.clone(), table_store, crypto);
|
||||
let attachment_manager = AttachmentManager::new(
|
||||
self.config.clone(),
|
||||
protected_store,
|
||||
table_store,
|
||||
block_store,
|
||||
crypto,
|
||||
);
|
||||
if let Err(e) = attachment_manager.init(update_callback).await {
|
||||
self.shutdown().await;
|
||||
return Err(e);
|
||||
@ -171,7 +177,7 @@ impl VeilidCoreContext {
|
||||
// Set up config from callback
|
||||
trace!("setup config with callback");
|
||||
let mut config = VeilidConfig::new();
|
||||
config.setup(config_callback)?;
|
||||
config.setup(config_callback, update_callback.clone())?;
|
||||
|
||||
Self::new_common(update_callback, config).await
|
||||
}
|
||||
@ -184,7 +190,7 @@ impl VeilidCoreContext {
|
||||
// Set up config from callback
|
||||
trace!("setup config with json");
|
||||
let mut config = VeilidConfig::new();
|
||||
config.setup_from_json(config_json)?;
|
||||
config.setup_from_json(config_json, update_callback.clone())?;
|
||||
Self::new_common(update_callback, config).await
|
||||
}
|
||||
|
||||
|
@ -1,7 +1,7 @@
|
||||
#![allow(dead_code)]
|
||||
#![allow(clippy::absurd_extreme_comparisons)]
|
||||
use super::crypto::*;
|
||||
use super::key::*;
|
||||
use super::*;
|
||||
use crate::routing_table::VersionRange;
|
||||
use crate::xx::*;
|
||||
use crate::*;
|
||||
use core::convert::TryInto;
|
||||
@ -38,8 +38,6 @@ use core::convert::TryInto;
|
||||
pub const MAX_ENVELOPE_SIZE: usize = 65507;
|
||||
pub const MIN_ENVELOPE_SIZE: usize = 0x6A + 0x40; // Header + Signature
|
||||
pub const ENVELOPE_MAGIC: &[u8; 4] = b"VLID";
|
||||
pub const MIN_VERSION: u8 = 0u8;
|
||||
pub const MAX_VERSION: u8 = 0u8;
|
||||
pub type EnvelopeNonce = [u8; 24];
|
||||
|
||||
#[derive(Debug, Clone, PartialEq, Eq, Default)]
|
||||
@ -61,15 +59,12 @@ impl Envelope {
|
||||
sender_id: DHTKey,
|
||||
recipient_id: DHTKey,
|
||||
) -> Self {
|
||||
assert!(sender_id.valid);
|
||||
assert!(recipient_id.valid);
|
||||
|
||||
assert!(version >= MIN_VERSION);
|
||||
assert!(version <= MAX_VERSION);
|
||||
assert!(version >= MIN_CRYPTO_VERSION);
|
||||
assert!(version <= MAX_CRYPTO_VERSION);
|
||||
Self {
|
||||
version,
|
||||
min_version: MIN_VERSION,
|
||||
max_version: MAX_VERSION,
|
||||
min_version: MIN_CRYPTO_VERSION,
|
||||
max_version: MAX_CRYPTO_VERSION,
|
||||
timestamp,
|
||||
nonce,
|
||||
sender_id,
|
||||
@ -94,9 +89,9 @@ impl Envelope {
|
||||
|
||||
// Check version
|
||||
let version = data[0x04];
|
||||
if version > MAX_VERSION || version < MIN_VERSION {
|
||||
if version > MAX_CRYPTO_VERSION || version < MIN_CRYPTO_VERSION {
|
||||
return Err(VeilidAPIError::parse_error(
|
||||
"unsupported protocol version",
|
||||
"unsupported cryptography version",
|
||||
version,
|
||||
));
|
||||
}
|
||||
@ -208,15 +203,6 @@ impl Envelope {
|
||||
body: &[u8],
|
||||
node_id_secret: &DHTKeySecret,
|
||||
) -> Result<Vec<u8>, VeilidAPIError> {
|
||||
// Ensure sender node id is valid
|
||||
if !self.sender_id.valid {
|
||||
return Err(VeilidAPIError::generic("sender id is invalid"));
|
||||
}
|
||||
// Ensure recipient node id is valid
|
||||
if !self.recipient_id.valid {
|
||||
return Err(VeilidAPIError::generic("recipient id is invalid"));
|
||||
}
|
||||
|
||||
// Ensure body isn't too long
|
||||
let envelope_size: usize = body.len() + MIN_ENVELOPE_SIZE;
|
||||
if envelope_size > MAX_ENVELOPE_SIZE {
|
||||
@ -274,8 +260,11 @@ impl Envelope {
|
||||
self.version
|
||||
}
|
||||
|
||||
pub fn get_min_max_version(&self) -> (u8, u8) {
|
||||
(self.min_version, self.max_version)
|
||||
pub fn get_min_max_version(&self) -> VersionRange {
|
||||
VersionRange {
|
||||
min: self.min_version,
|
||||
max: self.max_version,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn get_timestamp(&self) -> u64 {
|
@ -2,29 +2,35 @@ use crate::veilid_rng::*;
|
||||
use crate::xx::*;
|
||||
use crate::*;
|
||||
|
||||
use core::cmp::{Eq, Ord, Ordering, PartialEq, PartialOrd};
|
||||
use core::cmp::{Eq, Ord, PartialEq, PartialOrd};
|
||||
use core::convert::{TryFrom, TryInto};
|
||||
use core::fmt;
|
||||
use core::hash::{Hash, Hasher};
|
||||
use core::hash::Hash;
|
||||
|
||||
use data_encoding::BASE64URL_NOPAD;
|
||||
use digest::generic_array::typenum::U64;
|
||||
use digest::{Digest, Output};
|
||||
use ed25519_dalek::{Keypair, PublicKey, Signature};
|
||||
use generic_array::GenericArray;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use rkyv::{Archive as RkyvArchive, Deserialize as RkyvDeserialize, Serialize as RkyvSerialize};
|
||||
|
||||
//////////////////////////////////////////////////////////////////////
|
||||
|
||||
/// Length of a DHT key in bytes
|
||||
#[allow(dead_code)]
|
||||
pub const DHT_KEY_LENGTH: usize = 32;
|
||||
/// Length of a DHT key in bytes after encoding to base64url
|
||||
#[allow(dead_code)]
|
||||
pub const DHT_KEY_LENGTH_ENCODED: usize = 43;
|
||||
/// Length of a DHT secret in bytes
|
||||
#[allow(dead_code)]
|
||||
pub const DHT_KEY_SECRET_LENGTH: usize = 32;
|
||||
/// Length of a DHT secret in bytes after encoding to base64url
|
||||
#[allow(dead_code)]
|
||||
pub const DHT_KEY_SECRET_LENGTH_ENCODED: usize = 43;
|
||||
/// Length of a DHT signature in bytes
|
||||
#[allow(dead_code)]
|
||||
/// Length of a DHT signature in bytes after encoding to base64url
|
||||
pub const DHT_SIGNATURE_LENGTH: usize = 64;
|
||||
#[allow(dead_code)]
|
||||
pub const DHT_SIGNATURE_LENGTH_ENCODED: usize = 86;
|
||||
@ -33,33 +39,47 @@ pub const DHT_SIGNATURE_LENGTH_ENCODED: usize = 86;
|
||||
|
||||
macro_rules! byte_array_type {
|
||||
($name:ident, $size:expr) => {
|
||||
#[derive(Clone, Copy)]
|
||||
#[derive(
|
||||
Clone,
|
||||
Copy,
|
||||
Hash,
|
||||
Eq,
|
||||
PartialEq,
|
||||
PartialOrd,
|
||||
Ord,
|
||||
RkyvArchive,
|
||||
RkyvSerialize,
|
||||
RkyvDeserialize,
|
||||
)]
|
||||
#[archive_attr(repr(C), derive(CheckBytes, Hash, Eq, PartialEq, PartialOrd, Ord))]
|
||||
pub struct $name {
|
||||
pub bytes: [u8; $size],
|
||||
pub valid: bool,
|
||||
}
|
||||
|
||||
impl Serialize for $name {
|
||||
impl Default for $name {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
bytes: [0u8; $size],
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl serde::Serialize for $name {
|
||||
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
|
||||
where
|
||||
S: serde::Serializer,
|
||||
{
|
||||
let s: String;
|
||||
if self.valid {
|
||||
s = self.encode();
|
||||
} else {
|
||||
s = "".to_owned();
|
||||
}
|
||||
s.serialize(serializer)
|
||||
let s = self.encode();
|
||||
serde::Serialize::serialize(&s, serializer)
|
||||
}
|
||||
}
|
||||
|
||||
impl<'de> Deserialize<'de> for $name {
|
||||
impl<'de> serde::Deserialize<'de> for $name {
|
||||
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
|
||||
where
|
||||
D: serde::Deserializer<'de>,
|
||||
{
|
||||
let s = String::deserialize(deserializer)?;
|
||||
let s = <String as serde::Deserialize>::deserialize(deserializer)?;
|
||||
if s == "" {
|
||||
return Ok($name::default());
|
||||
}
|
||||
@ -69,28 +89,19 @@ macro_rules! byte_array_type {
|
||||
|
||||
impl $name {
|
||||
pub fn new(bytes: [u8; $size]) -> Self {
|
||||
Self { bytes, valid: true }
|
||||
Self { bytes }
|
||||
}
|
||||
|
||||
pub fn try_from_vec(v: Vec<u8>) -> Result<Self, VeilidAPIError> {
|
||||
let mut this = Self {
|
||||
bytes: [0u8; $size],
|
||||
valid: true,
|
||||
};
|
||||
|
||||
if v.len() != $size {
|
||||
apibail_generic!(format!(
|
||||
let vl = v.len();
|
||||
Ok(Self {
|
||||
bytes: v.try_into().map_err(|_| {
|
||||
VeilidAPIError::generic(format!(
|
||||
"Expected a Vec of length {} but it was {}",
|
||||
$size,
|
||||
v.len()
|
||||
));
|
||||
}
|
||||
|
||||
for n in 0..v.len() {
|
||||
this.bytes[n] = v[n];
|
||||
}
|
||||
|
||||
Ok(this)
|
||||
$size, vl
|
||||
))
|
||||
})?,
|
||||
})
|
||||
}
|
||||
|
||||
pub fn bit(&self, index: usize) -> bool {
|
||||
@ -136,14 +147,13 @@ macro_rules! byte_array_type {
|
||||
}
|
||||
|
||||
pub fn encode(&self) -> String {
|
||||
assert!(self.valid);
|
||||
BASE64URL_NOPAD.encode(&self.bytes)
|
||||
}
|
||||
|
||||
pub fn try_decode(input: &str) -> Result<Self, VeilidAPIError> {
|
||||
pub fn try_decode<S: AsRef<str>>(input: S) -> Result<Self, VeilidAPIError> {
|
||||
let mut bytes = [0u8; $size];
|
||||
|
||||
let res = BASE64URL_NOPAD.decode_len(input.len());
|
||||
let res = BASE64URL_NOPAD.decode_len(input.as_ref().len());
|
||||
match res {
|
||||
Ok(v) => {
|
||||
if v != $size {
|
||||
@ -155,103 +165,38 @@ macro_rules! byte_array_type {
|
||||
}
|
||||
}
|
||||
|
||||
let res = BASE64URL_NOPAD.decode_mut(input.as_bytes(), &mut bytes);
|
||||
let res = BASE64URL_NOPAD.decode_mut(input.as_ref().as_bytes(), &mut bytes);
|
||||
match res {
|
||||
Ok(_) => Ok(Self::new(bytes)),
|
||||
Err(_) => apibail_generic!("Failed to decode"),
|
||||
}
|
||||
}
|
||||
}
|
||||
impl PartialOrd for $name {
|
||||
fn partial_cmp(&self, other: &$name) -> Option<Ordering> {
|
||||
Some(self.cmp(other))
|
||||
}
|
||||
}
|
||||
impl Ord for $name {
|
||||
fn cmp(&self, other: &$name) -> Ordering {
|
||||
if !self.valid && !other.valid {
|
||||
return Ordering::Equal;
|
||||
}
|
||||
if !self.valid && other.valid {
|
||||
return Ordering::Less;
|
||||
}
|
||||
if self.valid && !other.valid {
|
||||
return Ordering::Greater;
|
||||
}
|
||||
|
||||
for n in 0..$size {
|
||||
if self.bytes[n] < other.bytes[n] {
|
||||
return Ordering::Less;
|
||||
}
|
||||
if self.bytes[n] > other.bytes[n] {
|
||||
return Ordering::Greater;
|
||||
}
|
||||
}
|
||||
Ordering::Equal
|
||||
}
|
||||
}
|
||||
impl PartialEq<$name> for $name {
|
||||
fn eq(&self, other: &$name) -> bool {
|
||||
if self.valid != other.valid {
|
||||
return false;
|
||||
}
|
||||
for n in 0..$size {
|
||||
if self.bytes[n] != other.bytes[n] {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
true
|
||||
}
|
||||
}
|
||||
impl Eq for $name {}
|
||||
impl Hash for $name {
|
||||
fn hash<H: Hasher>(&self, state: &mut H) {
|
||||
self.valid.hash(state);
|
||||
if self.valid {
|
||||
self.bytes.hash(state);
|
||||
}
|
||||
}
|
||||
}
|
||||
impl Default for $name {
|
||||
fn default() -> Self {
|
||||
let mut this = $name::new([0u8; $size]);
|
||||
this.valid = false;
|
||||
this
|
||||
}
|
||||
}
|
||||
impl fmt::Display for $name {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
write!(f, "{}", String::from(self))
|
||||
//write!(f, "{}", String::from(self))
|
||||
write!(f, "{}", self.encode())
|
||||
}
|
||||
}
|
||||
|
||||
impl fmt::Debug for $name {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
write!(f, concat!(stringify!($name), "("))?;
|
||||
write!(
|
||||
f,
|
||||
"{}",
|
||||
if self.valid {
|
||||
self.encode()
|
||||
} else {
|
||||
"".to_owned()
|
||||
}
|
||||
)?;
|
||||
write!(f, "{}", self.encode())?;
|
||||
write!(f, ")")
|
||||
}
|
||||
}
|
||||
|
||||
impl From<&$name> for String {
|
||||
fn from(value: &$name) -> Self {
|
||||
if !value.valid {
|
||||
return "".to_owned();
|
||||
}
|
||||
let mut s = String::new();
|
||||
for n in 0..($size / 8) {
|
||||
let b: [u8; 8] = value.bytes[n * 8..(n + 1) * 8].try_into().unwrap();
|
||||
s.push_str(hex::encode(b).as_str());
|
||||
}
|
||||
s
|
||||
// let mut s = String::new();
|
||||
// for n in 0..($size / 8) {
|
||||
// let b: [u8; 8] = value.bytes[n * 8..(n + 1) * 8].try_into().unwrap();
|
||||
// s.push_str(hex::encode(b).as_str());
|
||||
// }
|
||||
// s
|
||||
value.encode()
|
||||
}
|
||||
}
|
||||
|
||||
@ -265,20 +210,18 @@ macro_rules! byte_array_type {
|
||||
impl TryFrom<&str> for $name {
|
||||
type Error = VeilidAPIError;
|
||||
fn try_from(value: &str) -> Result<Self, Self::Error> {
|
||||
let mut out = $name::default();
|
||||
if value == "" {
|
||||
return Ok(out);
|
||||
}
|
||||
if value.len() != ($size * 2) {
|
||||
apibail_generic!(concat!(stringify!($name), " is incorrect length"));
|
||||
}
|
||||
match hex::decode_to_slice(value, &mut out.bytes) {
|
||||
Ok(_) => {
|
||||
out.valid = true;
|
||||
Ok(out)
|
||||
}
|
||||
Err(err) => Err(VeilidAPIError::generic(err)),
|
||||
}
|
||||
// let mut out = $name::default();
|
||||
// if value == "" {
|
||||
// return Ok(out);
|
||||
// }
|
||||
// if value.len() != ($size * 2) {
|
||||
// apibail_generic!(concat!(stringify!($name), " is incorrect length"));
|
||||
// }
|
||||
// match hex::decode_to_slice(value, &mut out.bytes) {
|
||||
// Ok(_) => Ok(out),
|
||||
// Err(err) => Err(VeilidAPIError::generic(err)),
|
||||
// }
|
||||
Self::try_decode(value)
|
||||
}
|
||||
}
|
||||
};
|
||||
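A usage sketch of a type produced by the macro above, assuming it is invoked as `byte_array_type!(DHTKey, 32)` as elsewhere in this module. With the `valid` flag removed, the all-zero key is simply the Default value, and every key encodes to 43 base64url characters:

fn key_roundtrip() -> Result<(), VeilidAPIError> {
    let key = DHTKey::new([0x5a; 32]);
    let text = key.encode();                        // BASE64URL_NOPAD, no padding
    assert_eq!(text.len(), DHT_KEY_LENGTH_ENCODED); // 43 characters for 32 bytes
    let back = DHTKey::try_decode(&text)?;          // try_decode accepts any AsRef<str>
    assert_eq!(key, back);
    assert_ne!(key, DHTKey::default());             // default is the all-zero key, not "invalid"
    Ok(())
}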
@ -374,9 +317,6 @@ pub fn sign(
|
||||
dht_key_secret: &DHTKeySecret,
|
||||
data: &[u8],
|
||||
) -> Result<DHTSignature, VeilidAPIError> {
|
||||
assert!(dht_key.valid);
|
||||
assert!(dht_key_secret.valid);
|
||||
|
||||
let mut kpb: [u8; DHT_KEY_SECRET_LENGTH + DHT_KEY_LENGTH] =
|
||||
[0u8; DHT_KEY_SECRET_LENGTH + DHT_KEY_LENGTH];
|
||||
|
||||
@ -401,8 +341,6 @@ pub fn verify(
|
||||
data: &[u8],
|
||||
signature: &DHTSignature,
|
||||
) -> Result<(), VeilidAPIError> {
|
||||
assert!(dht_key.valid);
|
||||
assert!(signature.valid);
|
||||
let pk = PublicKey::from_bytes(&dht_key.bytes)
|
||||
.map_err(|e| VeilidAPIError::parse_error("Public key is invalid", e))?;
|
||||
let sig = Signature::from_bytes(&signature.bytes)
|
||||
@ -421,7 +359,6 @@ pub fn generate_hash(data: &[u8]) -> DHTKey {
|
||||
}
|
||||
|
||||
pub fn validate_hash(data: &[u8], dht_key: &DHTKey) -> bool {
|
||||
assert!(dht_key.valid);
|
||||
let bytes = *blake3::hash(data).as_bytes();
|
||||
|
||||
bytes == dht_key.bytes
|
||||
@ -439,8 +376,6 @@ pub fn validate_key(dht_key: &DHTKey, dht_key_secret: &DHTKeySecret) -> bool {
|
||||
}
|
||||
|
||||
pub fn distance(key1: &DHTKey, key2: &DHTKey) -> DHTKeyDistance {
|
||||
assert!(key1.valid);
|
||||
assert!(key2.valid);
|
||||
let mut bytes = [0u8; DHT_KEY_LENGTH];
|
||||
|
||||
for (n, byte) in bytes.iter_mut().enumerate() {
|
@ -1,4 +1,18 @@
|
||||
use super::key::*;
|
||||
mod envelope;
|
||||
mod key;
|
||||
mod receipt;
|
||||
mod value;
|
||||
|
||||
pub mod tests;
|
||||
|
||||
pub use envelope::*;
|
||||
pub use key::*;
|
||||
pub use receipt::*;
|
||||
pub use value::*;
|
||||
|
||||
pub const MIN_CRYPTO_VERSION: u8 = 0u8;
|
||||
pub const MAX_CRYPTO_VERSION: u8 = 0u8;
|
||||
|
||||
use crate::xx::*;
|
||||
use crate::*;
|
||||
use chacha20::cipher::{KeyIvInit, StreamCipher};
|
||||
@ -98,19 +112,19 @@ impl Crypto {
|
||||
let (table_store, node_id) = {
|
||||
let mut inner = self.inner.lock();
|
||||
let c = self.config.get();
|
||||
inner.node_id = c.network.node_id;
|
||||
inner.node_id_secret = c.network.node_id_secret;
|
||||
inner.node_id = c.network.node_id.unwrap();
|
||||
inner.node_id_secret = c.network.node_id_secret.unwrap();
|
||||
(inner.table_store.clone(), c.network.node_id)
|
||||
};
|
||||
|
||||
// load caches if they are valid for this node id
|
||||
let mut db = table_store.open("crypto_caches", 1).await?;
|
||||
let caches_valid = match db.load(0, b"node_id").await? {
|
||||
Some(v) => v.as_slice() == node_id.bytes,
|
||||
let caches_valid = match db.load(0, b"node_id")? {
|
||||
Some(v) => v.as_slice() == node_id.unwrap().bytes,
|
||||
None => false,
|
||||
};
|
||||
if caches_valid {
|
||||
if let Some(b) = db.load(0, b"dh_cache").await? {
|
||||
if let Some(b) = db.load(0, b"dh_cache")? {
|
||||
let mut inner = self.inner.lock();
|
||||
bytes_to_cache(&b, &mut inner.dh_cache);
|
||||
}
|
||||
@ -118,7 +132,7 @@ impl Crypto {
|
||||
drop(db);
|
||||
table_store.delete("crypto_caches").await?;
|
||||
db = table_store.open("crypto_caches", 1).await?;
|
||||
db.store(0, b"node_id", &node_id.bytes).await?;
|
||||
db.store(0, b"node_id", &node_id.unwrap().bytes)?;
|
||||
}
|
||||
|
||||
// Schedule flushing
|
||||
@ -145,7 +159,7 @@ impl Crypto {
|
||||
};
|
||||
|
||||
let db = table_store.open("crypto_caches", 1).await?;
|
||||
db.store(0, b"dh_cache", &cache_bytes).await?;
|
||||
db.store(0, b"dh_cache", &cache_bytes)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
@ -206,8 +220,6 @@ impl Crypto {
|
||||
// These are safe to use regardless of initialization status
|
||||
|
||||
pub fn compute_dh(key: &DHTKey, secret: &DHTKeySecret) -> Result<SharedSecret, VeilidAPIError> {
|
||||
assert!(key.valid);
|
||||
assert!(secret.valid);
|
||||
let pk_ed = ed::PublicKey::from_bytes(&key.bytes).map_err(VeilidAPIError::internal)?;
|
||||
let pk_xd = Self::ed25519_to_x25519_pk(&pk_ed)?;
|
||||
let sk_ed = ed::SecretKey::from_bytes(&secret.bytes).map_err(VeilidAPIError::internal)?;
|
@ -1,7 +1,6 @@
|
||||
#![allow(dead_code)]
|
||||
#![allow(clippy::absurd_extreme_comparisons)]
|
||||
use super::envelope::{MAX_VERSION, MIN_VERSION};
|
||||
use super::key::*;
|
||||
use super::*;
|
||||
use crate::xx::*;
|
||||
use crate::*;
|
||||
use core::convert::TryInto;
|
||||
@ -59,7 +58,6 @@ impl Receipt {
|
||||
sender_id: DHTKey,
|
||||
extra_data: D,
|
||||
) -> Result<Self, VeilidAPIError> {
|
||||
assert!(sender_id.valid);
|
||||
if extra_data.as_ref().len() > MAX_EXTRA_DATA_SIZE {
|
||||
return Err(VeilidAPIError::parse_error(
|
||||
"extra data too large for receipt",
|
||||
@ -90,9 +88,9 @@ impl Receipt {
|
||||
|
||||
// Check version
|
||||
let version = data[0x04];
|
||||
if version > MAX_VERSION || version < MIN_VERSION {
|
||||
if version > MAX_CRYPTO_VERSION || version < MIN_CRYPTO_VERSION {
|
||||
return Err(VeilidAPIError::parse_error(
|
||||
"unsupported protocol version",
|
||||
"unsupported cryptography version",
|
||||
version,
|
||||
));
|
||||
}
|
||||
@ -152,11 +150,6 @@ impl Receipt {
|
||||
}
|
||||
|
||||
pub fn to_signed_data(&self, secret: &DHTKeySecret) -> Result<Vec<u8>, VeilidAPIError> {
|
||||
// Ensure sender node id is valid
|
||||
if !self.sender_id.valid {
|
||||
return Err(VeilidAPIError::internal("sender id is invalid"));
|
||||
}
|
||||
|
||||
// Ensure extra data isn't too long
|
||||
let receipt_size: usize = self.extra_data.len() + MIN_RECEIPT_SIZE;
|
||||
if receipt_size > MAX_RECEIPT_SIZE {
|
@ -88,9 +88,7 @@ pub async fn test_key_conversions() {
|
||||
// Test default key
|
||||
let (dht_key, dht_key_secret) = (key::DHTKey::default(), key::DHTKeySecret::default());
|
||||
assert_eq!(dht_key.bytes, EMPTY_KEY);
|
||||
assert!(!dht_key.valid);
|
||||
assert_eq!(dht_key_secret.bytes, EMPTY_KEY_SECRET);
|
||||
assert!(!dht_key_secret.valid);
|
||||
let dht_key_string = String::from(&dht_key);
|
||||
trace!("dht_key_string: {:?}", dht_key_string);
|
||||
let dht_key_string2 = String::from(&dht_key);
|
||||
@ -140,13 +138,13 @@ pub async fn test_key_conversions() {
|
||||
|
||||
// Assert string roundtrip
|
||||
assert_eq!(String::from(&dht_key2_back), dht_key2_string);
|
||||
assert!(key::DHTKey::try_from("") == Ok(key::DHTKey::default()));
|
||||
assert!(key::DHTKeySecret::try_from("") == Ok(key::DHTKeySecret::default()));
|
||||
// These conversions should fail
|
||||
assert!(key::DHTKey::try_from("whatever").is_err());
|
||||
assert!(key::DHTKeySecret::try_from("whatever").is_err());
|
||||
assert!(key::DHTKey::try_from("").is_err());
|
||||
assert!(key::DHTKeySecret::try_from("").is_err());
|
||||
assert!(key::DHTKey::try_from(" ").is_err());
|
||||
assert!(key::DHTKeySecret::try_from(" ").is_err());
|
||||
assert!(key::DHTKey::try_from(
|
||||
"qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqq"
|
||||
)
|
@ -1,13 +0,0 @@
|
||||
mod crypto;
|
||||
mod envelope;
|
||||
mod key;
|
||||
mod receipt;
|
||||
mod value;
|
||||
|
||||
pub mod tests;
|
||||
|
||||
pub use crypto::*;
|
||||
pub use envelope::*;
|
||||
pub use key::*;
|
||||
pub use receipt::*;
|
||||
pub use value::*;
|
@ -2,6 +2,7 @@ use crate::xx::*;
|
||||
use crate::*;
|
||||
use data_encoding::BASE64URL_NOPAD;
|
||||
use keyring_manager::*;
|
||||
use rkyv::{Archive as RkyvArchive, Deserialize as RkyvDeserialize, Serialize as RkyvSerialize};
|
||||
use std::path::Path;
|
||||
|
||||
pub struct ProtectedStoreInner {
|
||||
@ -31,15 +32,18 @@ impl ProtectedStore {
|
||||
#[instrument(level = "trace", skip(self), err)]
|
||||
pub async fn delete_all(&self) -> EyreResult<()> {
|
||||
// Delete all known keys
|
||||
if self.remove_user_secret_string("node_id").await? {
|
||||
if self.remove_user_secret("node_id").await? {
|
||||
debug!("deleted protected_store key 'node_id'");
|
||||
}
|
||||
if self.remove_user_secret_string("node_id_secret").await? {
|
||||
if self.remove_user_secret("node_id_secret").await? {
|
||||
debug!("deleted protected_store key 'node_id_secret'");
|
||||
}
|
||||
if self.remove_user_secret_string("_test_key").await? {
|
||||
if self.remove_user_secret("_test_key").await? {
|
||||
debug!("deleted protected_store key '_test_key'");
|
||||
}
|
||||
if self.remove_user_secret("RouteSpecStore").await? {
|
||||
debug!("deleted protected_store key 'RouteSpecStore'");
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
@ -139,19 +143,60 @@ impl ProtectedStore {
|
||||
}
|
||||
}
|
||||
|
||||
#[instrument(level = "trace", skip(self), ret, err)]
|
||||
pub async fn remove_user_secret_string(&self, key: &str) -> EyreResult<bool> {
|
||||
let inner = self.inner.lock();
|
||||
match inner
|
||||
.keyring_manager
|
||||
.as_ref()
|
||||
.ok_or_else(|| eyre!("Protected store not initialized"))?
|
||||
.with_keyring(&self.service_name(), key, |kr| kr.delete_value())
|
||||
#[instrument(level = "trace", skip(self, value))]
|
||||
pub async fn save_user_secret_rkyv<T>(&self, key: &str, value: &T) -> EyreResult<bool>
|
||||
where
|
||||
T: RkyvSerialize<rkyv::ser::serializers::AllocSerializer<1024>>,
|
||||
{
|
||||
Ok(_) => Ok(true),
|
||||
Err(KeyringError::NoPasswordFound) => Ok(false),
|
||||
Err(e) => Err(eyre!("Failed to remove user secret: {}", e)),
|
||||
let v = to_rkyv(value)?;
|
||||
self.save_user_secret(&key, &v).await
|
||||
}
|
||||
|
||||
#[instrument(level = "trace", skip(self, value))]
|
||||
pub async fn save_user_secret_json<T>(&self, key: &str, value: &T) -> EyreResult<bool>
|
||||
where
|
||||
T: serde::Serialize,
|
||||
{
|
||||
let v = serde_json::to_vec(value)?;
|
||||
self.save_user_secret(&key, &v).await
|
||||
}
|
||||
|
||||
#[instrument(level = "trace", skip(self))]
|
||||
pub async fn load_user_secret_rkyv<T>(&self, key: &str) -> EyreResult<Option<T>>
|
||||
where
|
||||
T: RkyvArchive,
|
||||
<T as RkyvArchive>::Archived:
|
||||
for<'t> bytecheck::CheckBytes<rkyv::validation::validators::DefaultValidator<'t>>,
|
||||
<T as RkyvArchive>::Archived:
|
||||
RkyvDeserialize<T, rkyv::de::deserializers::SharedDeserializeMap>,
|
||||
{
|
||||
let out = self.load_user_secret(key).await?;
|
||||
let b = match out {
|
||||
Some(v) => v,
|
||||
None => {
|
||||
return Ok(None);
|
||||
}
|
||||
};
|
||||
|
||||
let obj = from_rkyv(b)?;
|
||||
Ok(Some(obj))
|
||||
}
|
||||
|
||||
#[instrument(level = "trace", skip(self))]
|
||||
pub async fn load_user_secret_json<T>(&self, key: &str) -> EyreResult<Option<T>>
|
||||
where
|
||||
T: for<'de> serde::de::Deserialize<'de>,
|
||||
{
|
||||
let out = self.load_user_secret(key).await?;
|
||||
let b = match out {
|
||||
Some(v) => v,
|
||||
None => {
|
||||
return Ok(None);
|
||||
}
|
||||
};
|
||||
|
||||
let obj = serde_json::from_slice(&b)?;
|
||||
Ok(Some(obj))
|
||||
}
|
||||
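A hypothetical usage sketch of the typed helpers added above; NodeIdentity and the "identity" key are made up for illustration, and the rkyv variants take the same shape with an rkyv-derived type:

#[derive(serde::Serialize, serde::Deserialize)]
struct NodeIdentity {
    node_id: String,
}

async fn save_and_load(ps: &ProtectedStore) -> EyreResult<()> {
    let id = NodeIdentity { node_id: "VLID:example".to_owned() };
    ps.save_user_secret_json("identity", &id).await?;
    let loaded: Option<NodeIdentity> = ps.load_user_secret_json("identity").await?;
    assert!(loaded.is_some());
    Ok(())
}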
|
||||
#[instrument(level = "trace", skip(self, value), ret, err)]
|
||||
@ -195,6 +240,16 @@ impl ProtectedStore {
|
||||
|
||||
#[instrument(level = "trace", skip(self), ret, err)]
|
||||
pub async fn remove_user_secret(&self, key: &str) -> EyreResult<bool> {
|
||||
self.remove_user_secret_string(key).await
|
||||
let inner = self.inner.lock();
|
||||
match inner
|
||||
.keyring_manager
|
||||
.as_ref()
|
||||
.ok_or_else(|| eyre!("Protected store not initialized"))?
|
||||
.with_keyring(&self.service_name(), key, |kr| kr.delete_value())
|
||||
{
|
||||
Ok(_) => Ok(true),
|
||||
Err(KeyringError::NoPasswordFound) => Ok(false),
|
||||
Err(e) => Err(eyre!("Failed to remove user secret: {}", e)),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -8,6 +8,8 @@ struct TableStoreInner {
|
||||
opened: BTreeMap<String, Weak<Mutex<TableDBInner>>>,
|
||||
}
|
||||
|
||||
/// Veilid Table Storage
|
||||
/// Database for storing key value pairs persistently across runs
|
||||
#[derive(Clone)]
|
||||
pub struct TableStore {
|
||||
config: VeilidConfig,
|
||||
@ -20,31 +22,38 @@ impl TableStore {
|
||||
opened: BTreeMap::new(),
|
||||
}
|
||||
}
|
||||
pub fn new(config: VeilidConfig) -> Self {
|
||||
pub(crate) fn new(config: VeilidConfig) -> Self {
|
||||
Self {
|
||||
config,
|
||||
inner: Arc::new(Mutex::new(Self::new_inner())),
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn delete_all(&self) -> EyreResult<()> {
|
||||
// Delete all known keys
|
||||
self.delete("crypto_caches").await?;
|
||||
/// Delete all known tables
|
||||
pub async fn delete_all(&self) {
|
||||
if let Err(e) = self.delete("crypto_caches").await {
|
||||
error!("failed to delete 'crypto_caches': {}", e);
|
||||
}
|
||||
if let Err(e) = self.delete("RouteSpecStore").await {
|
||||
error!("failed to delete 'RouteSpecStore': {}", e);
|
||||
}
|
||||
if let Err(e) = self.delete("routing_table").await {
|
||||
error!("failed to delete 'routing_table': {}", e);
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) async fn init(&self) -> EyreResult<()> {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub async fn init(&self) -> EyreResult<()> {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub async fn terminate(&self) {
|
||||
pub(crate) async fn terminate(&self) {
|
||||
assert!(
|
||||
self.inner.lock().opened.is_empty(),
|
||||
"all open databases should have been closed"
|
||||
);
|
||||
}
|
||||
|
||||
pub fn on_table_db_drop(&self, table: String) {
|
||||
pub(crate) fn on_table_db_drop(&self, table: String) {
|
||||
let mut inner = self.inner.lock();
|
||||
if inner.opened.remove(&table).is_none() {
|
||||
unreachable!("should have removed an item");
|
||||
@ -82,6 +91,8 @@ impl TableStore {
|
||||
})
|
||||
}
|
||||
|
||||
/// Get or create a TableDB database table. If the column count is greater than an
|
||||
/// existing TableDB's column count, the database will be upgraded to add the missing columns
|
||||
pub async fn open(&self, name: &str, column_count: u32) -> EyreResult<TableDB> {
|
||||
let table_name = self.get_table_name(name)?;
|
||||
|
||||
@ -121,6 +132,7 @@ impl TableStore {
|
||||
Ok(table_db)
|
||||
}
|
||||
|
||||
/// Delete a TableDB table by name
|
||||
pub async fn delete(&self, name: &str) -> EyreResult<bool> {
|
||||
let table_name = self.get_table_name(name)?;
|
||||
|
||||
|
@ -1,12 +1,14 @@
|
||||
use crate::xx::*;
|
||||
use crate::*;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use rkyv::{Archive as RkyvArchive, Deserialize as RkyvDeserialize, Serialize as RkyvSerialize};
|
||||
|
||||
cfg_if! {
|
||||
if #[cfg(target_arch = "wasm32")] {
|
||||
use keyvaluedb_web::*;
|
||||
use keyvaluedb::*;
|
||||
} else {
|
||||
use keyvaluedb_sqlite::*;
|
||||
use keyvaluedb::*;
|
||||
}
|
||||
}
|
||||
|
||||
@ -28,7 +30,7 @@ pub struct TableDB {
|
||||
}
|
||||
|
||||
impl TableDB {
|
||||
pub fn new(table: String, table_store: TableStore, database: Database) -> Self {
|
||||
pub(super) fn new(table: String, table_store: TableStore, database: Database) -> Self {
|
||||
Self {
|
||||
inner: Arc::new(Mutex::new(TableDBInner {
|
||||
table,
|
||||
@ -38,22 +40,24 @@ impl TableDB {
|
||||
}
|
||||
}
|
||||
|
||||
pub fn try_new_from_weak_inner(weak_inner: Weak<Mutex<TableDBInner>>) -> Option<Self> {
|
||||
pub(super) fn try_new_from_weak_inner(weak_inner: Weak<Mutex<TableDBInner>>) -> Option<Self> {
|
||||
weak_inner.upgrade().map(|table_db_inner| Self {
|
||||
inner: table_db_inner,
|
||||
})
|
||||
}
|
||||
|
||||
pub fn weak_inner(&self) -> Weak<Mutex<TableDBInner>> {
|
||||
pub(super) fn weak_inner(&self) -> Weak<Mutex<TableDBInner>> {
|
||||
Arc::downgrade(&self.inner)
|
||||
}
|
||||
|
||||
pub async fn get_column_count(&self) -> EyreResult<u32> {
|
||||
/// Get the total number of columns in the TableDB
|
||||
pub fn get_column_count(&self) -> EyreResult<u32> {
|
||||
let db = &self.inner.lock().database;
|
||||
db.num_columns().wrap_err("failed to get column count: {}")
|
||||
}
|
||||
|
||||
pub async fn get_keys(&self, col: u32) -> EyreResult<Vec<Box<[u8]>>> {
|
||||
/// Get the list of keys in a column of the TableDB
|
||||
pub fn get_keys(&self, col: u32) -> EyreResult<Vec<Box<[u8]>>> {
|
||||
let db = &self.inner.lock().database;
|
||||
let mut out: Vec<Box<[u8]>> = Vec::new();
|
||||
db.iter(col, None, &mut |kv| {
|
||||
@ -64,18 +68,29 @@ impl TableDB {
|
||||
Ok(out)
|
||||
}
|
||||
|
||||
pub async fn store(&self, col: u32, key: &[u8], value: &[u8]) -> EyreResult<()> {
|
||||
/// Start a TableDB write transaction. The transaction object must be committed or rolled back before dropping.
|
||||
pub fn transact<'a>(&'a self) -> TableDBTransaction<'a> {
|
||||
let dbt = {
|
||||
let db = &self.inner.lock().database;
|
||||
db.transaction()
|
||||
};
|
||||
TableDBTransaction::new(self, dbt)
|
||||
}
|
||||
|
||||
/// Store a key with a value in a column in the TableDB. Performs a single transaction immediately.
|
||||
pub fn store(&self, col: u32, key: &[u8], value: &[u8]) -> EyreResult<()> {
|
||||
let db = &self.inner.lock().database;
|
||||
let mut dbt = db.transaction();
|
||||
dbt.put(col, key, value);
|
||||
db.write(dbt).wrap_err("failed to store key")
|
||||
}
|
||||
|
||||
pub async fn store_cbor<T>(&self, col: u32, key: &[u8], value: &T) -> EyreResult<()>
|
||||
/// Store a key in rkyv format with a value in a column in the TableDB. Performs a single transaction immediately.
|
||||
pub fn store_rkyv<T>(&self, col: u32, key: &[u8], value: &T) -> EyreResult<()>
|
||||
where
|
||||
T: Serialize,
|
||||
T: RkyvSerialize<rkyv::ser::serializers::AllocSerializer<1024>>,
|
||||
{
|
||||
let v = serde_cbor::to_vec(value).wrap_err("couldn't store as CBOR")?;
|
||||
let v = to_rkyv(value)?;
|
||||
|
||||
let db = &self.inner.lock().database;
|
||||
let mut dbt = db.transaction();
|
||||
@ -83,14 +98,33 @@ impl TableDB {
|
||||
db.write(dbt).wrap_err("failed to store key")
|
||||
}
|
||||
|
||||
pub async fn load(&self, col: u32, key: &[u8]) -> EyreResult<Option<Vec<u8>>> {
|
||||
/// Store a key in json format with a value in a column in the TableDB. Performs a single transaction immediately.
|
||||
pub fn store_json<T>(&self, col: u32, key: &[u8], value: &T) -> EyreResult<()>
|
||||
where
|
||||
T: serde::Serialize,
|
||||
{
|
||||
let v = serde_json::to_vec(value)?;
|
||||
|
||||
let db = &self.inner.lock().database;
|
||||
let mut dbt = db.transaction();
|
||||
dbt.put(col, key, v.as_slice());
|
||||
db.write(dbt).wrap_err("failed to store key")
|
||||
}
|
||||
|
||||
/// Read a key from a column in the TableDB immediately.
|
||||
pub fn load(&self, col: u32, key: &[u8]) -> EyreResult<Option<Vec<u8>>> {
|
||||
let db = &self.inner.lock().database;
|
||||
db.get(col, key).wrap_err("failed to get key")
|
||||
}
|
||||
|
||||
pub async fn load_cbor<T>(&self, col: u32, key: &[u8]) -> EyreResult<Option<T>>
|
||||
/// Read an rkyv key from a column in the TableDB immediately
|
||||
pub fn load_rkyv<T>(&self, col: u32, key: &[u8]) -> EyreResult<Option<T>>
|
||||
where
|
||||
T: for<'de> Deserialize<'de>,
|
||||
T: RkyvArchive,
|
||||
<T as RkyvArchive>::Archived:
|
||||
for<'t> bytecheck::CheckBytes<rkyv::validation::validators::DefaultValidator<'t>>,
|
||||
<T as RkyvArchive>::Archived:
|
||||
RkyvDeserialize<T, rkyv::de::deserializers::SharedDeserializeMap>,
|
||||
{
|
||||
let db = &self.inner.lock().database;
|
||||
let out = db.get(col, key).wrap_err("failed to get key")?;
|
||||
@ -100,11 +134,29 @@ impl TableDB {
|
||||
return Ok(None);
|
||||
}
|
||||
};
|
||||
let obj = serde_cbor::from_slice::<T>(&b).wrap_err("failed to deserialize")?;
|
||||
let obj = from_rkyv(b)?;
|
||||
Ok(Some(obj))
|
||||
}
|
||||
|
||||
pub async fn delete(&self, col: u32, key: &[u8]) -> EyreResult<bool> {
|
||||
/// Read a serde-json key from a column in the TableDB immediately
|
||||
pub fn load_json<T>(&self, col: u32, key: &[u8]) -> EyreResult<Option<T>>
|
||||
where
|
||||
T: for<'de> serde::Deserialize<'de>,
|
||||
{
|
||||
let db = &self.inner.lock().database;
|
||||
let out = db.get(col, key).wrap_err("failed to get key")?;
|
||||
let b = match out {
|
||||
Some(v) => v,
|
||||
None => {
|
||||
return Ok(None);
|
||||
}
|
||||
};
|
||||
let obj = serde_json::from_slice(&b)?;
|
||||
Ok(Some(obj))
|
||||
}
|
||||
|
||||
/// Delete a key from a column in the TableDB
|
||||
pub fn delete(&self, col: u32, key: &[u8]) -> EyreResult<bool> {
|
||||
let db = &self.inner.lock().database;
|
||||
let found = db.get(col, key).wrap_err("failed to get key")?;
|
||||
match found {
|
||||
@ -118,3 +170,76 @@ impl TableDB {
|
||||
}
|
||||
}
|
||||
}
|
||||
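A hypothetical sketch of storing a typed record with the rkyv helpers above. CachedPeer and the "peer_cache" table are made up; the derives mirror the bounds that store_rkyv/load_rkyv require (a CheckBytes-validated archive), and to_rkyv/from_rkyv are assumed to be the crate's serializer helpers referenced in the code:

use bytecheck::CheckBytes;
use rkyv::{Archive as RkyvArchive, Deserialize as RkyvDeserialize, Serialize as RkyvSerialize};

#[derive(RkyvArchive, RkyvSerialize, RkyvDeserialize)]
#[archive_attr(repr(C), derive(CheckBytes))]
struct CachedPeer {
    address: String,
    last_seen: u64,
}

async fn cache_peer(table_store: &TableStore) -> EyreResult<()> {
    let db = table_store.open("peer_cache", 1).await?;
    let peer = CachedPeer {
        address: "203.0.113.7:5150".to_owned(),
        last_seen: 0,
    };
    db.store_rkyv(0, b"peer0", &peer)?;
    let loaded: Option<CachedPeer> = db.load_rkyv(0, b"peer0")?;
    assert!(loaded.is_some());
    Ok(())
}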
|
||||
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
/// A TableDB transaction
|
||||
/// Atomically commits a group of writes or deletes to the TableDB
|
||||
pub struct TableDBTransaction<'a> {
|
||||
db: &'a TableDB,
|
||||
dbt: Option<DBTransaction>,
|
||||
_phantom: core::marker::PhantomData<&'a ()>,
|
||||
}
|
||||
|
||||
impl<'a> TableDBTransaction<'a> {
|
||||
fn new(db: &'a TableDB, dbt: DBTransaction) -> Self {
|
||||
Self {
|
||||
db,
|
||||
dbt: Some(dbt),
|
||||
_phantom: Default::default(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Commit the transaction. Performs all actions atomically.
|
||||
pub fn commit(mut self) -> EyreResult<()> {
|
||||
self.db
|
||||
.inner
|
||||
.lock()
|
||||
.database
|
||||
.write(self.dbt.take().unwrap())
|
||||
.wrap_err("commit failed")
|
||||
}
|
||||
|
||||
/// Rollback the transaction. Does nothing to the TableDB.
|
||||
pub fn rollback(mut self) {
|
||||
self.dbt = None;
|
||||
}
|
||||
|
||||
/// Store a key with a value in a column in the TableDB
|
||||
pub fn store(&mut self, col: u32, key: &[u8], value: &[u8]) {
|
||||
self.dbt.as_mut().unwrap().put(col, key, value);
|
||||
}
|
||||
|
||||
/// Store a key in rkyv format with a value in a column in the TableDB
|
||||
pub fn store_rkyv<T>(&mut self, col: u32, key: &[u8], value: &T) -> EyreResult<()>
|
||||
where
|
||||
T: RkyvSerialize<rkyv::ser::serializers::AllocSerializer<1024>>,
|
||||
{
|
||||
let v = to_rkyv(value)?;
|
||||
self.dbt.as_mut().unwrap().put(col, key, v.as_slice());
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Store a key in json format with a value in a column in the TableDB
|
||||
pub fn store_json<T>(&mut self, col: u32, key: &[u8], value: &T) -> EyreResult<()>
|
||||
where
|
||||
T: serde::Serialize,
|
||||
{
|
||||
let v = serde_json::to_vec(value)?;
|
||||
self.dbt.as_mut().unwrap().put(col, key, v.as_slice());
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Delete a key from a column in the TableDB
|
||||
pub fn delete(&mut self, col: u32, key: &[u8]) {
|
||||
self.dbt.as_mut().unwrap().delete(col, key);
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a> Drop for TableDBTransaction<'a> {
|
||||
fn drop(&mut self) {
|
||||
if self.dbt.is_some() {
|
||||
warn!("Dropped transaction without commit or rollback");
|
||||
}
|
||||
}
|
||||
}
|
||||
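A short usage sketch of the transaction type above: dropping it without commit() or rollback() only logs a warning, so callers are expected to resolve it explicitly.

fn write_batch(db: &TableDB) -> EyreResult<()> {
    let mut txn = db.transact();
    txn.store(0, b"alpha", b"1");
    txn.store(0, b"beta", b"2");
    txn.delete(0, b"old_key");
    txn.commit() // all three operations are written atomically
}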
|
@ -3,52 +3,45 @@ use crate::xx::*;
|
||||
use crate::*;
|
||||
use data_encoding::BASE64URL_NOPAD;
|
||||
use js_sys::*;
|
||||
use send_wrapper::*;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use wasm_bindgen_futures::*;
|
||||
use web_sys::*;
|
||||
|
||||
#[wasm_bindgen]
|
||||
extern "C" {
|
||||
#[wasm_bindgen(catch, js_name = setPassword, js_namespace = ["global", "wasmhost", "keytar"])]
|
||||
fn keytar_setPassword(service: &str, account: &str, password: &str)
|
||||
-> Result<Promise, JsValue>;
|
||||
#[wasm_bindgen(catch, js_name = getPassword, js_namespace = ["global", "wasmhost", "keytar"])]
|
||||
fn keytar_getPassword(service: &str, account: &str) -> Result<Promise, JsValue>;
|
||||
#[wasm_bindgen(catch, js_name = deletePassword, js_namespace = ["global", "wasmhost", "keytar"])]
|
||||
fn keytar_deletePassword(service: &str, account: &str) -> Result<Promise, JsValue>;
|
||||
}
|
||||
|
||||
|
||||
#[derive(Clone)]
|
||||
pub struct ProtectedStore {
|
||||
config: VeilidConfig,
|
||||
}
|
||||
|
||||
impl ProtectedStore {
|
||||
|
||||
pub fn new(config: VeilidConfig) -> Self {
|
||||
Self {
|
||||
config,
|
||||
}
|
||||
Self { config }
|
||||
}
|
||||
|
||||
#[instrument(level = "trace", skip(self), err)]
|
||||
pub async fn delete_all(&self) -> EyreResult<()> {
|
||||
// Delete all known keys
|
||||
if self.remove_user_secret_string("node_id").await? {
|
||||
if self.remove_user_secret("node_id").await? {
|
||||
debug!("deleted protected_store key 'node_id'");
|
||||
}
|
||||
if self.remove_user_secret_string("node_id_secret").await? {
|
||||
if self.remove_user_secret("node_id_secret").await? {
|
||||
debug!("deleted protected_store key 'node_id_secret'");
|
||||
}
|
||||
if self.remove_user_secret_string("_test_key").await? {
|
||||
if self.remove_user_secret("_test_key").await? {
|
||||
debug!("deleted protected_store key '_test_key'");
|
||||
}
|
||||
if self.remove_user_secret("RouteSpecStore").await? {
|
||||
debug!("deleted protected_store key 'RouteSpecStore'");
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[instrument(level = "debug", skip(self), err)]
|
||||
pub async fn init(&self) -> EyreResult<()> {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[instrument(level = "debug", skip(self))]
|
||||
pub async fn terminate(&self) {}
|
||||
|
||||
fn keyring_name(&self) -> String {
|
||||
@ -69,32 +62,9 @@ impl ProtectedStore {
|
||||
}
|
||||
}
|
||||
|
||||
//#[instrument(level = "trace", skip(self, value), ret, err)]
|
||||
pub async fn save_user_secret_string(&self, key: &str, value: &str) -> EyreResult<bool> {
|
||||
if is_nodejs() {
|
||||
let prev = match JsFuture::from(
|
||||
keytar_getPassword(self.keyring_name().as_str(), key)
|
||||
.map_err(map_jsvalue_error)
|
||||
.wrap_err("exception thrown")?,
|
||||
)
|
||||
.await
|
||||
{
|
||||
Ok(v) => v.is_truthy(),
|
||||
Err(_) => false,
|
||||
};
|
||||
|
||||
match JsFuture::from(
|
||||
keytar_setPassword(self.keyring_name().as_str(), key, value)
|
||||
.map_err(map_jsvalue_error)
|
||||
.wrap_err("exception thrown")?,
|
||||
)
|
||||
.await
|
||||
{
|
||||
Ok(_) => {}
|
||||
Err(_) => bail!("Failed to set password"),
|
||||
}
|
||||
|
||||
Ok(prev)
|
||||
} else if is_browser() {
|
||||
if is_browser() {
|
||||
let win = match window() {
|
||||
Some(w) => w,
|
||||
None => {
|
||||
@ -134,25 +104,9 @@ impl ProtectedStore {
|
||||
}
|
||||
}
|
||||
|
||||
#[instrument(level = "trace", skip(self), err)]
|
||||
pub async fn load_user_secret_string(&self, key: &str) -> EyreResult<Option<String>> {
|
||||
if is_nodejs() {
|
||||
let prev = match JsFuture::from(
|
||||
keytar_getPassword(self.keyring_name().as_str(), key)
|
||||
.map_err(map_jsvalue_error)
|
||||
.wrap_err("exception thrown")?,
|
||||
)
|
||||
.await
|
||||
{
|
||||
Ok(p) => p,
|
||||
Err(_) => JsValue::UNDEFINED,
|
||||
};
|
||||
|
||||
if prev.is_undefined() || prev.is_null() {
|
||||
return Ok(None);
|
||||
}
|
||||
|
||||
Ok(prev.as_string())
|
||||
} else if is_browser() {
|
||||
if is_browser() {
|
||||
let win = match window() {
|
||||
Some(w) => w,
|
||||
None => {
|
||||
@ -181,19 +135,78 @@ impl ProtectedStore {
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn remove_user_secret_string(&self, key: &str) -> EyreResult<bool> {
|
||||
if is_nodejs() {
|
||||
match JsFuture::from(
|
||||
keytar_deletePassword(self.keyring_name().as_str(), key)
|
||||
.map_err(map_jsvalue_error)
|
||||
.wrap_err("exception thrown")?,
|
||||
)
|
||||
.await
|
||||
#[instrument(level = "trace", skip(self, value))]
|
||||
pub async fn save_user_secret_frozen<T>(&self, key: &str, value: &T) -> EyreResult<bool>
|
||||
where
|
||||
T: RkyvSerialize<rkyv::ser::serializers::AllocSerializer<1024>>,
|
||||
{
|
||||
Ok(v) => Ok(v.is_truthy()),
|
||||
Err(_) => bail!("Failed to delete"),
|
||||
let v = to_frozen(value)?;
|
||||
self.save_user_secret(&key, &v).await
|
||||
}
|
||||
} else if is_browser() {
|
||||
|
||||
#[instrument(level = "trace", skip(self))]
|
||||
pub async fn load_user_secret_frozen<T>(&self, key: &str) -> EyreResult<Option<T>>
|
||||
where
|
||||
T: RkyvArchive,
|
||||
<T as RkyvArchive>::Archived:
|
||||
for<'t> bytecheck::CheckBytes<rkyv::validation::validators::DefaultValidator<'t>>,
|
||||
<T as RkyvArchive>::Archived:
|
||||
rkyv::Deserialize<T, rkyv::de::deserializers::SharedDeserializeMap>,
|
||||
{
|
||||
let out = self.load_user_secret(key).await?;
|
||||
let b = match out {
|
||||
Some(v) => v,
|
||||
None => {
|
||||
return Ok(None);
|
||||
}
|
||||
};
|
||||
|
||||
let obj = from_frozen(&b)?;
|
||||
Ok(Some(obj))
|
||||
}
|
||||
|
||||
#[instrument(level = "trace", skip(self, value), ret, err)]
|
||||
pub async fn save_user_secret(&self, key: &str, value: &[u8]) -> EyreResult<bool> {
|
||||
let mut s = BASE64URL_NOPAD.encode(value);
|
||||
s.push('!');
|
||||
|
||||
self.save_user_secret_string(key, s.as_str()).await
|
||||
}
|
||||
|
||||
#[instrument(level = "trace", skip(self), err)]
|
||||
pub async fn load_user_secret(&self, key: &str) -> EyreResult<Option<Vec<u8>>> {
|
||||
let mut s = match self.load_user_secret_string(key).await? {
|
||||
Some(s) => s,
|
||||
None => {
|
||||
return Ok(None);
|
||||
}
|
||||
};
|
||||
|
||||
if s.pop() != Some('!') {
|
||||
bail!("User secret is not a buffer");
|
||||
}
|
||||
|
||||
let mut bytes = Vec::<u8>::new();
|
||||
let res = BASE64URL_NOPAD.decode_len(s.len());
|
||||
match res {
|
||||
Ok(l) => {
|
||||
bytes.resize(l, 0u8);
|
||||
}
|
||||
Err(_) => {
|
||||
bail!("Failed to decode");
|
||||
}
|
||||
}
|
||||
|
||||
let res = BASE64URL_NOPAD.decode_mut(s.as_bytes(), &mut bytes);
|
||||
match res {
|
||||
Ok(_) => Ok(Some(bytes)),
|
||||
Err(_) => bail!("Failed to decode"),
|
||||
}
|
||||
}
|
||||
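The string framing used above is simple enough to sketch in isolation: binary secrets are stored as base64url text with a trailing '!' so that load_user_secret can tell a buffer apart from a plain string secret. Assuming only the data_encoding crate:

use data_encoding::BASE64URL_NOPAD;

fn frame_secret(value: &[u8]) -> String {
    let mut s = BASE64URL_NOPAD.encode(value);
    s.push('!');
    s
}

fn unframe_secret(mut s: String) -> Option<Vec<u8>> {
    if s.pop() != Some('!') {
        return None; // not a framed binary secret
    }
    BASE64URL_NOPAD.decode(s.as_bytes()).ok()
}

// Round trip: unframe_secret(frame_secret(b"secret")) == Some(b"secret".to_vec())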
|
||||
#[instrument(level = "trace", skip(self), ret, err)]
|
||||
pub async fn remove_user_secret(&self, key: &str) -> EyreResult<bool> {
|
||||
if is_browser() {
|
||||
let win = match window() {
|
||||
Some(w) => w,
|
||||
None => {
|
||||
@ -231,45 +244,4 @@ impl ProtectedStore {
|
||||
unimplemented!();
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn save_user_secret(&self, key: &str, value: &[u8]) -> EyreResult<bool> {
|
||||
let mut s = BASE64URL_NOPAD.encode(value);
|
||||
s.push('!');
|
||||
|
||||
self.save_user_secret_string(key, s.as_str()).await
|
||||
}
|
||||
|
||||
pub async fn load_user_secret(&self, key: &str) -> EyreResult<Option<Vec<u8>>> {
|
||||
let mut s = match self.load_user_secret_string(key).await? {
|
||||
Some(s) => s,
|
||||
None => {
|
||||
return Ok(None);
|
||||
}
|
||||
};
|
||||
|
||||
if s.pop() != Some('!') {
|
||||
bail!("User secret is not a buffer");
|
||||
}
|
||||
|
||||
let mut bytes = Vec::<u8>::new();
|
||||
let res = BASE64URL_NOPAD.decode_len(s.len());
|
||||
match res {
|
||||
Ok(l) => {
|
||||
bytes.resize(l, 0u8);
|
||||
}
|
||||
Err(_) => {
|
||||
bail!("Failed to decode");
|
||||
}
|
||||
}
|
||||
|
||||
let res = BASE64URL_NOPAD.decode_mut(s.as_bytes(), &mut bytes);
|
||||
match res {
|
||||
Ok(_) => Ok(Some(bytes)),
|
||||
Err(_) => bail!("Failed to decode"),
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn remove_user_secret(&self, key: &str) -> EyreResult<bool> {
|
||||
self.remove_user_secret_string(key).await
|
||||
}
|
||||
}
|
@ -19,10 +19,8 @@ extern "C" {
|
||||
pub fn get_timestamp() -> u64 {
|
||||
if utils::is_browser() {
|
||||
return (Date::now() * 1000.0f64) as u64;
|
||||
} else if utils::is_nodejs() {
|
||||
return (Date::now() * 1000.0f64) as u64;
|
||||
} else {
|
||||
panic!("WASM requires browser or nodejs environment");
|
||||
panic!("WASM requires browser environment");
|
||||
}
|
||||
}
|
||||
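For reference, the timestamp path above converts the browser clock to the microsecond u64 convention used by the rest of the crate; a minimal standalone equivalent:

// js_sys::Date::now() returns milliseconds since the Unix epoch as an f64;
// multiplying by 1000 yields the microseconds-as-u64 used elsewhere.
pub fn get_timestamp_usec() -> u64 {
    (js_sys::Date::now() * 1000.0f64) as u64
}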
|
||||
@ -85,18 +83,22 @@ pub fn spawn<Out>(future: impl Future<Output = Out> + Send + 'static) -> MustJoi
|
||||
where
|
||||
Out: Send + 'static,
|
||||
{
|
||||
MustJoinHandle::new(Bindgen
|
||||
MustJoinHandle::new(
|
||||
Bindgen
|
||||
.spawn_handle(future)
|
||||
.expect("wasm-bindgen-futures spawn should never error out"))
|
||||
.expect("wasm-bindgen-futures spawn should never error out"),
|
||||
)
|
||||
}
|
||||
|
||||
pub fn spawn_local<Out>(future: impl Future<Output = Out> + 'static) -> MustJoinHandle<Out>
|
||||
where
|
||||
Out: 'static,
|
||||
{
|
||||
MustJoinHandle::new(Bindgen
|
||||
MustJoinHandle::new(
|
||||
Bindgen
|
||||
.spawn_handle_local(future)
|
||||
.expect("wasm-bindgen-futures spawn_local should never error out"))
|
||||
.expect("wasm-bindgen-futures spawn_local should never error out"),
|
||||
)
|
||||
}
|
||||
|
||||
// pub fn spawn_with_local_set<Out>(
|
||||
@ -114,10 +116,10 @@ where
|
||||
{
|
||||
Bindgen
|
||||
.spawn_handle_local(future)
|
||||
.expect("wasm-bindgen-futures spawn_local should never error out").detach()
|
||||
.expect("wasm-bindgen-futures spawn_local should never error out")
|
||||
.detach()
|
||||
}
|
||||
|
||||
|
||||
pub fn interval<F, FUT>(freq_ms: u32, callback: F) -> SendPinBoxFuture<()>
|
||||
where
|
||||
F: Fn() -> FUT + Send + Sync + 'static,
|
||||
@ -177,20 +179,12 @@ pub async fn get_outbound_relay_peer() -> Option<crate::veilid_api::PeerInfo> {
|
||||
// // .unwrap();
|
||||
// // });
|
||||
|
||||
// // JsFuture::from(promise).await.unwrap();
|
||||
// } else if utils::is_nodejs() {
|
||||
// // let promise = Promise::new(&mut |yes, _| {
|
||||
// // nodejs_global_set_timeout_with_callback_and_timeout_and_arguments_0(&yes, millis)
|
||||
// // .unwrap();
|
||||
// // });
|
||||
|
||||
// // JsFuture::from(promise).await.unwrap();
|
||||
// } else {
|
||||
// panic!("WASM requires browser or nodejs environment");
|
||||
// panic!("WASM requires browser environment");
|
||||
// }
|
||||
// }
|
||||
|
||||
|
||||
pub async fn txt_lookup<S: AsRef<str>>(_host: S) -> EyreResult<Vec<String>> {
|
||||
bail!("wasm does not support txt lookup")
|
||||
}
|
||||
|
@ -22,7 +22,7 @@ impl TableStore {
|
||||
opened: BTreeMap::new(),
|
||||
}
|
||||
}
|
||||
pub fn new(config: VeilidConfig) -> Self {
|
||||
pub(crate) fn new(config: VeilidConfig) -> Self {
|
||||
Self {
|
||||
config,
|
||||
inner: Arc::new(Mutex::new(Self::new_inner())),
|
||||
@ -30,12 +30,25 @@ impl TableStore {
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn init(&self) -> EyreResult<()> {
|
||||
/// Delete all known tables
|
||||
pub async fn delete_all(&self) {
|
||||
if let Err(e) = self.delete("crypto_caches").await {
|
||||
error!("failed to delete 'crypto_caches': {}", e);
|
||||
}
|
||||
if let Err(e) = self.delete("RouteSpecStore").await {
|
||||
error!("failed to delete 'RouteSpecStore': {}", e);
|
||||
}
|
||||
if let Err(e) = self.delete("routing_table").await {
|
||||
error!("failed to delete 'routing_table': {}", e);
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) async fn init(&self) -> EyreResult<()> {
|
||||
let _async_guard = self.async_lock.lock().await;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub async fn terminate(&self) {
|
||||
pub(crate) async fn terminate(&self) {
|
||||
let _async_guard = self.async_lock.lock().await;
|
||||
assert!(
|
||||
self.inner.lock().opened.len() == 0,
|
||||
@ -43,7 +56,7 @@ impl TableStore {
|
||||
);
|
||||
}
|
||||
|
||||
pub fn on_table_db_drop(&self, table: String) {
|
||||
pub(crate) fn on_table_db_drop(&self, table: String) {
|
||||
let mut inner = self.inner.lock();
|
||||
match inner.opened.remove(&table) {
|
||||
Some(_) => (),
|
||||
@ -69,6 +82,8 @@ impl TableStore {
|
||||
})
|
||||
}
|
||||
|
||||
/// Get or create a TableDB database table. If the column count is greater than an
|
||||
/// existing TableDB's column count, the database will be upgraded to add the missing columns
|
||||
pub async fn open(&self, name: &str, column_count: u32) -> EyreResult<TableDB> {
|
||||
let _async_guard = self.async_lock.lock().await;
|
||||
let table_name = self.get_table_name(name)?;
|
||||
@ -89,7 +104,10 @@ impl TableStore {
|
||||
let db = Database::open(table_name.clone(), column_count)
|
||||
.await
|
||||
.wrap_err("failed to open tabledb")?;
|
||||
info!("opened table store '{}' with table name '{:?}' with {} columns", name, table_name, column_count);
|
||||
info!(
|
||||
"opened table store '{}' with table name '{:?}' with {} columns",
|
||||
name, table_name, column_count
|
||||
);
|
||||
|
||||
let table_db = TableDB::new(table_name.clone(), self.clone(), db);
|
||||
|
||||
@ -101,6 +119,7 @@ impl TableStore {
|
||||
Ok(table_db)
|
||||
}
|
||||
|
||||
/// Delete a TableDB table by name
|
||||
pub async fn delete(&self, name: &str) -> EyreResult<bool> {
|
||||
let _async_guard = self.async_lock.lock().await;
|
||||
trace!("TableStore::delete {}", name);
|
||||
@ -117,9 +136,7 @@ impl TableStore {
|
||||
}
|
||||
}
|
||||
|
||||
if utils::is_nodejs() {
|
||||
unimplemented!();
|
||||
} else if utils::is_browser() {
|
||||
if utils::is_browser() {
|
||||
let out = match Database::delete(table_name.clone()).await {
|
||||
Ok(_) => true,
|
||||
Err(_) => false,
|
||||
|
@ -15,21 +15,6 @@ extern "C" {
|
||||
pub fn alert(s: &str);
|
||||
}
|
||||
|
||||
pub fn is_nodejs() -> bool {
|
||||
static CACHE: AtomicI8 = AtomicI8::new(-1);
|
||||
let cache = CACHE.load(Ordering::Relaxed);
|
||||
if cache != -1 {
|
||||
return cache != 0;
|
||||
}
|
||||
|
||||
let res = js_sys::eval("process.release.name === 'node'")
|
||||
.map(|res| res.is_truthy())
|
||||
.unwrap_or_default();
|
||||
|
||||
CACHE.store(res as i8, Ordering::Relaxed);
|
||||
res
|
||||
}
|
||||
|
||||
pub fn is_browser() -> bool {
|
||||
static CACHE: AtomicI8 = AtomicI8::new(-1);
|
||||
let cache = CACHE.load(Ordering::Relaxed);
|
||||
@ -60,24 +45,6 @@ pub fn is_browser() -> bool {
|
||||
// res
|
||||
// }
|
||||
|
||||
// pub fn node_require(module: &str) -> JsValue {
|
||||
// if !is_nodejs() {
|
||||
// return JsValue::UNDEFINED;
|
||||
// }
|
||||
|
||||
// let mut home = env!("CARGO_MANIFEST_DIR");
|
||||
// if home.len() == 0 {
|
||||
// home = ".";
|
||||
// }
|
||||
|
||||
// match js_sys::eval(format!("require(\"{}/{}\")", home, module).as_str()) {
|
||||
// Ok(v) => v,
|
||||
// Err(e) => {
|
||||
// panic!("node_require failed: {:?}", e);
|
||||
// }
|
||||
// }
|
||||
// }
|
||||
|
||||
#[derive(ThisError, Debug, Clone, Eq, PartialEq)]
|
||||
#[error("JsValue error")]
|
||||
pub struct JsValueError(String);
|
||||
|
@ -22,7 +22,7 @@ mod api_tracing_layer;
|
||||
mod attachment_manager;
|
||||
mod callback_state_machine;
|
||||
mod core_context;
|
||||
mod dht;
|
||||
mod crypto;
|
||||
mod intf;
|
||||
mod network_manager;
|
||||
mod receipt_manager;
|
||||
@ -64,12 +64,14 @@ pub fn veilid_version() -> (u32, u32, u32) {
|
||||
#[cfg(target_os = "android")]
|
||||
pub use intf::utils::android::{veilid_core_setup_android, veilid_core_setup_android_no_log};
|
||||
|
||||
pub static DEFAULT_LOG_IGNORE_LIST: [&str; 19] = [
|
||||
pub static DEFAULT_LOG_IGNORE_LIST: [&str; 21] = [
|
||||
"mio",
|
||||
"h2",
|
||||
"hyper",
|
||||
"tower",
|
||||
"tonic",
|
||||
"tokio",
|
||||
"runtime",
|
||||
"tokio_util",
|
||||
"want",
|
||||
"serial_test",
|
||||
|
@ -4,7 +4,7 @@ use super::*;
|
||||
pub struct ConnectionHandle {
|
||||
id: u64,
|
||||
descriptor: ConnectionDescriptor,
|
||||
channel: flume::Sender<Vec<u8>>,
|
||||
channel: flume::Sender<(Option<Id>, Vec<u8>)>,
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
@ -17,7 +17,7 @@ impl ConnectionHandle {
|
||||
pub(super) fn new(
|
||||
id: u64,
|
||||
descriptor: ConnectionDescriptor,
|
||||
channel: flume::Sender<Vec<u8>>,
|
||||
channel: flume::Sender<(Option<Id>, Vec<u8>)>,
|
||||
) -> Self {
|
||||
Self {
|
||||
id,
|
||||
@ -34,16 +34,22 @@ impl ConnectionHandle {
|
||||
self.descriptor.clone()
|
||||
}
|
||||
|
||||
#[instrument(level="trace", skip(self, message), fields(message.len = message.len()))]
|
||||
pub fn send(&self, message: Vec<u8>) -> ConnectionHandleSendResult {
|
||||
match self.channel.send(message) {
|
||||
match self.channel.send((Span::current().id(), message)) {
|
||||
Ok(()) => ConnectionHandleSendResult::Sent,
|
||||
Err(e) => ConnectionHandleSendResult::NotSent(e.0),
|
||||
Err(e) => ConnectionHandleSendResult::NotSent(e.0 .1),
|
||||
}
|
||||
}
|
||||
#[instrument(level="trace", skip(self, message), fields(message.len = message.len()))]
|
||||
pub async fn send_async(&self, message: Vec<u8>) -> ConnectionHandleSendResult {
|
||||
match self.channel.send_async(message).await {
|
||||
match self
|
||||
.channel
|
||||
.send_async((Span::current().id(), message))
|
||||
.await
|
||||
{
|
||||
Ok(()) => ConnectionHandleSendResult::Sent,
|
||||
Err(e) => ConnectionHandleSendResult::NotSent(e.0),
|
||||
Err(e) => ConnectionHandleSendResult::NotSent(e.0 .1),
|
||||
}
|
||||
}
|
||||
}
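The channel type change above threads a tracing span id alongside each message, so the connection's processing loop can relate a queued send back to the span that produced it. A small sketch of that handoff, assuming only the flume and tracing crates:

use tracing::{span, Level, Span};

fn span_id_handoff() {
    let (tx, rx) = flume::unbounded::<(Option<tracing::span::Id>, Vec<u8>)>();

    // Sender side: capture the current span id next to the payload.
    let _guard = span!(Level::TRACE, "send_side").entered();
    tx.send((Span::current().id(), b"hello".to_vec())).unwrap();

    // Receiver side: the processing loop can use the id to link spans, e.g.
    // recv_span.follows_from(span_id) -- which the diff notes is currently
    // disabled because of a missing otel span-extension issue.
    let (_span_id, message) = rx.recv().unwrap();
    assert_eq!(message, b"hello".to_vec());
}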
|
||||
|
@ -26,6 +26,7 @@ struct ConnectionManagerArc {
|
||||
connection_initial_timeout_ms: u32,
|
||||
connection_inactivity_timeout_ms: u32,
|
||||
connection_table: ConnectionTable,
|
||||
address_lock_table: AsyncTagLockTable<SocketAddr>,
|
||||
inner: Mutex<Option<ConnectionManagerInner>>,
|
||||
}
|
||||
impl core::fmt::Debug for ConnectionManagerArc {
|
||||
@ -69,6 +70,7 @@ impl ConnectionManager {
|
||||
connection_initial_timeout_ms,
|
||||
connection_inactivity_timeout_ms,
|
||||
connection_table: ConnectionTable::new(config),
|
||||
address_lock_table: AsyncTagLockTable::new(),
|
||||
inner: Mutex::new(None),
|
||||
}
|
||||
}
|
||||
@ -140,6 +142,7 @@ impl ConnectionManager {
|
||||
// Internal routine to register new connection atomically.
|
||||
// Registers connection in the connection table for later access
|
||||
// and spawns a message processing loop for the connection
|
||||
#[instrument(level = "trace", skip(self, inner), ret, err)]
|
||||
fn on_new_protocol_network_connection(
|
||||
&self,
|
||||
inner: &mut ConnectionManagerInner,
|
||||
@ -195,6 +198,7 @@ impl ConnectionManager {
|
||||
}
|
||||
|
||||
// Returns a network connection if one already is established
|
||||
//#[instrument(level = "trace", skip(self), ret)]
|
||||
pub fn get_connection(&self, descriptor: ConnectionDescriptor) -> Option<ConnectionHandle> {
|
||||
self.arc
|
||||
.connection_table
|
||||
@ -228,21 +232,29 @@ impl ConnectionManager {
|
||||
});
|
||||
// Wait for the killed connections to end their recv loops
|
||||
let did_kill = !killed.is_empty();
|
||||
for k in killed {
|
||||
for mut k in killed {
|
||||
k.close();
|
||||
k.await;
|
||||
}
|
||||
did_kill
|
||||
}
|
||||
|
||||
// Called when we want to create a new connection or get the current one that already exists
|
||||
// This will kill off any connections that are in conflict with the new connection to be made
|
||||
// in order to make room for the new connection in the system's connection table
|
||||
// This routine needs to be atomic, or connections may exist in the table that are not established
|
||||
/// Called when we want to create a new connection or get the current one that already exists
|
||||
/// This will kill off any connections that are in conflict with the new connection to be made
|
||||
/// in order to make room for the new connection in the system's connection table
|
||||
/// This routine needs to be atomic, or connections may exist in the table that are not established
|
||||
#[instrument(level = "trace", skip(self), ret, err)]
|
||||
pub async fn get_or_create_connection(
|
||||
&self,
|
||||
local_addr: Option<SocketAddr>,
|
||||
dial_info: DialInfo,
|
||||
) -> EyreResult<NetworkResult<ConnectionHandle>> {
|
||||
// Async lock on the remote address for atomicity per remote
|
||||
let peer_address = dial_info.to_peer_address();
|
||||
let remote_addr = peer_address.to_socket_addr();
|
||||
|
||||
let _lock_guard = self.arc.address_lock_table.lock_tag(remote_addr).await;
|
||||
|
||||
log_net!(
|
||||
"== get_or_create_connection local_addr={:?} dial_info={:?}",
|
||||
local_addr.green(),
|
||||
@ -253,21 +265,12 @@ impl ConnectionManager {
|
||||
let did_kill = self.kill_off_colliding_connections(&dial_info).await;
|
||||
let mut retry_count = if did_kill { 2 } else { 0 };
|
||||
|
||||
// Make a connection descriptor for this dialinfo
|
||||
let peer_address = dial_info.to_peer_address();
|
||||
let descriptor = match local_addr {
|
||||
Some(la) => {
|
||||
ConnectionDescriptor::new(peer_address, SocketAddress::from_socket_addr(la))
|
||||
}
|
||||
None => ConnectionDescriptor::new_no_local(peer_address),
|
||||
};
|
||||
|
||||
// If any connection to this remote exists that has the same protocol, return it
|
||||
// Any connection will do, we don't have to match the local address
|
||||
if let Some(conn) = self
|
||||
.arc
|
||||
.connection_table
|
||||
.get_last_connection_by_remote(descriptor.remote())
|
||||
.get_last_connection_by_remote(peer_address)
|
||||
{
|
||||
log_net!(
|
||||
"== Returning existing connection local_addr={:?} peer_address={:?}",
|
||||
@ -288,6 +291,23 @@ impl ConnectionManager {
|
||||
.await;
|
||||
match result_net_res {
|
||||
Ok(net_res) => {
|
||||
// If the connection 'already exists', then try one last time to return a connection from the table, in case
|
||||
// an 'accept' happened at literally the same time as our connect
|
||||
if net_res.is_already_exists() {
|
||||
if let Some(conn) = self
|
||||
.arc
|
||||
.connection_table
|
||||
.get_last_connection_by_remote(peer_address)
|
||||
{
|
||||
log_net!(
|
||||
"== Returning existing connection in race local_addr={:?} peer_address={:?}",
|
||||
local_addr.green(),
|
||||
peer_address.green()
|
||||
);
|
||||
|
||||
return Ok(NetworkResult::Value(conn));
|
||||
}
|
||||
}
|
||||
if net_res.is_value() || retry_count == 0 {
|
||||
break net_res;
|
||||
}
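AsyncTagLockTable itself is not shown in this diff; the sketch below is only a guess at the idea behind lock_tag(): one async mutex per remote address, so get_or_create_connection stays atomic per remote. It assumes the async-lock crate and skips cleanup of idle entries.

use std::collections::HashMap;
use std::net::SocketAddr;
use std::sync::{Arc, Mutex};

#[derive(Clone, Default)]
struct TagLockTable {
    locks: Arc<Mutex<HashMap<SocketAddr, Arc<async_lock::Mutex<()>>>>>,
}

impl TagLockTable {
    // Returns a guard; holding it serializes all callers using the same tag.
    async fn lock_tag(&self, tag: SocketAddr) -> async_lock::MutexGuardArc<()> {
        let entry = {
            let mut locks = self.locks.lock().unwrap();
            locks
                .entry(tag)
                .or_insert_with(|| Arc::new(async_lock::Mutex::new(())))
                .clone()
        };
        // Await outside the synchronous map lock so other tags stay usable.
        entry.lock_arc().await
    }
}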
|
||||
@ -351,7 +371,7 @@ impl ConnectionManager {
|
||||
|
||||
// Called by low-level network when any connection-oriented protocol connection appears
|
||||
// either from incoming connections.
|
||||
#[cfg_attr(target_os = "wasm32", allow(dead_code))]
|
||||
#[cfg_attr(target_arch = "wasm32", allow(dead_code))]
|
||||
pub(super) async fn on_accepted_protocol_network_connection(
|
||||
&self,
|
||||
protocol_connection: ProtocolNetworkConnection,
|
||||
@ -378,6 +398,7 @@ impl ConnectionManager {
|
||||
|
||||
// Callback from network connection receive loop when it exits
|
||||
// cleans up the entry in the connection table
|
||||
#[instrument(level = "trace", skip(self))]
|
||||
pub(super) async fn report_connection_finished(&self, connection_id: u64) {
|
||||
// Get channel sender
|
||||
let sender = {
|
||||
|
@ -72,6 +72,7 @@ impl ConnectionTable {
|
||||
}
|
||||
}
|
||||
|
||||
#[instrument(level = "trace", skip(self))]
|
||||
pub async fn join(&self) {
|
||||
let mut unord = {
|
||||
let mut inner = self.inner.lock();
|
||||
@ -90,6 +91,7 @@ impl ConnectionTable {
|
||||
while unord.next().await.is_some() {}
|
||||
}
|
||||
|
||||
#[instrument(level = "trace", skip(self), ret, err)]
|
||||
pub fn add_connection(
|
||||
&self,
|
||||
network_connection: NetworkConnection,
|
||||
@ -142,7 +144,7 @@ impl ConnectionTable {
|
||||
let mut out_conn = None;
|
||||
if inner.conn_by_id[protocol_index].len() > inner.max_connections[protocol_index] {
|
||||
if let Some((lruk, lru_conn)) = inner.conn_by_id[protocol_index].remove_lru() {
|
||||
debug!("connection lru out: {:?}", lru_conn);
|
||||
log_net!(debug "connection lru out: {:?}", lru_conn);
|
||||
out_conn = Some(lru_conn);
|
||||
Self::remove_connection_records(&mut *inner, lruk);
|
||||
}
|
||||
@ -156,6 +158,8 @@ impl ConnectionTable {
|
||||
Ok(out_conn)
|
||||
}
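The eviction step above caps conn_by_id per protocol and hands the evicted connection back so its receive loop can be shut down. The same shape, illustrated with the lru crate and stub types in place of NetworkConnection:

use lru::LruCache;

type ConnId = u64;
type Conn = String; // stand-in for NetworkConnection

fn add_with_eviction(
    table: &mut LruCache<ConnId, Conn>, // e.g. created with LruCache::unbounded()
    max_connections: usize,
    id: ConnId,
    conn: Conn,
) -> Option<Conn> {
    table.put(id, conn);
    if table.len() > max_connections {
        // Evict the least-recently-used connection and return it to the
        // caller, which is responsible for closing and awaiting it.
        if let Some((_lru_id, lru_conn)) = table.pop_lru() {
            return Some(lru_conn);
        }
    }
    None
}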
|
||||
|
||||
//#[instrument(level = "trace", skip(self), ret)]
|
||||
#[allow(dead_code)]
|
||||
pub fn get_connection_by_id(&self, id: NetworkConnectionId) -> Option<ConnectionHandle> {
|
||||
let mut inner = self.inner.lock();
|
||||
let protocol_index = *inner.protocol_index_by_id.get(&id)?;
|
||||
@ -163,6 +167,7 @@ impl ConnectionTable {
|
||||
Some(out.get_handle())
|
||||
}
|
||||
|
||||
//#[instrument(level = "trace", skip(self), ret)]
|
||||
pub fn get_connection_by_descriptor(
|
||||
&self,
|
||||
descriptor: ConnectionDescriptor,
|
||||
@ -175,6 +180,7 @@ impl ConnectionTable {
|
||||
Some(out.get_handle())
|
||||
}
|
||||
|
||||
//#[instrument(level = "trace", skip(self), ret)]
|
||||
pub fn get_last_connection_by_remote(&self, remote: PeerAddress) -> Option<ConnectionHandle> {
|
||||
let mut inner = self.inner.lock();
|
||||
|
||||
@ -184,7 +190,9 @@ impl ConnectionTable {
|
||||
Some(out.get_handle())
|
||||
}
|
||||
|
||||
pub fn _get_connection_ids_by_remote(&self, remote: PeerAddress) -> Vec<NetworkConnectionId> {
|
||||
//#[instrument(level = "trace", skip(self), ret)]
|
||||
#[allow(dead_code)]
|
||||
pub fn get_connection_ids_by_remote(&self, remote: PeerAddress) -> Vec<NetworkConnectionId> {
|
||||
let inner = self.inner.lock();
|
||||
inner
|
||||
.ids_by_remote
|
||||
@ -219,6 +227,7 @@ impl ConnectionTable {
|
||||
inner.conn_by_id.iter().fold(0, |acc, c| acc + c.len())
|
||||
}
|
||||
|
||||
#[instrument(level = "trace", skip(inner), ret)]
|
||||
fn remove_connection_records(
|
||||
inner: &mut ConnectionTableInner,
|
||||
id: NetworkConnectionId,
|
||||
@ -251,6 +260,7 @@ impl ConnectionTable {
|
||||
conn
|
||||
}
|
||||
|
||||
#[instrument(level = "trace", skip(self), ret)]
|
||||
pub fn remove_connection_by_id(&self, id: NetworkConnectionId) -> Option<NetworkConnection> {
|
||||
let mut inner = self.inner.lock();
|
||||
|
||||
|
File diff suppressed because it is too large
Load Diff
@ -40,11 +40,9 @@ struct NetworkInner {
|
||||
/// such as dhcp release or change of address or interfaces being added or removed
|
||||
network_needs_restart: bool,
|
||||
/// the calculated protocol configuration for inbound/outbound protocols
|
||||
protocol_config: Option<ProtocolConfig>,
|
||||
protocol_config: ProtocolConfig,
|
||||
/// set of statically configured protocols with public dialinfo
|
||||
static_public_dialinfo: ProtocolTypeSet,
|
||||
/// network class per routing domain
|
||||
network_class: [Option<NetworkClass>; RoutingDomain::count()],
|
||||
/// join handles for all the low level network background tasks
|
||||
join_handles: Vec<MustJoinHandle<()>>,
|
||||
/// stop source for shutting down the low level network background tasks
|
||||
@ -65,8 +63,6 @@ struct NetworkInner {
|
||||
enable_ipv6_local: bool,
|
||||
/// set if we need to calculate our public dial info again
|
||||
needs_public_dial_info_check: bool,
|
||||
/// set during the actual execution of the public dial info check to ensure we don't do it more than once
|
||||
doing_public_dial_info_check: bool,
|
||||
/// the punishment closure to enact
|
||||
public_dial_info_check_punishment: Option<Box<dyn FnOnce() + Send + 'static>>,
|
||||
/// udp socket record for bound-first sockets, which are used to guarantee a port is available before
|
||||
@ -118,11 +114,9 @@ impl Network {
|
||||
network_started: false,
|
||||
network_needs_restart: false,
|
||||
needs_public_dial_info_check: false,
|
||||
doing_public_dial_info_check: false,
|
||||
public_dial_info_check_punishment: None,
|
||||
protocol_config: None,
|
||||
protocol_config: Default::default(),
|
||||
static_public_dialinfo: ProtocolTypeSet::empty(),
|
||||
network_class: [None, None],
|
||||
join_handles: Vec::new(),
|
||||
stop_source: None,
|
||||
udp_port: 0u16,
|
||||
@ -462,8 +456,10 @@ impl Network {
|
||||
|
||||
// receive single response
|
||||
let mut out = vec![0u8; MAX_MESSAGE_SIZE];
|
||||
let (recv_len, recv_addr) =
|
||||
network_result_try!(timeout(timeout_ms, h.recv_message(&mut out))
|
||||
let (recv_len, recv_addr) = network_result_try!(timeout(
|
||||
timeout_ms,
|
||||
h.recv_message(&mut out).instrument(Span::current())
|
||||
)
|
||||
.await
|
||||
.into_network_result())
|
||||
.wrap_err("recv_message failure")?;
|
||||
@ -618,7 +614,7 @@ impl Network {
|
||||
|
||||
/////////////////////////////////////////////////////////////////
|
||||
|
||||
pub fn get_protocol_config(&self) -> Option<ProtocolConfig> {
|
||||
pub fn get_protocol_config(&self) -> ProtocolConfig {
|
||||
self.inner.lock().protocol_config
|
||||
}
|
||||
|
||||
@ -732,7 +728,8 @@ impl Network {
|
||||
family_local,
|
||||
}
|
||||
};
|
||||
inner.protocol_config = Some(protocol_config);
|
||||
inner.protocol_config = protocol_config;
|
||||
|
||||
protocol_config
|
||||
};
|
||||
|
||||
@ -769,27 +766,37 @@ impl Network {
|
||||
// that we have ports available to us
|
||||
self.free_bound_first_ports();
|
||||
|
||||
// If we have static public dialinfo, upgrade our network class
|
||||
// set up the routing table's network config
|
||||
// if we have static public dialinfo, upgrade our network class
|
||||
|
||||
editor_public_internet.setup_network(
|
||||
protocol_config.inbound,
|
||||
protocol_config.outbound,
|
||||
protocol_config.family_global,
|
||||
);
|
||||
editor_local_network.setup_network(
|
||||
protocol_config.inbound,
|
||||
protocol_config.outbound,
|
||||
protocol_config.family_local,
|
||||
);
|
||||
let detect_address_changes = {
|
||||
let c = self.config.get();
|
||||
c.network.detect_address_changes
|
||||
};
|
||||
|
||||
if !detect_address_changes {
|
||||
let mut inner = self.inner.lock();
|
||||
let inner = self.inner.lock();
|
||||
if !inner.static_public_dialinfo.is_empty() {
|
||||
inner.network_class[RoutingDomain::PublicInternet as usize] =
|
||||
Some(NetworkClass::InboundCapable);
|
||||
editor_public_internet.set_network_class(Some(NetworkClass::InboundCapable));
|
||||
}
|
||||
}
|
||||
|
||||
info!("network started");
|
||||
self.inner.lock().network_started = true;
|
||||
|
||||
// commit routing table edits
|
||||
editor_public_internet.commit().await;
|
||||
editor_local_network.commit().await;
|
||||
|
||||
info!("network started");
|
||||
self.inner.lock().network_started = true;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
@ -861,21 +868,11 @@ impl Network {
|
||||
inner.public_dial_info_check_punishment = punishment;
|
||||
}
|
||||
|
||||
fn needs_public_dial_info_check(&self) -> bool {
|
||||
pub fn needs_public_dial_info_check(&self) -> bool {
|
||||
let inner = self.inner.lock();
|
||||
inner.needs_public_dial_info_check
|
||||
}
|
||||
|
||||
pub fn doing_public_dial_info_check(&self) -> bool {
|
||||
let inner = self.inner.lock();
|
||||
inner.doing_public_dial_info_check
|
||||
}
|
||||
|
||||
pub fn get_network_class(&self, routing_domain: RoutingDomain) -> Option<NetworkClass> {
|
||||
let inner = self.inner.lock();
|
||||
inner.network_class[routing_domain as usize]
|
||||
}
|
||||
|
||||
//////////////////////////////////////////
|
||||
|
||||
#[instrument(level = "trace", skip(self), err)]
|
||||
@ -937,6 +934,7 @@ impl Network {
|
||||
// If we need to figure out our network class, tick the task for it
|
||||
if detect_address_changes {
|
||||
let public_internet_network_class = self
|
||||
.routing_table()
|
||||
.get_network_class(RoutingDomain::PublicInternet)
|
||||
.unwrap_or(NetworkClass::Invalid);
|
||||
let needs_public_dial_info_check = self.needs_public_dial_info_check();
|
||||
|
@ -3,6 +3,10 @@ use futures_util::stream::FuturesUnordered;
|
||||
use futures_util::FutureExt;
|
||||
use stop_token::future::FutureExt as StopTokenFutureExt;
|
||||
|
||||
const PORT_MAP_VALIDATE_TRY_COUNT: usize = 3;
|
||||
const PORT_MAP_VALIDATE_DELAY_MS: u32 = 500;
|
||||
const PORT_MAP_TRY_COUNT: usize = 3;
|
||||
|
||||
struct DetectedPublicDialInfo {
|
||||
dial_info: DialInfo,
|
||||
class: DialInfoClass,
|
||||
@ -79,7 +83,7 @@ impl DiscoveryContext {
|
||||
async fn request_public_address(&self, node_ref: NodeRef) -> Option<SocketAddress> {
|
||||
let rpc = self.routing_table.rpc_processor();
|
||||
|
||||
let res = network_result_value_or_log!(debug match rpc.rpc_call_status(node_ref.clone()).await {
|
||||
let res = network_result_value_or_log!(debug match rpc.rpc_call_status(Destination::direct(node_ref.clone())).await {
|
||||
Ok(v) => v,
|
||||
Err(e) => {
|
||||
log_net!(error
|
||||
@ -98,7 +102,7 @@ impl DiscoveryContext {
|
||||
node_ref,
|
||||
res.answer
|
||||
);
|
||||
res.answer.socket_address
|
||||
res.answer.map(|si| si.socket_address)
|
||||
}
|
||||
|
||||
// find fast peers with a particular address type, and ask them to tell us what our external address is
|
||||
@ -125,22 +129,24 @@ impl DiscoveryContext {
|
||||
RoutingDomain::PublicInternet,
|
||||
dial_info_filter.clone(),
|
||||
);
|
||||
let disallow_relays_filter = move |e: &BucketEntryInner| {
|
||||
if let Some(n) = e.node_info(RoutingDomain::PublicInternet) {
|
||||
n.relay_peer_info.is_none()
|
||||
let disallow_relays_filter = Box::new(
|
||||
move |rti: &RoutingTableInner, _k: DHTKey, v: Option<Arc<BucketEntry>>| {
|
||||
let v = v.unwrap();
|
||||
v.with(rti, |_rti, e| {
|
||||
if let Some(n) = e.signed_node_info(RoutingDomain::PublicInternet) {
|
||||
n.relay_id().is_none()
|
||||
} else {
|
||||
false
|
||||
}
|
||||
};
|
||||
let filter = RoutingTable::combine_entry_filters(
|
||||
inbound_dial_info_entry_filter,
|
||||
disallow_relays_filter,
|
||||
);
|
||||
})
|
||||
},
|
||||
) as RoutingTableEntryFilter;
|
||||
let filters = VecDeque::from([inbound_dial_info_entry_filter, disallow_relays_filter]);
|
||||
|
||||
// Find public nodes matching this filter
|
||||
let peers = self
|
||||
.routing_table
|
||||
.find_fast_public_nodes_filtered(node_count, filter);
|
||||
.find_fast_public_nodes_filtered(node_count, filters);
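The refactor above turns entry filters into boxed closures collected in a VecDeque and applied together. A stand-alone sketch of that composition style, with simplified types in place of RoutingTableInner and BucketEntry:

use std::collections::VecDeque;

type Entry = u32; // stand-in for a routing table entry
type EntryFilter = Box<dyn Fn(&Entry) -> bool + Send>;

fn find_nodes_filtered(entries: &[Entry], filters: &VecDeque<EntryFilter>) -> Vec<Entry> {
    entries
        .iter()
        .copied()
        // An entry survives only if every filter in the deque accepts it.
        .filter(|e| filters.iter().all(|f| f(e)))
        .collect()
}

fn demo() -> Vec<Entry> {
    let inbound_filter: EntryFilter = Box::new(|e| *e % 2 == 0);
    let disallow_relays_filter: EntryFilter = Box::new(|e| *e > 10);
    let filters = VecDeque::from([inbound_filter, disallow_relays_filter]);
    find_nodes_filtered(&[4, 11, 12, 20], &filters) // -> [12, 20]
}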
|
||||
if peers.is_empty() {
|
||||
log_net!(
|
||||
"no external address detection peers of type {:?}:{:?}",
|
||||
@ -219,13 +225,7 @@ impl DiscoveryContext {
|
||||
}
|
||||
|
||||
#[instrument(level = "trace", skip(self), ret)]
|
||||
async fn try_port_mapping(&self) -> Option<DialInfo> {
|
||||
let (enable_upnp, _enable_natpmp) = {
|
||||
let c = self.net.config.get();
|
||||
(c.network.upnp, c.network.natpmp)
|
||||
};
|
||||
|
||||
if enable_upnp {
|
||||
async fn try_upnp_port_mapping(&self) -> Option<DialInfo> {
|
||||
let (pt, llpt, at, external_address_1, node_1, local_port) = {
|
||||
let inner = self.inner.lock();
|
||||
let pt = inner.protocol_type.unwrap();
|
||||
@ -237,33 +237,80 @@ impl DiscoveryContext {
|
||||
(pt, llpt, at, external_address_1, node_1, local_port)
|
||||
};
|
||||
|
||||
if let Some(mapped_external_address) = self
|
||||
let mut tries = 0;
|
||||
loop {
|
||||
tries += 1;
|
||||
|
||||
// Attempt a port mapping. If this doesn't succeed, it's not going to
|
||||
let Some(mapped_external_address) = self
|
||||
.net
|
||||
.unlocked_inner
|
||||
.igd_manager
|
||||
.map_any_port(llpt, at, local_port, Some(external_address_1.to_ip_addr()))
|
||||
.await
|
||||
.await else
|
||||
{
|
||||
// make dial info from the port mapping
|
||||
let external_mapped_dial_info = self
|
||||
.make_dial_info(SocketAddress::from_socket_addr(mapped_external_address), pt);
|
||||
return None;
|
||||
};
|
||||
|
||||
// ensure people can reach us. if we're firewalled off, this is useless
|
||||
// Make dial info from the port mapping
|
||||
let external_mapped_dial_info =
|
||||
self.make_dial_info(SocketAddress::from_socket_addr(mapped_external_address), pt);
|
||||
|
||||
// Attempt to validate the port mapping
|
||||
let mut validate_tries = 0;
|
||||
loop {
|
||||
validate_tries += 1;
|
||||
|
||||
// Ensure people can reach us. If we're firewalled off, this is useless
|
||||
if self
|
||||
.validate_dial_info(node_1.clone(), external_mapped_dial_info.clone(), false)
|
||||
.await
|
||||
{
|
||||
return Some(external_mapped_dial_info);
|
||||
}
|
||||
|
||||
if validate_tries == PORT_MAP_VALIDATE_TRY_COUNT {
|
||||
log_net!(debug "UPNP port mapping succeeded but port {}/{} is still unreachable.\nretrying\n",
|
||||
local_port, match llpt {
|
||||
LowLevelProtocolType::UDP => "udp",
|
||||
LowLevelProtocolType::TCP => "tcp",
|
||||
});
|
||||
intf::sleep(PORT_MAP_VALIDATE_DELAY_MS).await
|
||||
} else {
|
||||
// release the mapping if we're still unreachable
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
// Release the mapping if we're still unreachable
|
||||
let _ = self
|
||||
.net
|
||||
.unlocked_inner
|
||||
.igd_manager
|
||||
.unmap_port(llpt, at, external_address_1.port())
|
||||
.await;
|
||||
|
||||
if tries == PORT_MAP_TRY_COUNT {
|
||||
warn!("UPNP port mapping succeeded but port {}/{} is still unreachable.\nYou may need to add a local firewall allowed port on this machine.\n",
|
||||
local_port, match llpt {
|
||||
LowLevelProtocolType::UDP => "udp",
|
||||
LowLevelProtocolType::TCP => "tcp",
|
||||
}
|
||||
);
|
||||
break;
|
||||
}
|
||||
}
|
||||
None
|
||||
}
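Structurally, the new try_upnp_port_mapping() is an outer mapping-retry loop wrapping an inner validation-retry loop. A dependency-free skeleton of that control flow, with the IGD and validation calls passed in as closures (the constants mirror PORT_MAP_TRY_COUNT and PORT_MAP_VALIDATE_TRY_COUNT above):

async fn map_and_validate<M, V, F1, F2>(mut map_port: M, mut validate: V) -> Option<String>
where
    M: FnMut() -> F1,
    V: FnMut(String) -> F2,
    F1: std::future::Future<Output = Option<String>>,
    F2: std::future::Future<Output = bool>,
{
    const TRY_COUNT: usize = 3;          // PORT_MAP_TRY_COUNT
    const VALIDATE_TRY_COUNT: usize = 3; // PORT_MAP_VALIDATE_TRY_COUNT

    for _ in 0..TRY_COUNT {
        // Ask the gateway for a mapping; if it refuses, give up immediately.
        let dial_info = map_port().await?;

        // Check reachability a few times before declaring the mapping dead.
        for validate_try in 1..=VALIDATE_TRY_COUNT {
            if validate(dial_info.clone()).await {
                return Some(dial_info);
            }
            if validate_try < VALIDATE_TRY_COUNT {
                // real code: intf::sleep(PORT_MAP_VALIDATE_DELAY_MS).await
            }
        }
        // Still unreachable: unmap (omitted here) and try the mapping again.
    }
    None
}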
|
||||
|
||||
#[instrument(level = "trace", skip(self), ret)]
|
||||
async fn try_port_mapping(&self) -> Option<DialInfo> {
|
||||
let (enable_upnp, _enable_natpmp) = {
|
||||
let c = self.net.config.get();
|
||||
(c.network.upnp, c.network.natpmp)
|
||||
};
|
||||
|
||||
if enable_upnp {
|
||||
return self.try_upnp_port_mapping().await;
|
||||
}
|
||||
|
||||
None
|
||||
@ -375,17 +422,6 @@ impl DiscoveryContext {
|
||||
// If we know we are behind NAT check what kind
|
||||
#[instrument(level = "trace", skip(self), ret, err)]
|
||||
pub async fn protocol_process_nat(&self) -> EyreResult<bool> {
|
||||
let (node_1, external_1_dial_info, external_1_address, protocol_type, address_type) = {
|
||||
let inner = self.inner.lock();
|
||||
(
|
||||
inner.node_1.as_ref().unwrap().clone(),
|
||||
inner.external_1_dial_info.as_ref().unwrap().clone(),
|
||||
inner.external_1_address.unwrap(),
|
||||
inner.protocol_type.unwrap(),
|
||||
inner.address_type.unwrap(),
|
||||
)
|
||||
};
|
||||
|
||||
// Attempt a port mapping via all available and enabled mechanisms
|
||||
// Try this before the direct mapping in the event that we are restarting
|
||||
// and may not have recorded a mapping created the last time
|
||||
@ -397,8 +433,30 @@ impl DiscoveryContext {
|
||||
// No more retries
|
||||
return Ok(true);
|
||||
}
|
||||
|
||||
// XXX: is this necessary?
|
||||
// Redo our external_1 dial info detection because a failed port mapping attempt
|
||||
// may cause it to become invalid
|
||||
// Get our external address from some fast node, call it node 1
|
||||
// if !self.protocol_get_external_address_1().await {
|
||||
// // If we couldn't get an external address, then we should just try the whole network class detection again later
|
||||
// return Ok(false);
|
||||
// }
|
||||
|
||||
// Get the external dial info for our use here
|
||||
let (node_1, external_1_dial_info, external_1_address, protocol_type, address_type) = {
|
||||
let inner = self.inner.lock();
|
||||
(
|
||||
inner.node_1.as_ref().unwrap().clone(),
|
||||
inner.external_1_dial_info.as_ref().unwrap().clone(),
|
||||
inner.external_1_address.unwrap(),
|
||||
inner.protocol_type.unwrap(),
|
||||
inner.address_type.unwrap(),
|
||||
)
|
||||
};
|
||||
|
||||
// Do a validate_dial_info on the external address from a redirected node
|
||||
else if self
|
||||
if self
|
||||
.validate_dial_info(node_1.clone(), external_1_dial_info.clone(), true)
|
||||
.await
|
||||
{
|
||||
@ -592,12 +650,14 @@ impl Network {
|
||||
_l: u64,
|
||||
_t: u64,
|
||||
) -> EyreResult<()> {
|
||||
let routing_table = self.routing_table();
|
||||
|
||||
// Figure out if we can optimize TCP/WS checking since they are often on the same port
|
||||
let (protocol_config, existing_network_class, tcp_same_port) = {
|
||||
let inner = self.inner.lock();
|
||||
let protocol_config = inner.protocol_config.unwrap_or_default();
|
||||
let protocol_config = inner.protocol_config;
|
||||
let existing_network_class =
|
||||
inner.network_class[RoutingDomain::PublicInternet as usize];
|
||||
routing_table.get_network_class(RoutingDomain::PublicInternet);
|
||||
let tcp_same_port = if protocol_config.inbound.contains(ProtocolType::TCP)
|
||||
&& protocol_config.inbound.contains(ProtocolType::WS)
|
||||
{
|
||||
@ -607,7 +667,6 @@ impl Network {
|
||||
};
|
||||
(protocol_config, existing_network_class, tcp_same_port)
|
||||
};
|
||||
let routing_table = self.routing_table();
|
||||
|
||||
// Process all protocol and address combinations
|
||||
let mut futures = FuturesUnordered::new();
|
||||
@ -628,6 +687,7 @@ impl Network {
|
||||
}
|
||||
Some(vec![udpv4_context])
|
||||
}
|
||||
.instrument(trace_span!("do_public_dial_info_check UDPv4"))
|
||||
.boxed(),
|
||||
);
|
||||
}
|
||||
@ -647,6 +707,7 @@ impl Network {
|
||||
}
|
||||
Some(vec![udpv6_context])
|
||||
}
|
||||
.instrument(trace_span!("do_public_dial_info_check UDPv6"))
|
||||
.boxed(),
|
||||
);
|
||||
}
|
||||
@ -669,6 +730,7 @@ impl Network {
|
||||
}
|
||||
Some(vec![tcpv4_context])
|
||||
}
|
||||
.instrument(trace_span!("do_public_dial_info_check TCPv4"))
|
||||
.boxed(),
|
||||
);
|
||||
}
|
||||
@ -688,6 +750,7 @@ impl Network {
|
||||
}
|
||||
Some(vec![wsv4_context])
|
||||
}
|
||||
.instrument(trace_span!("do_public_dial_info_check WSv4"))
|
||||
.boxed(),
|
||||
);
|
||||
}
|
||||
@ -710,6 +773,7 @@ impl Network {
|
||||
}
|
||||
Some(vec![tcpv6_context])
|
||||
}
|
||||
.instrument(trace_span!("do_public_dial_info_check TCPv6"))
|
||||
.boxed(),
|
||||
);
|
||||
}
|
||||
@ -729,6 +793,7 @@ impl Network {
|
||||
}
|
||||
Some(vec![wsv6_context])
|
||||
}
|
||||
.instrument(trace_span!("do_public_dial_info_check WSv6"))
|
||||
.boxed(),
|
||||
);
|
||||
}
|
||||
@ -825,17 +890,16 @@ impl Network {
|
||||
|
||||
// Is the network class different?
|
||||
if existing_network_class != new_network_class {
|
||||
self.inner.lock().network_class[RoutingDomain::PublicInternet as usize] =
|
||||
new_network_class;
|
||||
editor.set_network_class(new_network_class);
|
||||
changed = true;
|
||||
log_net!(debug "PublicInternet network class changed to {:?}", new_network_class);
|
||||
}
|
||||
} else if existing_network_class.is_some() {
|
||||
// Network class could not be determined
|
||||
editor.clear_dial_info_details();
|
||||
self.inner.lock().network_class[RoutingDomain::PublicInternet as usize] = None;
|
||||
editor.set_network_class(None);
|
||||
changed = true;
|
||||
log_net!(debug "network class cleared");
|
||||
log_net!(debug "PublicInternet network class cleared");
|
||||
}
|
||||
|
||||
// Punish nodes that told us our public address had changed when it didn't
|
||||
@ -857,15 +921,11 @@ impl Network {
|
||||
l: u64,
|
||||
t: u64,
|
||||
) -> EyreResult<()> {
|
||||
// Note that we are doing the public dial info check
|
||||
// We don't have to check this for concurrency, since this routine is run in a TickTask/SingleFuture
|
||||
self.inner.lock().doing_public_dial_info_check = true;
|
||||
|
||||
// Do the public dial info check
|
||||
let out = self.do_public_dial_info_check(stop_token, l, t).await;
|
||||
|
||||
// Done with public dial info check
|
||||
self.inner.lock().doing_public_dial_info_check = false;
|
||||
self.inner.lock().needs_public_dial_info_check = false;
|
||||
|
||||
out
|
||||
}
|
||||
|
@ -42,7 +42,8 @@ impl Network {
|
||||
&self,
|
||||
tls_acceptor: &TlsAcceptor,
|
||||
stream: AsyncPeekStream,
|
||||
addr: SocketAddr,
|
||||
peer_addr: SocketAddr,
|
||||
local_addr: SocketAddr,
|
||||
protocol_handlers: &[Box<dyn ProtocolAcceptHandler>],
|
||||
tls_connection_initial_timeout_ms: u32,
|
||||
) -> EyreResult<Option<ProtocolNetworkConnection>> {
|
||||
@ -65,18 +66,20 @@ impl Network {
|
||||
.wrap_err("tls initial timeout")?
|
||||
.wrap_err("failed to peek tls stream")?;
|
||||
|
||||
self.try_handlers(ps, addr, protocol_handlers).await
|
||||
self.try_handlers(ps, peer_addr, local_addr, protocol_handlers)
|
||||
.await
|
||||
}
|
||||
|
||||
async fn try_handlers(
|
||||
&self,
|
||||
stream: AsyncPeekStream,
|
||||
addr: SocketAddr,
|
||||
peer_addr: SocketAddr,
|
||||
local_addr: SocketAddr,
|
||||
protocol_accept_handlers: &[Box<dyn ProtocolAcceptHandler>],
|
||||
) -> EyreResult<Option<ProtocolNetworkConnection>> {
|
||||
for ah in protocol_accept_handlers.iter() {
|
||||
if let Some(nc) = ah
|
||||
.on_accept(stream.clone(), addr)
|
||||
.on_accept(stream.clone(), peer_addr, local_addr)
|
||||
.await
|
||||
.wrap_err("io error")?
|
||||
{
|
||||
@ -105,21 +108,35 @@ impl Network {
|
||||
}
|
||||
};
|
||||
|
||||
// XXX
|
||||
// warn!(
|
||||
// "DEBUGACCEPT: local={} remote={}",
|
||||
// tcp_stream.local_addr().unwrap(),
|
||||
// tcp_stream.peer_addr().unwrap(),
|
||||
// );
|
||||
|
||||
let listener_state = listener_state.clone();
|
||||
let connection_manager = connection_manager.clone();
|
||||
|
||||
// Limit the number of connections from the same IP address
|
||||
// and the number of total connections
|
||||
let addr = match tcp_stream.peer_addr() {
|
||||
let peer_addr = match tcp_stream.peer_addr() {
|
||||
Ok(addr) => addr,
|
||||
Err(e) => {
|
||||
log_net!(debug "failed to get peer address: {}", e);
|
||||
return;
|
||||
}
|
||||
};
|
||||
let local_addr = match tcp_stream.local_addr() {
|
||||
Ok(addr) => addr,
|
||||
Err(e) => {
|
||||
log_net!(debug "failed to get local address: {}", e);
|
||||
return;
|
||||
}
|
||||
};
|
||||
// XXX limiting here instead for connection table? may be faster and avoids tls negotiation
|
||||
|
||||
log_net!("TCP connection from: {}", addr);
|
||||
log_net!("TCP connection from: {}", peer_addr);
|
||||
|
||||
// Create a stream we can peek on
|
||||
#[cfg(feature = "rt-tokio")]
|
||||
@ -139,7 +156,7 @@ impl Network {
|
||||
{
|
||||
// If we fail to get a packet within the connection initial timeout
|
||||
// then we punt this connection
|
||||
log_net!("connection initial timeout from: {:?}", addr);
|
||||
log_net!("connection initial timeout from: {:?}", peer_addr);
|
||||
return;
|
||||
}
|
||||
|
||||
@ -152,29 +169,30 @@ impl Network {
|
||||
self.try_tls_handlers(
|
||||
ls.tls_acceptor.as_ref().unwrap(),
|
||||
ps,
|
||||
addr,
|
||||
peer_addr,
|
||||
local_addr,
|
||||
&ls.tls_protocol_handlers,
|
||||
tls_connection_initial_timeout_ms,
|
||||
)
|
||||
.await
|
||||
} else {
|
||||
self.try_handlers(ps, addr, &ls.protocol_accept_handlers)
|
||||
self.try_handlers(ps, peer_addr, local_addr, &ls.protocol_accept_handlers)
|
||||
.await
|
||||
};
|
||||
|
||||
let conn = match conn {
|
||||
Ok(Some(c)) => {
|
||||
log_net!("protocol handler found for {:?}: {:?}", addr, c);
|
||||
log_net!("protocol handler found for {:?}: {:?}", peer_addr, c);
|
||||
c
|
||||
}
|
||||
Ok(None) => {
|
||||
// No protocol handlers matched? drop it.
|
||||
log_net!(debug "no protocol handler for connection from {:?}", addr);
|
||||
log_net!(debug "no protocol handler for connection from {:?}", peer_addr);
|
||||
return;
|
||||
}
|
||||
Err(e) => {
|
||||
// Failed to negotiate connection? drop it.
|
||||
log_net!(debug "failed to negotiate connection from {:?}: {}", addr, e);
|
||||
log_net!(debug "failed to negotiate connection from {:?}: {}", peer_addr, e);
|
||||
return;
|
||||
}
|
||||
};
|
||||
@ -311,7 +329,6 @@ impl Network {
|
||||
.push(new_protocol_accept_handler(
|
||||
self.network_manager().config(),
|
||||
true,
|
||||
addr,
|
||||
));
|
||||
} else {
|
||||
ls.write()
|
||||
@ -319,7 +336,6 @@ impl Network {
|
||||
.push(new_protocol_accept_handler(
|
||||
self.network_manager().config(),
|
||||
false,
|
||||
addr,
|
||||
));
|
||||
}
|
||||
|
||||
|
@ -34,6 +34,7 @@ cfg_if! {
|
||||
}
|
||||
}
|
||||
|
||||
#[instrument(level = "trace", ret, err)]
|
||||
pub fn new_unbound_shared_udp_socket(domain: Domain) -> io::Result<Socket> {
|
||||
let socket = Socket::new(domain, Type::DGRAM, Some(Protocol::UDP))?;
|
||||
if domain == Domain::IPV6 {
|
||||
@ -49,6 +50,7 @@ pub fn new_unbound_shared_udp_socket(domain: Domain) -> io::Result<Socket> {
|
||||
Ok(socket)
|
||||
}
|
||||
|
||||
#[instrument(level = "trace", ret, err)]
|
||||
pub fn new_bound_shared_udp_socket(local_address: SocketAddr) -> io::Result<Socket> {
|
||||
let domain = Domain::for_address(local_address);
|
||||
let socket = new_unbound_shared_udp_socket(domain)?;
|
||||
@ -60,6 +62,7 @@ pub fn new_bound_shared_udp_socket(local_address: SocketAddr) -> io::Result<Sock
|
||||
Ok(socket)
|
||||
}
|
||||
|
||||
#[instrument(level = "trace", ret, err)]
|
||||
pub fn new_bound_first_udp_socket(local_address: SocketAddr) -> io::Result<Socket> {
|
||||
let domain = Domain::for_address(local_address);
|
||||
let socket = Socket::new(domain, Type::DGRAM, Some(Protocol::UDP))?;
|
||||
@ -93,6 +96,7 @@ pub fn new_bound_first_udp_socket(local_address: SocketAddr) -> io::Result<Socke
|
||||
Ok(socket)
|
||||
}
|
||||
|
||||
#[instrument(level = "trace", ret, err)]
|
||||
pub fn new_unbound_shared_tcp_socket(domain: Domain) -> io::Result<Socket> {
|
||||
let socket = Socket::new(domain, Type::STREAM, Some(Protocol::TCP))?;
|
||||
if let Err(e) = socket.set_linger(Some(core::time::Duration::from_secs(0))) {
|
||||
@ -114,6 +118,7 @@ pub fn new_unbound_shared_tcp_socket(domain: Domain) -> io::Result<Socket> {
|
||||
Ok(socket)
|
||||
}
|
||||
|
||||
#[instrument(level = "trace", ret, err)]
|
||||
pub fn new_bound_shared_tcp_socket(local_address: SocketAddr) -> io::Result<Socket> {
|
||||
let domain = Domain::for_address(local_address);
|
||||
let socket = new_unbound_shared_tcp_socket(domain)?;
|
||||
@ -125,6 +130,7 @@ pub fn new_bound_shared_tcp_socket(local_address: SocketAddr) -> io::Result<Sock
|
||||
Ok(socket)
|
||||
}
|
||||
|
||||
#[instrument(level = "trace", ret, err)]
|
||||
pub fn new_bound_first_tcp_socket(local_address: SocketAddr) -> io::Result<Socket> {
|
||||
let domain = Domain::for_address(local_address);
|
||||
|
||||
@ -166,6 +172,8 @@ pub fn new_bound_first_tcp_socket(local_address: SocketAddr) -> io::Result<Socke
|
||||
}
|
||||
|
||||
// Non-blocking connect is tricky when you want to start with a prepared socket
|
||||
// Errors should not be logged as they are valid conditions for this function
|
||||
#[instrument(level = "trace", ret)]
|
||||
pub async fn nonblocking_connect(
|
||||
socket: Socket,
|
||||
addr: SocketAddr,
|
||||
@ -185,7 +193,6 @@ pub async fn nonblocking_connect(
|
||||
Err(err) if err.kind() == std::io::ErrorKind::WouldBlock => Ok(()),
|
||||
Err(e) => Err(e),
|
||||
}?;
|
||||
|
||||
let async_stream = Async::new(std::net::TcpStream::from(socket))?;
|
||||
|
||||
// The stream becomes writable when connected
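The nonblocking_connect() shown here starts from a prepared socket2 socket, treats the in-flight connect error as success, and then waits for writability under async-io. A condensed sketch of that flow; socket2 and async-io are assumed, on Unix the pending state is EINPROGRESS and may also need a raw_os_error() check, and a full version should verify the result (e.g. via take_error()) after the socket becomes writable.

use std::net::{SocketAddr, TcpStream};

use async_io::Async;
use socket2::{Domain, Protocol, Socket, Type};

pub async fn sketch_nonblocking_connect(addr: SocketAddr) -> std::io::Result<Async<TcpStream>> {
    let socket = Socket::new(Domain::for_address(addr), Type::STREAM, Some(Protocol::TCP))?;
    socket.set_nonblocking(true)?;

    // A non-blocking connect usually returns "would block" while in flight;
    // that is the expected pending state, not a failure.
    match socket.connect(&addr.into()) {
        Ok(()) => {}
        Err(e) if e.kind() == std::io::ErrorKind::WouldBlock => {}
        Err(e) => return Err(e),
    }

    // Wrap in async-io and wait: the stream becomes writable once the
    // three-way handshake completes (or fails).
    let stream = Async::new(TcpStream::from(socket))?;
    stream.writable().await?;
    Ok(stream)
}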
|
||||
|
@ -87,11 +87,11 @@ impl RawTcpNetworkConnection {
|
||||
Ok(NetworkResult::Value(out))
|
||||
}
|
||||
|
||||
#[instrument(level = "trace", err, skip(self), fields(network_result))]
|
||||
// #[instrument(level = "trace", err, skip(self), fields(network_result))]
|
||||
pub async fn recv(&self) -> io::Result<NetworkResult<Vec<u8>>> {
|
||||
let mut stream = self.stream.clone();
|
||||
let out = Self::recv_internal(&mut stream).await?;
|
||||
tracing::Span::current().record("network_result", &tracing::field::display(&out));
|
||||
//tracing::Span::current().record("network_result", &tracing::field::display(&out));
|
||||
Ok(out)
|
||||
}
|
||||
}
|
||||
@ -99,30 +99,20 @@ impl RawTcpNetworkConnection {
|
||||
///////////////////////////////////////////////////////////
|
||||
///
|
||||
|
||||
struct RawTcpProtocolHandlerInner {
|
||||
local_address: SocketAddr,
|
||||
}
|
||||
|
||||
#[derive(Clone)]
|
||||
pub struct RawTcpProtocolHandler
|
||||
where
|
||||
Self: ProtocolAcceptHandler,
|
||||
{
|
||||
connection_initial_timeout_ms: u32,
|
||||
inner: Arc<Mutex<RawTcpProtocolHandlerInner>>,
|
||||
}
|
||||
|
||||
impl RawTcpProtocolHandler {
|
||||
fn new_inner(local_address: SocketAddr) -> RawTcpProtocolHandlerInner {
|
||||
RawTcpProtocolHandlerInner { local_address }
|
||||
}
|
||||
|
||||
pub fn new(config: VeilidConfig, local_address: SocketAddr) -> Self {
|
||||
pub fn new(config: VeilidConfig) -> Self {
|
||||
let c = config.get();
|
||||
let connection_initial_timeout_ms = c.network.connection_initial_timeout_ms;
|
||||
Self {
|
||||
connection_initial_timeout_ms,
|
||||
inner: Arc::new(Mutex::new(Self::new_inner(local_address))),
|
||||
}
|
||||
}
|
||||
|
||||
@ -131,6 +121,7 @@ impl RawTcpProtocolHandler {
|
||||
self,
|
||||
ps: AsyncPeekStream,
|
||||
socket_addr: SocketAddr,
|
||||
local_addr: SocketAddr,
|
||||
) -> io::Result<Option<ProtocolNetworkConnection>> {
|
||||
log_net!("TCP: on_accept_async: enter");
|
||||
let mut peekbuf: [u8; PEEK_DETECT_LEN] = [0u8; PEEK_DETECT_LEN];
|
||||
@ -147,9 +138,8 @@ impl RawTcpProtocolHandler {
|
||||
SocketAddress::from_socket_addr(socket_addr),
|
||||
ProtocolType::TCP,
|
||||
);
|
||||
let local_address = self.inner.lock().local_address;
|
||||
let conn = ProtocolNetworkConnection::RawTcp(RawTcpNetworkConnection::new(
|
||||
ConnectionDescriptor::new(peer_addr, SocketAddress::from_socket_addr(local_address)),
|
||||
ConnectionDescriptor::new(peer_addr, SocketAddress::from_socket_addr(local_addr)),
|
||||
ps,
|
||||
));
|
||||
|
||||
@ -158,7 +148,7 @@ impl RawTcpProtocolHandler {
|
||||
Ok(Some(conn))
|
||||
}
|
||||
|
||||
#[instrument(level = "trace", err)]
|
||||
#[instrument(level = "trace", ret, err)]
|
||||
pub async fn connect(
|
||||
local_address: Option<SocketAddr>,
|
||||
socket_addr: SocketAddr,
|
||||
@ -202,7 +192,8 @@ impl ProtocolAcceptHandler for RawTcpProtocolHandler {
|
||||
&self,
|
||||
stream: AsyncPeekStream,
|
||||
peer_addr: SocketAddr,
|
||||
local_addr: SocketAddr,
|
||||
) -> SendPinBoxFuture<io::Result<Option<ProtocolNetworkConnection>>> {
|
||||
Box::pin(self.clone().on_accept_async(stream, peer_addr))
|
||||
Box::pin(self.clone().on_accept_async(stream, peer_addr, local_addr))
|
||||
}
|
||||
}
|
||||
|
@ -11,7 +11,7 @@ impl RawUdpProtocolHandler {
|
||||
Self { socket }
|
||||
}
|
||||
|
||||
#[instrument(level = "trace", err, skip(self, data), fields(data.len = data.len(), ret.len, ret.from))]
|
||||
// #[instrument(level = "trace", err, skip(self, data), fields(data.len = data.len(), ret.len, ret.descriptor))]
|
||||
pub async fn recv_message(&self, data: &mut [u8]) -> io::Result<(usize, ConnectionDescriptor)> {
|
||||
let (size, descriptor) = loop {
|
||||
let (size, remote_addr) = network_result_value_or_log!(debug self.socket.recv_from(data).await.into_network_result()? => continue);
|
||||
@ -33,12 +33,12 @@ impl RawUdpProtocolHandler {
|
||||
break (size, descriptor);
|
||||
};
|
||||
|
||||
tracing::Span::current().record("ret.len", &size);
|
||||
tracing::Span::current().record("ret.from", &format!("{:?}", descriptor).as_str());
|
||||
// tracing::Span::current().record("ret.len", &size);
|
||||
// tracing::Span::current().record("ret.descriptor", &format!("{:?}", descriptor).as_str());
|
||||
Ok((size, descriptor))
|
||||
}
|
||||
|
||||
#[instrument(level = "trace", err, skip(self, data), fields(data.len = data.len(), ret.len, ret.from))]
|
||||
#[instrument(level = "trace", err, skip(self, data), fields(data.len = data.len(), ret.len, ret.descriptor))]
|
||||
pub async fn send_message(
|
||||
&self,
|
||||
data: Vec<u8>,
|
||||
@ -67,6 +67,8 @@ impl RawUdpProtocolHandler {
|
||||
bail_io_error_other!("UDP partial send")
|
||||
}
|
||||
|
||||
tracing::Span::current().record("ret.len", &len);
|
||||
tracing::Span::current().record("ret.descriptor", &format!("{:?}", descriptor).as_str());
|
||||
Ok(NetworkResult::value(descriptor))
|
||||
}
|
||||
|
||||
|
@ -93,7 +93,7 @@ where
|
||||
Ok(out)
|
||||
}
|
||||
|
||||
#[instrument(level = "trace", err, skip(self), fields(network_result, ret.len))]
|
||||
// #[instrument(level = "trace", err, skip(self), fields(network_result, ret.len))]
|
||||
pub async fn recv(&self) -> io::Result<NetworkResult<Vec<u8>>> {
|
||||
let out = match self.stream.clone().next().await {
|
||||
Some(Ok(Message::Binary(v))) => {
|
||||
@ -120,7 +120,7 @@ where
|
||||
)),
|
||||
};
|
||||
|
||||
tracing::Span::current().record("network_result", &tracing::field::display(&out));
|
||||
// tracing::Span::current().record("network_result", &tracing::field::display(&out));
|
||||
Ok(out)
|
||||
}
|
||||
}
|
||||
@ -129,7 +129,6 @@ where
|
||||
///
|
||||
struct WebsocketProtocolHandlerArc {
|
||||
tls: bool,
|
||||
local_address: SocketAddr,
|
||||
request_path: Vec<u8>,
|
||||
connection_initial_timeout_ms: u32,
|
||||
}
|
||||
@ -142,7 +141,7 @@ where
|
||||
arc: Arc<WebsocketProtocolHandlerArc>,
|
||||
}
|
||||
impl WebsocketProtocolHandler {
|
||||
pub fn new(config: VeilidConfig, tls: bool, local_address: SocketAddr) -> Self {
|
||||
pub fn new(config: VeilidConfig, tls: bool) -> Self {
|
||||
let c = config.get();
|
||||
let path = if tls {
|
||||
format!("GET /{}", c.network.protocol.ws.path.trim_end_matches('/'))
|
||||
@ -158,7 +157,6 @@ impl WebsocketProtocolHandler {
|
||||
Self {
|
||||
arc: Arc::new(WebsocketProtocolHandlerArc {
|
||||
tls,
|
||||
local_address,
|
||||
request_path: path.as_bytes().to_vec(),
|
||||
connection_initial_timeout_ms,
|
||||
}),
|
||||
@ -170,6 +168,7 @@ impl WebsocketProtocolHandler {
|
||||
self,
|
||||
ps: AsyncPeekStream,
|
||||
socket_addr: SocketAddr,
|
||||
local_addr: SocketAddr,
|
||||
) -> io::Result<Option<ProtocolNetworkConnection>> {
|
||||
log_net!("WS: on_accept_async: enter");
|
||||
let request_path_len = self.arc.request_path.len() + 2;
|
||||
@ -209,10 +208,7 @@ impl WebsocketProtocolHandler {
|
||||
PeerAddress::new(SocketAddress::from_socket_addr(socket_addr), protocol_type);
|
||||
|
||||
let conn = ProtocolNetworkConnection::WsAccepted(WebsocketNetworkConnection::new(
|
||||
ConnectionDescriptor::new(
|
||||
peer_addr,
|
||||
SocketAddress::from_socket_addr(self.arc.local_address),
|
||||
),
|
||||
ConnectionDescriptor::new(peer_addr, SocketAddress::from_socket_addr(local_addr)),
|
||||
ws_stream,
|
||||
));
|
||||
|
||||
@ -221,7 +217,7 @@ impl WebsocketProtocolHandler {
|
||||
Ok(Some(conn))
|
||||
}
|
||||
|
||||
#[instrument(level = "trace", err)]
|
||||
#[instrument(level = "trace", ret, err)]
|
||||
pub async fn connect(
|
||||
local_address: Option<SocketAddr>,
|
||||
dial_info: &DialInfo,
|
||||
@ -296,7 +292,8 @@ impl ProtocolAcceptHandler for WebsocketProtocolHandler {
|
||||
&self,
|
||||
stream: AsyncPeekStream,
|
||||
peer_addr: SocketAddr,
|
||||
local_addr: SocketAddr,
|
||||
) -> SendPinBoxFuture<io::Result<Option<ProtocolNetworkConnection>>> {
|
||||
Box::pin(self.clone().on_accept_async(stream, peer_addr))
|
||||
Box::pin(self.clone().on_accept_async(stream, peer_addr, local_addr))
|
||||
}
|
||||
}
|
||||
|
@ -387,7 +387,7 @@ impl Network {
|
||||
ip_addrs,
|
||||
ws_port,
|
||||
false,
|
||||
Box::new(|c, t, a| Box::new(WebsocketProtocolHandler::new(c, t, a))),
|
||||
Box::new(|c, t| Box::new(WebsocketProtocolHandler::new(c, t))),
|
||||
)
|
||||
.await?;
|
||||
trace!("WS: listener started on {:#?}", socket_addresses);
|
||||
@ -496,7 +496,7 @@ impl Network {
|
||||
ip_addrs,
|
||||
wss_port,
|
||||
true,
|
||||
Box::new(|c, t, a| Box::new(WebsocketProtocolHandler::new(c, t, a))),
|
||||
Box::new(|c, t| Box::new(WebsocketProtocolHandler::new(c, t))),
|
||||
)
|
||||
.await?;
|
||||
trace!("WSS: listener started on {:#?}", socket_addresses);
|
||||
@ -590,7 +590,7 @@ impl Network {
|
||||
ip_addrs,
|
||||
tcp_port,
|
||||
false,
|
||||
Box::new(move |c, _, a| Box::new(RawTcpProtocolHandler::new(c, a))),
|
||||
Box::new(move |c, _| Box::new(RawTcpProtocolHandler::new(c))),
|
||||
)
|
||||
.await?;
|
||||
trace!("TCP: listener started on {:#?}", socket_addresses);
|
||||
|
@ -16,6 +16,7 @@ cfg_if::cfg_if! {
|
||||
&self,
|
||||
stream: AsyncPeekStream,
|
||||
peer_addr: SocketAddr,
|
||||
local_addr: SocketAddr,
|
||||
) -> SendPinBoxFuture<io::Result<Option<ProtocolNetworkConnection>>>;
|
||||
}
|
||||
|
||||
@ -38,7 +39,7 @@ cfg_if::cfg_if! {
|
||||
}
|
||||
|
||||
pub type NewProtocolAcceptHandler =
|
||||
dyn Fn(VeilidConfig, bool, SocketAddr) -> Box<dyn ProtocolAcceptHandler> + Send;
|
||||
dyn Fn(VeilidConfig, bool) -> Box<dyn ProtocolAcceptHandler> + Send;
|
||||
}
|
||||
}
|
||||
///////////////////////////////////////////////////////////
|
||||
@ -91,7 +92,7 @@ pub struct NetworkConnection {
|
||||
processor: Option<MustJoinHandle<()>>,
|
||||
established_time: u64,
|
||||
stats: Arc<Mutex<NetworkConnectionStats>>,
|
||||
sender: flume::Sender<Vec<u8>>,
|
||||
sender: flume::Sender<(Option<Id>, Vec<u8>)>,
|
||||
stop_source: Option<StopSource>,
|
||||
}
|
||||
|
||||
@ -120,9 +121,6 @@ impl NetworkConnection {
|
||||
protocol_connection: ProtocolNetworkConnection,
|
||||
connection_id: NetworkConnectionId,
|
||||
) -> Self {
|
||||
// Get timeout
|
||||
let network_manager = connection_manager.network_manager();
|
||||
|
||||
// Get descriptor
|
||||
let descriptor = protocol_connection.descriptor();
|
||||
|
||||
@ -181,6 +179,7 @@ impl NetworkConnection {
|
||||
}
|
||||
}
|
||||
|
||||
#[instrument(level="trace", skip(message, stats), fields(message.len = message.len()), ret, err)]
|
||||
async fn send_internal(
|
||||
protocol_connection: &ProtocolNetworkConnection,
|
||||
stats: Arc<Mutex<NetworkConnectionStats>>,
|
||||
@ -194,6 +193,8 @@ impl NetworkConnection {
|
||||
|
||||
Ok(NetworkResult::Value(out))
|
||||
}
|
||||
|
||||
#[instrument(level="trace", skip(stats), fields(ret.len), err)]
|
||||
async fn recv_internal(
|
||||
protocol_connection: &ProtocolNetworkConnection,
|
||||
stats: Arc<Mutex<NetworkConnectionStats>>,
|
||||
@ -204,14 +205,18 @@ impl NetworkConnection {
|
||||
let mut stats = stats.lock();
|
||||
stats.last_message_recv_time.max_assign(Some(ts));
|
||||
|
||||
tracing::Span::current().record("ret.len", out.len());
|
||||
|
||||
Ok(NetworkResult::Value(out))
|
||||
}
|
||||
|
||||
#[allow(dead_code)]
|
||||
pub fn stats(&self) -> NetworkConnectionStats {
|
||||
let stats = self.stats.lock();
|
||||
stats.clone()
|
||||
}
|
||||
|
||||
#[allow(dead_code)]
|
||||
pub fn established_time(&self) -> u64 {
|
||||
self.established_time
|
||||
}
|
||||
@ -223,7 +228,7 @@ impl NetworkConnection {
|
||||
manager_stop_token: StopToken,
|
||||
connection_id: NetworkConnectionId,
|
||||
descriptor: ConnectionDescriptor,
|
||||
receiver: flume::Receiver<Vec<u8>>,
|
||||
receiver: flume::Receiver<(Option<Id>, Vec<u8>)>,
|
||||
protocol_connection: ProtocolNetworkConnection,
|
||||
stats: Arc<Mutex<NetworkConnectionStats>>,
|
||||
) -> SendPinBoxFuture<()> {
|
||||
@ -249,7 +254,7 @@ impl NetworkConnection {
|
||||
};
|
||||
let timer = MutableFuture::new(new_timer());
|
||||
|
||||
unord.push(system_boxed(timer.clone()));
|
||||
unord.push(system_boxed(timer.clone().instrument(Span::current())));
|
||||
|
||||
loop {
|
||||
// Add another message sender future if necessary
|
||||
@ -257,13 +262,18 @@ impl NetworkConnection {
|
||||
need_sender = false;
|
||||
let sender_fut = receiver.recv_async().then(|res| async {
|
||||
match res {
|
||||
Ok(message) => {
|
||||
Ok((_span_id, message)) => {
|
||||
|
||||
let recv_span = span!(Level::TRACE, "process_connection recv");
|
||||
// xxx: causes crash (Missing otel data span extensions)
|
||||
// recv_span.follows_from(span_id);
|
||||
|
||||
// send the packet
|
||||
if let Err(e) = Self::send_internal(
|
||||
&protocol_connection,
|
||||
stats.clone(),
|
||||
message,
|
||||
)
|
||||
).instrument(recv_span)
|
||||
.await
|
||||
{
|
||||
// Sending the packet along can fail, if so, this connection is dead
|
||||
@ -280,7 +290,7 @@ impl NetworkConnection {
|
||||
}
|
||||
}
|
||||
});
|
||||
unord.push(system_boxed(sender_fut));
|
||||
unord.push(system_boxed(sender_fut.instrument(Span::current())));
|
||||
}
|
||||
|
||||
// Add another message receiver future if necessary
|
||||
@ -314,7 +324,7 @@ impl NetworkConnection {
|
||||
}
|
||||
});
|
||||
|
||||
unord.push(system_boxed(receiver_fut));
|
||||
unord.push(system_boxed(receiver_fut.instrument(Span::current())));
|
||||
}
|
||||
|
||||
// Process futures
|
||||
@ -358,7 +368,7 @@ impl NetworkConnection {
|
||||
connection_manager
|
||||
.report_connection_finished(connection_id)
|
||||
.await;
|
||||
})
|
||||
}.instrument(trace_span!("process_connection")))
|
||||
}
|
||||
}
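A recurring pattern in this change: every future pushed into a FuturesUnordered (or boxed for later spawning) is wrapped with .instrument(Span::current()) so the tracing context survives the boxing. A minimal self-contained example of that pattern using futures-util and tracing:

use futures_util::stream::FuturesUnordered;
use futures_util::{FutureExt, StreamExt};
use tracing::{Instrument, Span};

async fn drive_instrumented_tasks() {
    let mut unord = FuturesUnordered::new();
    for i in 0..3u32 {
        unord.push(
            async move {
                // stand-in for the sender / receiver / timer futures
                i * 2
            }
            .instrument(Span::current()) // keep the caller's span across the boxing
            .boxed(),
        );
    }
    // Drain completions; the real loop also re-arms sender/receiver futures
    // and watches the inactivity timer here.
    while let Some(_result) = unord.next().await {}
}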
|
||||
|
||||
|
@ -1,6 +1,6 @@
|
||||
use super::*;
|
||||
|
||||
use crate::dht::*;
|
||||
use crate::crypto::*;
|
||||
use crate::xx::*;
|
||||
use futures_util::FutureExt;
|
||||
use stop_token::future::FutureExt as StopFutureExt;
|
||||
@ -39,7 +39,8 @@ impl NetworkManager {
|
||||
// Get bootstrap nodes from hostnames concurrently
|
||||
let mut unord = FuturesUnordered::new();
|
||||
for bsname in bsnames {
|
||||
unord.push(async move {
|
||||
unord.push(
|
||||
async move {
|
||||
// look up bootstrap node txt records
|
||||
let bsnirecords = match intf::txt_lookup(&bsname).await {
|
||||
Err(e) => {
|
||||
@ -136,7 +137,10 @@ impl NetworkManager {
|
||||
let dial_infos = match DialInfo::try_vec_from_short(rec, hostname_str) {
|
||||
Ok(dis) => dis,
|
||||
Err(e) => {
|
||||
warn!("Couldn't resolve bootstrap node dial info {}: {}", rec, e);
|
||||
warn!(
|
||||
"Couldn't resolve bootstrap node dial info {}: {}",
|
||||
rec, e
|
||||
);
|
||||
continue;
|
||||
}
|
||||
};
|
||||
@ -151,7 +155,9 @@ impl NetworkManager {
|
||||
bootstrap_records.push((node_id_key, bootstrap_record));
|
||||
}
|
||||
Some(bootstrap_records)
|
||||
});
|
||||
}
|
||||
.instrument(Span::current()),
|
||||
);
|
||||
}
|
||||
|
||||
let mut bsmap = BootstrapRecordMap::new();
|
||||
@ -172,6 +178,7 @@ impl NetworkManager {
|
||||
}
|
||||
|
||||
// 'direct' bootstrap task routine for systems incapable of resolving TXT records, such as browser WASM
|
||||
#[instrument(level = "trace", skip(self), err)]
|
||||
pub(super) async fn direct_bootstrap_task_routine(
|
||||
self,
|
||||
stop_token: StopToken,
|
||||
@ -201,7 +208,8 @@ impl NetworkManager {
|
||||
let routing_table = routing_table.clone();
|
||||
unord.push(
|
||||
// lets ask bootstrap to find ourselves now
|
||||
async move { routing_table.reverse_find_node(nr, true).await },
|
||||
async move { routing_table.reverse_find_node(nr, true).await }
|
||||
.instrument(Span::current()),
|
||||
);
|
||||
}
|
||||
}
|
||||
@ -216,7 +224,7 @@ impl NetworkManager {
|
||||
#[instrument(level = "trace", skip(self), err)]
|
||||
pub(super) async fn bootstrap_task_routine(self, stop_token: StopToken) -> EyreResult<()> {
|
||||
let (bootstrap, bootstrap_nodes) = {
|
||||
let c = self.config.get();
|
||||
let c = self.unlocked_inner.config.get();
|
||||
(
|
||||
c.network.bootstrap.clone(),
|
||||
c.network.bootstrap_nodes.clone(),
|
||||
@ -248,22 +256,26 @@ impl NetworkManager {
|
||||
let mut bsmap = BootstrapRecordMap::new();
|
||||
let mut bootstrap_node_dial_infos = Vec::new();
|
||||
for b in bootstrap_nodes {
|
||||
let ndis = NodeDialInfo::from_str(b.as_str())
|
||||
.wrap_err("Invalid node dial info in bootstrap entry")?;
|
||||
bootstrap_node_dial_infos.push(ndis);
|
||||
let (id_str, di_str) = b
|
||||
.split_once('@')
|
||||
.ok_or_else(|| eyre!("Invalid node dial info in bootstrap entry"))?;
|
||||
let node_id =
|
||||
NodeId::from_str(id_str).wrap_err("Invalid node id in bootstrap entry")?;
|
||||
let dial_info =
|
||||
DialInfo::from_str(di_str).wrap_err("Invalid dial info in bootstrap entry")?;
|
||||
bootstrap_node_dial_infos.push((node_id, dial_info));
|
||||
}
|
||||
for ndi in bootstrap_node_dial_infos {
|
||||
let node_id = ndi.node_id.key;
|
||||
for (node_id, dial_info) in bootstrap_node_dial_infos {
|
||||
bsmap
|
||||
.entry(node_id)
|
||||
.entry(node_id.key)
|
||||
.or_insert_with(|| BootstrapRecord {
|
||||
min_version: MIN_VERSION,
|
||||
max_version: MAX_VERSION,
|
||||
min_version: MIN_CRYPTO_VERSION,
|
||||
max_version: MAX_CRYPTO_VERSION,
|
||||
dial_info_details: Vec::new(),
|
||||
})
|
||||
.dial_info_details
|
||||
.push(DialInfoDetail {
|
||||
dial_info: ndi.dial_info,
|
||||
dial_info,
|
||||
class: DialInfoClass::Direct, // Bootstraps are always directly reachable
|
||||
});
|
||||
}
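The bootstrap_nodes entries are now parsed as "<node_id>@<dial_info>" rather than going through NodeDialInfo::from_str. A small stand-alone version of that split, using plain strings in place of NodeId/DialInfo (the example value is made up):

fn parse_bootstrap_entry(entry: &str) -> Result<(String, String), String> {
    // Same split as above: everything before '@' is the node id, the rest is
    // the dial info, each parsed by its own FromStr in the real code.
    let (id_str, di_str) = entry
        .split_once('@')
        .ok_or_else(|| format!("Invalid node dial info in bootstrap entry: {}", entry))?;
    if id_str.is_empty() || di_str.is_empty() {
        return Err(format!("Empty field in bootstrap entry: {}", entry));
    }
    Ok((id_str.to_string(), di_str.to_string()))
}

// e.g. parse_bootstrap_entry("examplenodeid@udp/bootstrap.example.com:5150")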
|
||||
@ -287,20 +299,20 @@ impl NetworkManager {
|
||||
if let Some(nr) = routing_table.register_node_with_signed_node_info(
|
||||
RoutingDomain::PublicInternet,
|
||||
k,
|
||||
SignedNodeInfo::with_no_signature(NodeInfo {
|
||||
SignedNodeInfo::Direct(SignedDirectNodeInfo::with_no_signature(NodeInfo {
|
||||
network_class: NetworkClass::InboundCapable, // Bootstraps are always inbound capable
|
||||
outbound_protocols: ProtocolTypeSet::only(ProtocolType::UDP), // Bootstraps do not participate in relaying and will not make outbound requests, but will have UDP enabled
|
||||
address_types: AddressTypeSet::all(), // Bootstraps are always IPV4 and IPV6 capable
|
||||
min_version: v.min_version, // Minimum protocol version specified in txt record
|
||||
max_version: v.max_version, // Maximum protocol version specified in txt record
|
||||
min_version: v.min_version, // Minimum crypto version specified in txt record
|
||||
max_version: v.max_version, // Maximum crypto version specified in txt record
|
||||
dial_info_detail_list: v.dial_info_details, // Dial info is as specified in the bootstrap list
|
||||
relay_peer_info: None, // Bootstraps never require a relay themselves
|
||||
}),
|
||||
})),
|
||||
true,
|
||||
) {
|
||||
// Add this to our futures to process in parallel
|
||||
let routing_table = routing_table.clone();
|
||||
unord.push(async move {
|
||||
unord.push(
|
||||
async move {
|
||||
// Need VALID signed peer info, so ask bootstrap to find_node of itself
|
||||
// which will ensure it has the bootstrap's signed peer info as part of the response
|
||||
let _ = routing_table.find_target(nr.clone()).await;
|
||||
@ -316,7 +328,9 @@ impl NetworkManager {
|
||||
// otherwise this bootstrap is valid, lets ask it to find ourselves now
|
||||
routing_table.reverse_find_node(nr, true).await
|
||||
}
|
||||
});
|
||||
}
|
||||
.instrument(Span::current()),
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
@ -332,7 +346,7 @@ impl NetworkManager {
|
||||
&self,
|
||||
cur_ts: u64,
|
||||
unord: &mut FuturesUnordered<
|
||||
SendPinBoxFuture<Result<NetworkResult<Answer<SenderInfo>>, RPCError>>,
|
||||
SendPinBoxFuture<Result<NetworkResult<Answer<Option<SenderInfo>>>, RPCError>>,
|
||||
>,
|
||||
) -> EyreResult<()> {
|
||||
let rpc = self.rpc_processor();
|
||||
@ -342,7 +356,7 @@ impl NetworkManager {
|
||||
let node_refs = routing_table.get_nodes_needing_ping(RoutingDomain::PublicInternet, cur_ts);
|
||||
|
||||
// Look up any NAT mappings we may need to try to preserve with keepalives
|
||||
let mut mapped_port_info = routing_table.get_mapped_port_info();
|
||||
let mut mapped_port_info = routing_table.get_low_level_port_info();
|
||||
|
||||
// Get the PublicInternet relay if we are using one
|
||||
let opt_relay_nr = routing_table.relay_node(RoutingDomain::PublicInternet);
|
||||
@ -382,7 +396,11 @@ impl NetworkManager {
|
||||
let nr_filtered =
|
||||
nr.filtered_clone(NodeRefFilter::new().with_dial_info_filter(dif));
|
||||
log_net!("--> Keepalive ping to {:?}", nr_filtered);
|
||||
unord.push(async move { rpc.rpc_call_status(nr_filtered).await }.boxed());
|
||||
unord.push(
|
||||
async move { rpc.rpc_call_status(Destination::direct(nr_filtered)).await }
|
||||
.instrument(Span::current())
|
||||
.boxed(),
|
||||
);
|
||||
did_pings = true;
|
||||
}
|
||||
}
|
||||
@ -392,7 +410,11 @@ impl NetworkManager {
|
||||
// any mapped ports to preserve
|
||||
if !did_pings {
|
||||
let rpc = rpc.clone();
|
||||
unord.push(async move { rpc.rpc_call_status(nr).await }.boxed());
|
||||
unord.push(
|
||||
async move { rpc.rpc_call_status(Destination::direct(nr)).await }
|
||||
.instrument(Span::current())
|
||||
.boxed(),
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
@ -406,7 +428,7 @@ impl NetworkManager {
|
||||
&self,
|
||||
cur_ts: u64,
|
||||
unord: &mut FuturesUnordered<
|
||||
SendPinBoxFuture<Result<NetworkResult<Answer<SenderInfo>>, RPCError>>,
|
||||
SendPinBoxFuture<Result<NetworkResult<Answer<Option<SenderInfo>>>, RPCError>>,
|
||||
>,
|
||||
) -> EyreResult<()> {
|
||||
let rpc = self.rpc_processor();
|
||||
@ -420,7 +442,11 @@ impl NetworkManager {
|
||||
let rpc = rpc.clone();
|
||||
|
||||
// Just do a single ping with the best protocol for all the nodes
|
||||
unord.push(async move { rpc.rpc_call_status(nr).await }.boxed());
|
||||
unord.push(
|
||||
async move { rpc.rpc_call_status(Destination::direct(nr)).await }
|
||||
.instrument(Span::current())
|
||||
.boxed(),
|
||||
);
|
||||
}
|
||||
|
||||
Ok(())
|
||||
@ -464,7 +490,7 @@ impl NetworkManager {
|
||||
let routing_table = self.routing_table();
|
||||
let mut ord = FuturesOrdered::new();
|
||||
let min_peer_count = {
|
||||
let c = self.config.get();
|
||||
let c = self.unlocked_inner.config.get();
|
||||
c.network.dht.min_peer_count as usize
|
||||
};
|
||||
|
||||
@ -472,14 +498,17 @@ impl NetworkManager {
|
||||
// even the unreliable ones, and ask them to find nodes close to our node too
|
||||
let noderefs = routing_table.find_fastest_nodes(
|
||||
min_peer_count,
|
||||
|_k, _v| true,
|
||||
|k: DHTKey, v: Option<Arc<BucketEntry>>| {
|
||||
VecDeque::new(),
|
||||
|_rti, k: DHTKey, v: Option<Arc<BucketEntry>>| {
|
||||
NodeRef::new(routing_table.clone(), k, v.unwrap().clone(), None)
|
||||
},
|
||||
);
|
||||
for nr in noderefs {
|
||||
let routing_table = routing_table.clone();
|
||||
ord.push_back(async move { routing_table.reverse_find_node(nr, false).await });
|
||||
ord.push_back(
|
||||
async move { routing_table.reverse_find_node(nr, false).await }
|
||||
.instrument(Span::current()),
|
||||
);
|
||||
}
|
||||
|
||||
// do peer minimum search in order from fastest to slowest
|
||||
@ -498,8 +527,9 @@ impl NetworkManager {
|
||||
) -> EyreResult<()> {
|
||||
// Get our node's current node info and network class and do the right thing
|
||||
let routing_table = self.routing_table();
|
||||
let node_info = routing_table.get_own_node_info(RoutingDomain::PublicInternet);
|
||||
let network_class = self.get_network_class(RoutingDomain::PublicInternet);
|
||||
let own_peer_info = routing_table.get_own_peer_info(RoutingDomain::PublicInternet);
|
||||
let own_node_info = own_peer_info.signed_node_info.node_info();
|
||||
let network_class = routing_table.get_network_class(RoutingDomain::PublicInternet);
|
||||
|
||||
// Get routing domain editor
|
||||
let mut editor = routing_table.edit_routing_domain(RoutingDomain::PublicInternet);
|
||||
@ -515,7 +545,7 @@ impl NetworkManager {
|
||||
info!("Relay node died, dropping relay {}", relay_node);
|
||||
editor.clear_relay_node();
|
||||
false
|
||||
} else if !node_info.requires_relay() {
|
||||
} else if !own_node_info.requires_relay() {
|
||||
info!(
|
||||
"Relay node no longer required, dropping relay {}",
|
||||
relay_node
|
||||
@ -531,8 +561,9 @@ impl NetworkManager {
|
||||
};
|
||||
|
||||
// Do we need a relay?
|
||||
if !has_relay && node_info.requires_relay() {
|
||||
// Do we need an outbound relay?
|
||||
if !has_relay && own_node_info.requires_relay() {
|
||||
// Do we want an outbound relay?
|
||||
let mut got_outbound_relay = false;
|
||||
if network_class.outbound_wants_relay() {
|
||||
// The outbound relay is the host of the PWA
|
||||
if let Some(outbound_relay_peerinfo) = intf::get_outbound_relay_peer().await {
|
||||
@ -545,10 +576,11 @@ impl NetworkManager {
|
||||
) {
|
||||
info!("Outbound relay node selected: {}", nr);
|
||||
editor.set_relay_node(nr);
|
||||
got_outbound_relay = true;
|
||||
}
|
||||
}
|
||||
// Otherwise we must need an inbound relay
|
||||
} else {
|
||||
}
|
||||
if !got_outbound_relay {
|
||||
// Find a node in our routing table that is an acceptable inbound relay
|
||||
if let Some(nr) =
|
||||
routing_table.find_inbound_relay(RoutingDomain::PublicInternet, cur_ts)
|
||||
@ -566,6 +598,34 @@ impl NetworkManager {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
// Keep private routes assigned and accessible
|
||||
#[instrument(level = "trace", skip(self), err)]
|
||||
pub(super) async fn private_route_management_task_routine(
|
||||
self,
|
||||
_stop_token: StopToken,
|
||||
_last_ts: u64,
|
||||
cur_ts: u64,
|
||||
) -> EyreResult<()> {
|
||||
// Get our node's current node info and network class and do the right thing
|
||||
let routing_table = self.routing_table();
|
||||
let own_peer_info = routing_table.get_own_peer_info(RoutingDomain::PublicInternet);
|
||||
let network_class = routing_table.get_network_class(RoutingDomain::PublicInternet);
|
||||
|
||||
// Get routing domain editor
|
||||
let mut editor = routing_table.edit_routing_domain(RoutingDomain::PublicInternet);
|
||||
|
||||
// Do we know our network class yet?
|
||||
if let Some(network_class) = network_class {
|
||||
|
||||
// see if we have any routes that need extending
|
||||
}
|
||||
|
||||
// Commit the changes
|
||||
editor.commit().await;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
// Compute transfer statistics for the low level network
|
||||
#[instrument(level = "trace", skip(self), err)]
|
||||
pub(super) async fn rolling_transfers_task_routine(
|
||||
|
@ -52,6 +52,7 @@ pub async fn test_add_get_remove() {
|
||||
);
|
||||
|
||||
let c1 = NetworkConnection::dummy(1, a1);
|
||||
let c1b = NetworkConnection::dummy(10, a1);
|
||||
let c1h = c1.get_handle();
|
||||
let c2 = NetworkConnection::dummy(2, a2);
|
||||
let c3 = NetworkConnection::dummy(3, a3);
|
||||
@ -65,6 +66,7 @@ pub async fn test_add_get_remove() {
|
||||
assert_eq!(table.connection_count(), 0);
|
||||
assert_eq!(table.get_connection_by_descriptor(a1), None);
|
||||
table.add_connection(c1).unwrap();
|
||||
assert!(table.add_connection(c1b).is_err());
|
||||
|
||||
assert_eq!(table.connection_count(), 1);
|
||||
assert!(table.remove_connection_by_id(4).is_none());
|
||||
|
@ -10,25 +10,43 @@ use std::io;
|
||||
/////////////////////////////////////////////////////////////////
|
||||
|
||||
struct NetworkInner {
|
||||
network_manager: NetworkManager,
|
||||
network_started: bool,
|
||||
network_needs_restart: bool,
|
||||
protocol_config: Option<ProtocolConfig>,
|
||||
protocol_config: ProtocolConfig,
|
||||
}
|
||||
|
||||
struct NetworkUnlockedInner {
|
||||
// Accessors
|
||||
routing_table: RoutingTable,
|
||||
network_manager: NetworkManager,
|
||||
connection_manager: ConnectionManager,
|
||||
}
|
||||
|
||||
#[derive(Clone)]
|
||||
pub struct Network {
|
||||
config: VeilidConfig,
|
||||
inner: Arc<Mutex<NetworkInner>>,
|
||||
unlocked_inner: Arc<NetworkUnlockedInner>,
|
||||
}
|
||||
|
||||
impl Network {
|
||||
fn new_inner(network_manager: NetworkManager) -> NetworkInner {
|
||||
fn new_inner() -> NetworkInner {
|
||||
NetworkInner {
|
||||
network_manager,
|
||||
network_started: false,
|
||||
network_needs_restart: false,
|
||||
protocol_config: None, //join_handle: None,
|
||||
protocol_config: Default::default(),
|
||||
}
|
||||
}
|
||||
|
||||
fn new_unlocked_inner(
|
||||
network_manager: NetworkManager,
|
||||
routing_table: RoutingTable,
|
||||
connection_manager: ConnectionManager,
|
||||
) -> NetworkUnlockedInner {
|
||||
NetworkUnlockedInner {
|
||||
network_manager,
|
||||
routing_table,
|
||||
connection_manager,
|
||||
}
|
||||
}
|
||||
|
||||
@ -39,15 +57,23 @@ impl Network {
|
||||
) -> Self {
|
||||
Self {
|
||||
config: network_manager.config(),
|
||||
inner: Arc::new(Mutex::new(Self::new_inner(network_manager))),
|
||||
inner: Arc::new(Mutex::new(Self::new_inner())),
|
||||
unlocked_inner: Arc::new(Self::new_unlocked_inner(
|
||||
network_manager,
|
||||
routing_table,
|
||||
connection_manager,
|
||||
)),
|
||||
}
|
||||
}
|
||||
|
||||
fn network_manager(&self) -> NetworkManager {
|
||||
self.inner.lock().network_manager.clone()
|
||||
self.unlocked_inner.network_manager.clone()
|
||||
}
|
||||
fn routing_table(&self) -> RoutingTable {
|
||||
self.unlocked_inner.routing_table.clone()
|
||||
}
|
||||
fn connection_manager(&self) -> ConnectionManager {
|
||||
self.inner.lock().network_manager.connection_manager()
|
||||
self.unlocked_inner.connection_manager.clone()
|
||||
}
|
||||
|
||||
/////////////////////////////////////////////////////////////////
|
||||
@ -225,7 +251,7 @@ impl Network {
|
||||
|
||||
pub async fn startup(&self) -> EyreResult<()> {
|
||||
// get protocol config
|
||||
self.inner.lock().protocol_config = Some({
|
||||
self.inner.lock().protocol_config = {
|
||||
let c = self.config.get();
|
||||
let inbound = ProtocolTypeSet::new();
|
||||
let mut outbound = ProtocolTypeSet::new();
|
||||
@ -247,7 +273,7 @@ impl Network {
|
||||
family_global,
|
||||
family_local,
|
||||
}
|
||||
});
|
||||
};
|
||||
|
||||
self.inner.lock().network_started = true;
|
||||
Ok(())
|
||||
@ -269,20 +295,26 @@ impl Network {
|
||||
trace!("stopping network");
|
||||
|
||||
// Reset state
|
||||
let network_manager = self.inner.lock().network_manager.clone();
|
||||
let routing_table = network_manager.routing_table();
|
||||
let routing_table = self.routing_table();
|
||||
|
||||
// Drop all dial info
|
||||
routing_table.clear_dial_info_details(RoutingDomain::PublicInternet);
|
||||
routing_table.clear_dial_info_details(RoutingDomain::LocalNetwork);
|
||||
let mut editor = routing_table.edit_routing_domain(RoutingDomain::PublicInternet);
|
||||
editor.disable_node_info_updates();
|
||||
editor.clear_dial_info_details();
|
||||
editor.commit().await;
|
||||
|
||||
let mut editor = routing_table.edit_routing_domain(RoutingDomain::LocalNetwork);
|
||||
editor.disable_node_info_updates();
|
||||
editor.clear_dial_info_details();
|
||||
editor.commit().await;
|
||||
|
||||
// Cancels all async background tasks by dropping join handles
|
||||
*self.inner.lock() = Self::new_inner(network_manager);
|
||||
*self.inner.lock() = Self::new_inner();
|
||||
|
||||
trace!("network stopped");
|
||||
}
|
||||
|
||||
pub fn is_usable_interface_address(&self, addr: IpAddr) -> bool {
|
||||
pub fn is_usable_interface_address(&self, _addr: IpAddr) -> bool {
|
||||
false
|
||||
}
|
||||
|
||||
@ -292,11 +324,14 @@ impl Network {
|
||||
|
||||
//////////////////////////////////////////
|
||||
|
||||
pub fn set_needs_public_dial_info_check(&self, _punishment: Option<Box<dyn FnOnce() + Send + 'static>>) {
|
||||
pub fn set_needs_public_dial_info_check(
|
||||
&self,
|
||||
_punishment: Option<Box<dyn FnOnce() + Send + 'static>>,
|
||||
) {
|
||||
//
|
||||
}
|
||||
|
||||
pub fn doing_public_dial_info_check(&self) -> bool {
|
||||
pub fn needs_public_dial_info_check(&self) -> bool {
|
||||
false
|
||||
}
|
||||
|
||||
@ -309,7 +344,7 @@ impl Network {
|
||||
};
|
||||
}
|
||||
|
||||
pub fn get_protocol_config(&self) -> Option<ProtocolConfig> {
|
||||
pub fn get_protocol_config(&self) -> ProtocolConfig {
|
||||
self.inner.lock().protocol_config.clone()
|
||||
}
|
||||
|
||||
|
@ -75,7 +75,7 @@ impl WebsocketNetworkConnection {
|
||||
Ok(out)
|
||||
}
|
||||
|
||||
#[instrument(level = "trace", err, skip(self), fields(network_result, ret.len))]
|
||||
// #[instrument(level = "trace", err, skip(self), fields(network_result, ret.len))]
|
||||
pub async fn recv(&self) -> io::Result<NetworkResult<Vec<u8>>> {
|
||||
let out = match SendWrapper::new(self.inner.ws_stream.clone().next()).await {
|
||||
Some(WsMessage::Binary(v)) => {
|
||||
@ -95,7 +95,7 @@ impl WebsocketNetworkConnection {
|
||||
bail_io_error_other!("WS stream closed");
|
||||
}
|
||||
};
|
||||
tracing::Span::current().record("network_result", &tracing::field::display(&out));
|
||||
// tracing::Span::current().record("network_result", &tracing::field::display(&out));
|
||||
Ok(out)
|
||||
}
|
||||
}
|
||||
@ -106,13 +106,13 @@ impl WebsocketNetworkConnection {
|
||||
pub struct WebsocketProtocolHandler {}
|
||||
|
||||
impl WebsocketProtocolHandler {
|
||||
#[instrument(level = "trace", err)]
|
||||
#[instrument(level = "trace", ret, err)]
|
||||
pub async fn connect(
|
||||
dial_info: &DialInfo,
|
||||
timeout_ms: u32,
|
||||
) -> io::Result<NetworkResult<ProtocolNetworkConnection>> {
|
||||
// Split dial info up
|
||||
let (tls, scheme) = match dial_info {
|
||||
let (_tls, scheme) = match dial_info {
|
||||
DialInfo::WS(_) => (false, "ws"),
|
||||
DialInfo::WSS(_) => (true, "wss"),
|
||||
_ => panic!("invalid dialinfo for WS/WSS protocol"),
|
||||
|
@ -1,20 +1,30 @@
|
||||
use crate::*;
|
||||
use core::fmt;
|
||||
use dht::*;
|
||||
use crypto::*;
|
||||
use futures_util::stream::{FuturesUnordered, StreamExt};
|
||||
use network_manager::*;
|
||||
use routing_table::*;
|
||||
use stop_token::future::FutureExt;
|
||||
use xx::*;
|
||||
|
||||
#[derive(Clone, Debug, PartialEq, Eq)]
|
||||
#[derive(Clone, Debug)]
|
||||
pub enum ReceiptEvent {
|
||||
ReturnedOutOfBand,
|
||||
ReturnedInBand { inbound_noderef: NodeRef },
|
||||
ReturnedSafety,
|
||||
ReturnedPrivate { private_route: DHTKey },
|
||||
Expired,
|
||||
Cancelled,
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug)]
|
||||
pub enum ReceiptReturned {
|
||||
OutOfBand,
|
||||
InBand { inbound_noderef: NodeRef },
|
||||
Safety,
|
||||
Private { private_route: DHTKey },
|
||||
}
|
||||
|
||||
pub trait ReceiptCallback: Send + 'static {
|
||||
fn call(
|
||||
&self,
|
||||
@ -246,7 +256,7 @@ impl ReceiptManager {
|
||||
if let Some(callback) =
|
||||
Self::perform_callback(ReceiptEvent::Expired, &mut expired_record_mut)
|
||||
{
|
||||
callbacks.push(callback)
|
||||
callbacks.push(callback.instrument(Span::current()))
|
||||
}
|
||||
}
|
||||
|
||||
@ -394,17 +404,18 @@ impl ReceiptManager {
|
||||
pub async fn handle_receipt(
|
||||
&self,
|
||||
receipt: Receipt,
|
||||
inbound_noderef: Option<NodeRef>,
|
||||
receipt_returned: ReceiptReturned,
|
||||
) -> NetworkResult<()> {
|
||||
let receipt_nonce = receipt.get_nonce();
|
||||
let extra_data = receipt.get_extra_data();
|
||||
|
||||
log_rpc!(debug "<<== RECEIPT {} <- {}{}",
|
||||
receipt_nonce.encode(),
|
||||
if let Some(nr) = &inbound_noderef {
|
||||
nr.to_string()
|
||||
} else {
|
||||
"DIRECT".to_owned()
|
||||
match receipt_returned {
|
||||
ReceiptReturned::OutOfBand => "OutOfBand".to_owned(),
|
||||
ReceiptReturned::InBand { ref inbound_noderef } => format!("InBand({})", inbound_noderef),
|
||||
ReceiptReturned::Safety => "Safety".to_owned(),
|
||||
ReceiptReturned::Private { ref private_route } => format!("Private({})", private_route),
|
||||
},
|
||||
if extra_data.is_empty() {
|
||||
"".to_owned()
|
||||
@ -435,10 +446,17 @@ impl ReceiptManager {
|
||||
record_mut.returns_so_far += 1;
|
||||
|
||||
// Get the receipt event to return
|
||||
let receipt_event = if let Some(inbound_noderef) = inbound_noderef {
|
||||
ReceiptEvent::ReturnedInBand { inbound_noderef }
|
||||
} else {
|
||||
ReceiptEvent::ReturnedOutOfBand
|
||||
let receipt_event = match receipt_returned {
|
||||
ReceiptReturned::OutOfBand => ReceiptEvent::ReturnedOutOfBand,
|
||||
ReceiptReturned::Safety => ReceiptEvent::ReturnedSafety,
|
||||
ReceiptReturned::InBand {
|
||||
ref inbound_noderef,
|
||||
} => ReceiptEvent::ReturnedInBand {
|
||||
inbound_noderef: inbound_noderef.clone(),
|
||||
},
|
||||
ReceiptReturned::Private { ref private_route } => ReceiptEvent::ReturnedPrivate {
|
||||
private_route: private_route.clone(),
|
||||
},
|
||||
};
|
||||
|
||||
let callback_future = Self::perform_callback(receipt_event, &mut record_mut);
|
||||
|
@ -1,5 +1,6 @@
|
||||
use super::*;
|
||||
use core::sync::atomic::Ordering;
|
||||
use rkyv::{Archive as RkyvArchive, Deserialize as RkyvDeserialize, Serialize as RkyvSerialize};
|
||||
|
||||
pub struct Bucket {
|
||||
routing_table: RoutingTable,
|
||||
@ -8,6 +9,20 @@ pub struct Bucket {
|
||||
}
|
||||
pub(super) type EntriesIter<'a> = alloc::collections::btree_map::Iter<'a, DHTKey, Arc<BucketEntry>>;
|
||||
|
||||
#[derive(Debug, RkyvArchive, RkyvSerialize, RkyvDeserialize)]
|
||||
#[archive_attr(repr(C), derive(CheckBytes))]
|
||||
struct BucketEntryData {
|
||||
key: DHTKey,
|
||||
value: Vec<u8>,
|
||||
}
|
||||
|
||||
#[derive(Debug, RkyvArchive, RkyvSerialize, RkyvDeserialize)]
|
||||
#[archive_attr(repr(C), derive(CheckBytes))]
|
||||
struct BucketData {
|
||||
entries: Vec<BucketEntryData>,
|
||||
newest_entry: Option<DHTKey>,
|
||||
}
|
||||
|
||||
fn state_ordering(state: BucketEntryState) -> usize {
|
||||
match state {
|
||||
BucketEntryState::Dead => 0,
|
||||
@ -25,6 +40,36 @@ impl Bucket {
|
||||
}
|
||||
}
|
||||
|
||||
pub(super) fn load_bucket(&mut self, data: Vec<u8>) -> EyreResult<()> {
|
||||
let bucket_data: BucketData = from_rkyv(data)?;
|
||||
|
||||
for e in bucket_data.entries {
|
||||
let entryinner = from_rkyv(e.value).wrap_err("failed to deserialize bucket entry")?;
|
||||
self.entries
|
||||
.insert(e.key, Arc::new(BucketEntry::new_with_inner(entryinner)));
|
||||
}
|
||||
|
||||
self.newest_entry = bucket_data.newest_entry;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
pub(super) fn save_bucket(&self) -> EyreResult<Vec<u8>> {
|
||||
let mut entries = Vec::new();
|
||||
for (k, v) in &self.entries {
|
||||
let entry_bytes = v.with_inner(|e| to_rkyv(e))?;
|
||||
entries.push(BucketEntryData {
|
||||
key: *k,
|
||||
value: entry_bytes,
|
||||
});
|
||||
}
|
||||
let bucket_data = BucketData {
|
||||
entries,
|
||||
newest_entry: self.newest_entry.clone(),
|
||||
};
|
||||
let out = to_rkyv(&bucket_data)?;
|
||||
Ok(out)
|
||||
}
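Editor's note: as an illustration only, here is a minimal sketch of how the new save_bucket/load_bucket pair could be round-tripped; it assumes a Bucket value from this file and uses nothing beyond the functions shown above.

// Hypothetical round-trip of a bucket through its rkyv representation.
fn roundtrip_bucket(bucket: &mut Bucket) -> EyreResult<()> {
    // entries + newest_entry -> rkyv bytes
    let blob: Vec<u8> = bucket.save_bucket()?;
    // rkyv bytes -> entries + newest_entry (entries are re-inserted)
    bucket.load_bucket(blob)?;
    Ok(())
}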

pub(super) fn add_entry(&mut self, node_id: DHTKey) -> NodeRef {
log_rtab!("Node added: {}", node_id.encode());

@ -48,13 +93,6 @@ impl Bucket {
// newest_entry is updated by kick_bucket()
}

pub(super) fn roll_transfers(&self, last_ts: u64, cur_ts: u64) {
// Called every ROLLING_TRANSFERS_INTERVAL_SECS
for (_k, v) in &self.entries {
v.with_mut(|e| e.roll_transfers(last_ts, cur_ts));
}
}

pub(super) fn entry(&self, key: &DHTKey) -> Option<Arc<BucketEntry>> {
self.entries.get(key).cloned()
}
@ -87,8 +125,8 @@ impl Bucket {
if a.0 == b.0 {
return core::cmp::Ordering::Equal;
}
a.1.with(|ea| {
b.1.with(|eb| {
a.1.with_inner(|ea| {
b.1.with_inner(|eb| {
let astate = state_ordering(ea.state(cur_ts));
let bstate = state_ordering(eb.state(cur_ts));
// first kick dead nodes, then unreliable nodes

@ -1,5 +1,8 @@
use super::*;
use core::sync::atomic::{AtomicU32, Ordering};
use rkyv::{
with::Skip, Archive as RkyvArchive, Deserialize as RkyvDeserialize, Serialize as RkyvSerialize,
};

/// Reliable pings are done with increased spacing between pings

@ -39,10 +42,11 @@ pub enum BucketEntryState {
}

#[derive(Debug, Clone, Eq, PartialEq, PartialOrd, Ord, Hash)]
struct LastConnectionKey(ProtocolType, AddressType);
pub struct LastConnectionKey(ProtocolType, AddressType);

/// Bucket entry information specific to the LocalNetwork RoutingDomain
#[derive(Debug)]
#[derive(Debug, RkyvArchive, RkyvSerialize, RkyvDeserialize)]
#[archive_attr(repr(C), derive(CheckBytes))]
pub struct BucketEntryPublicInternet {
/// The PublicInternet node info
signed_node_info: Option<Box<SignedNodeInfo>>,
@ -53,7 +57,8 @@ pub struct BucketEntryPublicInternet {
}

/// Bucket entry information specific to the LocalNetwork RoutingDomain
#[derive(Debug)]
#[derive(Debug, RkyvArchive, RkyvSerialize, RkyvDeserialize)]
#[archive_attr(repr(C), derive(CheckBytes))]
pub struct BucketEntryLocalNetwork {
/// The LocalNetwork node info
signed_node_info: Option<Box<SignedNodeInfo>>,
@ -63,19 +68,51 @@ pub struct BucketEntryLocalNetwork {
node_status: Option<LocalNetworkNodeStatus>,
}

#[derive(Debug)]
/// A range of cryptography versions supported by this entry
#[derive(Copy, Clone, Debug, RkyvArchive, RkyvSerialize, RkyvDeserialize)]
#[archive_attr(repr(C), derive(CheckBytes))]
pub struct VersionRange {
/// The minimum cryptography version supported by this entry
pub min: u8,
/// The maximum cryptography version supported by this entry
pub max: u8,
}

/// The data associated with each bucket entry
#[derive(Debug, RkyvArchive, RkyvSerialize, RkyvDeserialize)]
#[archive_attr(repr(C), derive(CheckBytes))]
pub struct BucketEntryInner {
min_max_version: Option<(u8, u8)>,
/// The minimum and maximum range of cryptography versions supported by the node,
/// inclusive of the requirements of any relay the node may be using
min_max_version: Option<VersionRange>,
/// If this node has updated it's SignedNodeInfo since our network
/// and dial info has last changed, for example when our IP address changes
/// Used to determine if we should make this entry 'live' again when we receive a signednodeinfo update that
/// has the same timestamp, because if we change our own IP address or network class it may be possible for nodes that were
/// unreachable may now be reachable with the same SignedNodeInfo/DialInfo
updated_since_last_network_change: bool,
/// The last connection descriptors used to contact this node, per protocol type
#[with(Skip)]
last_connections: BTreeMap<LastConnectionKey, (ConnectionDescriptor, u64)>,
/// The node info for this entry on the publicinternet routing domain
public_internet: BucketEntryPublicInternet,
/// The node info for this entry on the localnetwork routing domain
local_network: BucketEntryLocalNetwork,
/// Statistics gathered for the peer
peer_stats: PeerStats,
/// The accounting for the latency statistics
#[with(Skip)]
latency_stats_accounting: LatencyStatsAccounting,
/// The accounting for the transfer statistics
#[with(Skip)]
transfer_stats_accounting: TransferStatsAccounting,
/// Tracking identifier for NodeRef debugging
#[cfg(feature = "tracking")]
#[with(Skip)]
next_track_id: usize,
/// Backtraces for NodeRef debugging
#[cfg(feature = "tracking")]
#[with(Skip)]
node_ref_tracks: HashMap<usize, backtrace::Backtrace>,
}

@ -132,6 +169,28 @@ impl BucketEntryInner {
}
}

// Less is more reliable then older
pub fn cmp_oldest_reliable(cur_ts: u64, e1: &Self, e2: &Self) -> std::cmp::Ordering {
// Reverse compare so most reliable is at front
let ret = e2.state(cur_ts).cmp(&e1.state(cur_ts));
if ret != std::cmp::Ordering::Equal {
return ret;
}

// Lower timestamp to the front, recent or no timestamp is at the end
if let Some(e1_ts) = &e1.peer_stats.rpc_stats.first_consecutive_seen_ts {
if let Some(e2_ts) = &e2.peer_stats.rpc_stats.first_consecutive_seen_ts {
e1_ts.cmp(&e2_ts)
} else {
std::cmp::Ordering::Less
}
} else if e2.peer_stats.rpc_stats.first_consecutive_seen_ts.is_some() {
std::cmp::Ordering::Greater
} else {
std::cmp::Ordering::Equal
}
}

pub fn sort_fastest_reliable_fn(cur_ts: u64) -> impl FnMut(&Self, &Self) -> std::cmp::Ordering {
move |e1, e2| Self::cmp_fastest_reliable(cur_ts, e1, e2)
}
@ -159,13 +218,15 @@ impl BucketEntryInner {

// See if we have an existing signed_node_info to update or not
if let Some(current_sni) = opt_current_sni {
// Always allow overwriting invalid/unsigned node
if current_sni.has_valid_signature() {
// If the timestamp hasn't changed or is less, ignore this update
if signed_node_info.timestamp <= current_sni.timestamp {
if signed_node_info.timestamp() <= current_sni.timestamp() {
// If we received a node update with the same timestamp
// we can make this node live again, but only if our network has recently changed
// which may make nodes that were unreachable now reachable with the same dialinfo
if !self.updated_since_last_network_change
&& signed_node_info.timestamp == current_sni.timestamp
&& signed_node_info.timestamp() == current_sni.timestamp()
{
// No need to update the signednodeinfo though since the timestamp is the same
// Touch the node and let it try to live again
@ -175,12 +236,24 @@ impl BucketEntryInner {
return;
}
}
}

// Update the protocol min/max version we have
self.min_max_version = Some((
signed_node_info.node_info.min_version,
signed_node_info.node_info.max_version,
));
// Update the protocol min/max version we have to use, to include relay requirements if needed
let mut version_range = VersionRange {
min: signed_node_info.node_info().min_version,
max: signed_node_info.node_info().max_version,
};
if let Some(relay_info) = signed_node_info.relay_info() {
version_range.min.max_assign(relay_info.min_version);
version_range.max.min_assign(relay_info.max_version);
}
if version_range.min <= version_range.max {
// Can be reached with at least one crypto version
self.min_max_version = Some(version_range);
} else {
// No valid crypto version in range
self.min_max_version = None;
}
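Editor's note: a small worked example of the range intersection above, with made-up version numbers; it relies on the same max_assign/min_assign helpers used in the code.

// Illustrative only: a node supporting crypto versions 0..=3 behind a relay
// supporting 2..=5 ends up with the intersection 2..=3.
let node = VersionRange { min: 0, max: 3 };
let relay = VersionRange { min: 2, max: 5 };
let mut range = node;
range.min.max_assign(relay.min); // min: 0 -> 2 (take the larger minimum)
range.max.min_assign(relay.max); // max: 3 stays (take the smaller maximum)
assert!(range.min <= range.max); // 2..=3 is non-empty, so the entry stays reachable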

// Update the signed node info
*opt_current_sni = Some(Box::new(signed_node_info));
@ -207,7 +280,7 @@ impl BucketEntryInner {
RoutingDomain::LocalNetwork => &self.local_network.signed_node_info,
RoutingDomain::PublicInternet => &self.public_internet.signed_node_info,
};
opt_current_sni.as_ref().map(|s| &s.node_info)
opt_current_sni.as_ref().map(|s| s.node_info())
}

pub fn signed_node_info(&self, routing_domain: RoutingDomain) -> Option<&SignedNodeInfo> {
@ -264,37 +337,54 @@ impl BucketEntryInner {
self.last_connections.clear();
}

// Gets the best 'last connection' that matches a set of routing domain, protocol types and address types
pub(super) fn last_connection(
// Gets all the 'last connections' that match a particular filter
pub(super) fn last_connections(
&self,
routing_table_inner: &RoutingTableInner,
node_ref_filter: Option<NodeRefFilter>,
) -> Option<(ConnectionDescriptor, u64)> {
// Iterate peer scopes and protocol types and address type in order to ensure we pick the preferred protocols if all else is the same
let nrf = node_ref_filter.unwrap_or_default();
for pt in nrf.dial_info_filter.protocol_type_set {
for at in nrf.dial_info_filter.address_type_set {
let key = LastConnectionKey(pt, at);
if let Some(v) = self.last_connections.get(&key) {
// Verify this connection could be in the filtered routing domain
let address = v.0.remote_address().address();
if let Some(rd) =
RoutingTable::routing_domain_for_address_inner(routing_table_inner, address)
rti: &RoutingTableInner,
filter: Option<NodeRefFilter>,
) -> Vec<(ConnectionDescriptor, u64)> {
let mut out: Vec<(ConnectionDescriptor, u64)> = self
.last_connections
.iter()
.filter_map(|(k, v)| {
let include = if let Some(filter) = &filter {
let remote_address = v.0.remote_address().address();
if let Some(routing_domain) = rti.routing_domain_for_address(remote_address) {
if filter.routing_domain_set.contains(routing_domain)
&& filter.dial_info_filter.protocol_type_set.contains(k.0)
&& filter.dial_info_filter.address_type_set.contains(k.1)
{
if nrf.routing_domain_set.contains(rd) {
return Some(*v);
}
}
}
// matches filter
true
} else {
// does not match filter
false
}
} else {
// no valid routing domain
false
}
} else {
// no filter
true
};
if include {
Some(v.clone())
} else {
None
}
pub fn set_min_max_version(&mut self, min_max_version: (u8, u8)) {
})
.collect();
// Sort with newest timestamps first
out.sort_by(|a, b| b.1.cmp(&a.1));
out
}
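Editor's note: an illustrative sketch of how a caller might use the new last_connections() accessor; the filter values, and the `entry`/`rti` bindings for a BucketEntryInner and RoutingTableInner, are assumptions for the example. Because the list is sorted newest-first, the first element is the most recent matching connection.

// Hypothetical usage: most recent UDP/IPv4 connection in the PublicInternet domain.
let filter = NodeRefFilter::new()
    .with_routing_domain(RoutingDomain::PublicInternet)
    .with_protocol_type(ProtocolType::UDP)
    .with_address_type(AddressType::IPV4);
let newest: Option<(ConnectionDescriptor, u64)> =
    entry.last_connections(rti, Some(filter)).into_iter().next();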

pub fn set_min_max_version(&mut self, min_max_version: VersionRange) {
self.min_max_version = Some(min_max_version);
}

pub fn min_max_version(&self) -> Option<(u8, u8)> {
pub fn min_max_version(&self) -> Option<VersionRange> {
self.min_max_version
}

@ -409,14 +499,17 @@ impl BucketEntryInner {
}
}

fn needs_constant_ping(&self, cur_ts: u64, interval: u64) -> bool {
// If we have not either seen the node, nor asked it a question in the last 'interval'
// then we should ping it
let latest_contact_time = self
.peer_stats
/// Return the last time we either saw a node, or asked it a question
fn latest_contact_time(&self) -> Option<u64> {
self.peer_stats
.rpc_stats
.last_seen_ts
.max(self.peer_stats.rpc_stats.last_question);
.max(self.peer_stats.rpc_stats.last_question)
}

fn needs_constant_ping(&self, cur_ts: u64, interval: u64) -> bool {
// If we have not either seen the node in the last 'interval' then we should ping it
let latest_contact_time = self.latest_contact_time();

match latest_contact_time {
None => true,
@ -438,14 +531,19 @@ impl BucketEntryInner {
return self.needs_constant_ping(cur_ts, KEEPALIVE_PING_INTERVAL_SECS as u64);
}

// If we don't have node status for this node, then we should ping it to get some node status
for routing_domain in RoutingDomainSet::all() {
if self.has_node_info(routing_domain.into()) {
if self.node_status(routing_domain).is_none() {
return true;
}
}
}

match state {
BucketEntryState::Reliable => {
// If we are in a reliable state, we need a ping on an exponential scale
let latest_contact_time = self
.peer_stats
.rpc_stats
.last_seen_ts
.max(self.peer_stats.rpc_stats.last_question);
let latest_contact_time = self.latest_contact_time();

match latest_contact_time {
None => {
@ -607,7 +705,37 @@ impl BucketEntry {
}
}

pub(super) fn with<F, R>(&self, f: F) -> R
pub(super) fn new_with_inner(inner: BucketEntryInner) -> Self {
Self {
ref_count: AtomicU32::new(0),
inner: RwLock::new(inner),
}
}

// Note, that this requires -also- holding the RoutingTable read lock, as an
// immutable reference to RoutingTableInner must be passed in to get this
// This ensures that an operation on the routing table can not change entries
// while it is being read from
pub fn with<F, R>(&self, rti: &RoutingTableInner, f: F) -> R
where
F: FnOnce(&RoutingTableInner, &BucketEntryInner) -> R,
{
let inner = self.inner.read();
f(rti, &*inner)
}

// Note, that this requires -also- holding the RoutingTable write lock, as a
// mutable reference to RoutingTableInner must be passed in to get this
pub fn with_mut<F, R>(&self, rti: &mut RoutingTableInner, f: F) -> R
where
F: FnOnce(&mut RoutingTableInner, &mut BucketEntryInner) -> R,
{
let mut inner = self.inner.write();
f(rti, &mut *inner)
}

// Internal inner access for RoutingTableInner only
pub(super) fn with_inner<F, R>(&self, f: F) -> R
where
F: FnOnce(&BucketEntryInner) -> R,
{
@ -615,7 +743,8 @@ impl BucketEntry {
f(&*inner)
}

pub(super) fn with_mut<F, R>(&self, f: F) -> R
// Internal inner access for RoutingTableInner only
pub(super) fn with_mut_inner<F, R>(&self, f: F) -> R
where
F: FnOnce(&mut BucketEntryInner) -> R,
{

@ -1,7 +1,7 @@
use super::*;

impl RoutingTable {
pub fn debug_info_nodeinfo(&self) -> String {
pub(crate) fn debug_info_nodeinfo(&self) -> String {
let mut out = String::new();
let inner = self.inner.read();
out += "Routing Table Info:\n";
@ -23,7 +23,7 @@ impl RoutingTable {
out
}

pub async fn debug_info_txtrecord(&self) -> String {
pub(crate) async fn debug_info_txtrecord(&self) -> String {
let mut out = String::new();

let gdis = self.dial_info_details(RoutingDomain::PublicInternet);
@ -58,8 +58,8 @@ impl RoutingTable {
out += &format!(
"{},{},{},{},{}",
BOOTSTRAP_TXT_VERSION,
MIN_VERSION,
MAX_VERSION,
MIN_CRYPTO_VERSION,
MAX_CRYPTO_VERSION,
self.node_id().encode(),
some_hostname.unwrap()
);
@ -71,7 +71,7 @@ impl RoutingTable {
out
}

pub fn debug_info_dialinfo(&self) -> String {
pub(crate) fn debug_info_dialinfo(&self) -> String {
let ldis = self.dial_info_details(RoutingDomain::LocalNetwork);
let gdis = self.dial_info_details(RoutingDomain::PublicInternet);
let mut out = String::new();
@ -100,8 +100,9 @@ impl RoutingTable {
out
}

pub fn debug_info_entries(&self, limit: usize, min_state: BucketEntryState) -> String {
pub(crate) fn debug_info_entries(&self, limit: usize, min_state: BucketEntryState) -> String {
let inner = self.inner.read();
let inner = &*inner;
let cur_ts = intf::get_timestamp();

let mut out = String::new();
@ -114,14 +115,14 @@ impl RoutingTable {
let filtered_entries: Vec<(&DHTKey, &Arc<BucketEntry>)> = inner.buckets[b]
.entries()
.filter(|e| {
let state = e.1.with(|e| e.state(cur_ts));
let state = e.1.with(inner, |_rti, e| e.state(cur_ts));
state >= min_state
})
.collect();
if !filtered_entries.is_empty() {
out += &format!(" Bucket #{}:\n", b);
for e in filtered_entries {
let state = e.1.with(|e| e.state(cur_ts));
let state = e.1.with(inner, |_rti, e| e.state(cur_ts));
out += &format!(
" {} [{}]\n",
e.0.encode(),
@ -147,7 +148,7 @@ impl RoutingTable {
out
}

pub fn debug_info_entry(&self, node_id: DHTKey) -> String {
pub(crate) fn debug_info_entry(&self, node_id: DHTKey) -> String {
let mut out = String::new();
out += &format!("Entry {:?}:\n", node_id);
if let Some(nr) = self.lookup_node_ref(node_id) {
@ -159,8 +160,9 @@ impl RoutingTable {
out
}

pub fn debug_info_buckets(&self, min_state: BucketEntryState) -> String {
pub(crate) fn debug_info_buckets(&self, min_state: BucketEntryState) -> String {
let inner = self.inner.read();
let inner = &*inner;
let cur_ts = intf::get_timestamp();

let mut out = String::new();
@ -175,7 +177,7 @@ impl RoutingTable {
while c < COLS {
let mut cnt = 0;
for e in inner.buckets[b].entries() {
if e.1.with(|e| e.state(cur_ts) >= min_state) {
if e.1.with(inner, |_rti, e| e.state(cur_ts) >= min_state) {
cnt += 1;
}
}

@ -1,615 +0,0 @@
use super::*;

use crate::dht::*;
use crate::xx::*;
use crate::*;

pub type LowLevelProtocolPorts = BTreeSet<(LowLevelProtocolType, AddressType, u16)>;
pub type ProtocolToPortMapping = BTreeMap<(ProtocolType, AddressType), (LowLevelProtocolType, u16)>;
#[derive(Clone, Debug)]
pub struct MappedPortInfo {
pub low_level_protocol_ports: LowLevelProtocolPorts,
pub protocol_to_port: ProtocolToPortMapping,
}

impl RoutingTable {
// Makes a filter that finds nodes with a matching inbound dialinfo
pub fn make_inbound_dial_info_entry_filter(
routing_domain: RoutingDomain,
dial_info_filter: DialInfoFilter,
) -> impl FnMut(&BucketEntryInner) -> bool {
// does it have matching public dial info?
move |e| {
if let Some(ni) = e.node_info(routing_domain) {
if ni
.first_filtered_dial_info_detail(|did| did.matches_filter(&dial_info_filter))
.is_some()
{
return true;
}
}
false
}
}

// Makes a filter that finds nodes capable of dialing a particular outbound dialinfo
pub fn make_outbound_dial_info_entry_filter(
routing_domain: RoutingDomain,
dial_info: DialInfo,
) -> impl FnMut(&BucketEntryInner) -> bool {
// does the node's outbound capabilities match the dialinfo?
move |e| {
if let Some(ni) = e.node_info(routing_domain) {
let dif = DialInfoFilter::all()
.with_protocol_type_set(ni.outbound_protocols)
.with_address_type_set(ni.address_types);
if dial_info.matches_filter(&dif) {
return true;
}
}
false
}
}

// Make a filter that wraps another filter
pub fn combine_entry_filters<F, G>(
mut f1: F,
mut f2: G,
) -> impl FnMut(&BucketEntryInner) -> bool
where
F: FnMut(&BucketEntryInner) -> bool,
G: FnMut(&BucketEntryInner) -> bool,
{
move |e| {
if !f1(e) {
return false;
}
if !f2(e) {
return false;
}
true
}
}

// Retrieve the fastest nodes in the routing table matching an entry filter
pub fn find_fast_public_nodes_filtered<F>(
&self,
node_count: usize,
mut entry_filter: F,
) -> Vec<NodeRef>
where
F: FnMut(&BucketEntryInner) -> bool,
{
self.find_fastest_nodes(
// count
node_count,
// filter
|_k: DHTKey, v: Option<Arc<BucketEntry>>| {
let entry = v.unwrap();
entry.with(|e| {
// skip nodes on local network
if e.node_info(RoutingDomain::LocalNetwork).is_some() {
return false;
}
// skip nodes not on public internet
if e.node_info(RoutingDomain::PublicInternet).is_none() {
return false;
}
// skip nodes that dont match entry filter
entry_filter(e)
})
},
// transform
|k: DHTKey, v: Option<Arc<BucketEntry>>| {
NodeRef::new(self.clone(), k, v.unwrap().clone(), None)
},
)
}

// Retrieve up to N of each type of protocol capable nodes
pub fn find_bootstrap_nodes_filtered(&self, max_per_type: usize) -> Vec<NodeRef> {
let protocol_types = vec![
ProtocolType::UDP,
ProtocolType::TCP,
ProtocolType::WS,
ProtocolType::WSS,
];
let mut nodes_proto_v4 = vec![0usize, 0usize, 0usize, 0usize];
let mut nodes_proto_v6 = vec![0usize, 0usize, 0usize, 0usize];

self.find_fastest_nodes(
// count
protocol_types.len() * 2 * max_per_type,
// filter
move |_k: DHTKey, v: Option<Arc<BucketEntry>>| {
let entry = v.unwrap();
entry.with(|e| {
// skip nodes on our local network here
if e.has_node_info(RoutingDomain::LocalNetwork.into()) {
return false;
}

// does it have some dial info we need?
let filter = |n: &NodeInfo| {
let mut keep = false;
for did in &n.dial_info_detail_list {
if matches!(did.dial_info.address_type(), AddressType::IPV4) {
for (n, protocol_type) in protocol_types.iter().enumerate() {
if nodes_proto_v4[n] < max_per_type
&& did.dial_info.protocol_type() == *protocol_type
{
nodes_proto_v4[n] += 1;
keep = true;
}
}
} else if matches!(did.dial_info.address_type(), AddressType::IPV6) {
for (n, protocol_type) in protocol_types.iter().enumerate() {
if nodes_proto_v6[n] < max_per_type
&& did.dial_info.protocol_type() == *protocol_type
{
nodes_proto_v6[n] += 1;
keep = true;
}
}
}
}
keep
};

e.node_info(RoutingDomain::PublicInternet)
.map(filter)
.unwrap_or(false)
})
},
// transform
|k: DHTKey, v: Option<Arc<BucketEntry>>| {
NodeRef::new(self.clone(), k, v.unwrap().clone(), None)
},
)
}

pub fn filter_has_valid_signed_node_info(
&self,
routing_domain: RoutingDomain,
v: Option<Arc<BucketEntry>>,
) -> bool {
match v {
None => self.has_valid_own_node_info(routing_domain),
Some(entry) => entry.with(|e| {
e.signed_node_info(routing_domain.into())
.map(|sni| sni.has_valid_signature())
.unwrap_or(false)
}),
}
}

pub fn transform_to_peer_info(
&self,
routing_domain: RoutingDomain,
k: DHTKey,
v: Option<Arc<BucketEntry>>,
) -> PeerInfo {
match v {
None => self.get_own_peer_info(routing_domain),
Some(entry) => entry.with(|e| e.make_peer_info(k, routing_domain).unwrap()),
}
}

pub fn find_peers_with_sort_and_filter<F, C, T, O>(
&self,
node_count: usize,
cur_ts: u64,
mut filter: F,
compare: C,
mut transform: T,
) -> Vec<O>
where
F: FnMut(DHTKey, Option<Arc<BucketEntry>>) -> bool,
C: FnMut(
&(DHTKey, Option<Arc<BucketEntry>>),
&(DHTKey, Option<Arc<BucketEntry>>),
) -> core::cmp::Ordering,
T: FnMut(DHTKey, Option<Arc<BucketEntry>>) -> O,
{
let inner = self.inner.read();
let self_node_id = self.unlocked_inner.node_id;

// collect all the nodes for sorting
let mut nodes =
Vec::<(DHTKey, Option<Arc<BucketEntry>>)>::with_capacity(inner.bucket_entry_count + 1);

// add our own node (only one of there with the None entry)
if filter(self_node_id, None) {
nodes.push((self_node_id, None));
}

// add all nodes from buckets
Self::with_entries(&*inner, cur_ts, BucketEntryState::Unreliable, |k, v| {
// Apply filter
if filter(k, Some(v.clone())) {
nodes.push((k, Some(v.clone())));
}
Option::<()>::None
});

// sort by preference for returning nodes
nodes.sort_by(compare);

// return transformed vector for filtered+sorted nodes
let cnt = usize::min(node_count, nodes.len());
let mut out = Vec::<O>::with_capacity(cnt);
for node in nodes {
let val = transform(node.0, node.1);
out.push(val);
}

out
}

pub fn find_fastest_nodes<T, F, O>(
&self,
node_count: usize,
mut filter: F,
transform: T,
) -> Vec<O>
where
F: FnMut(DHTKey, Option<Arc<BucketEntry>>) -> bool,
T: FnMut(DHTKey, Option<Arc<BucketEntry>>) -> O,
{
let cur_ts = intf::get_timestamp();
let out = self.find_peers_with_sort_and_filter(
node_count,
cur_ts,
// filter
|k, v| {
if let Some(entry) = &v {
// always filter out dead nodes
if entry.with(|e| e.state(cur_ts) == BucketEntryState::Dead) {
false
} else {
filter(k, v)
}
} else {
// always filter out self peer, as it is irrelevant to the 'fastest nodes' search
false
}
},
// sort
|(a_key, a_entry), (b_key, b_entry)| {
// same nodes are always the same
if a_key == b_key {
return core::cmp::Ordering::Equal;
}
// our own node always comes last (should not happen, here for completeness)
if a_entry.is_none() {
return core::cmp::Ordering::Greater;
}
if b_entry.is_none() {
return core::cmp::Ordering::Less;
}
// reliable nodes come first
let ae = a_entry.as_ref().unwrap();
let be = b_entry.as_ref().unwrap();
ae.with(|ae| {
be.with(|be| {
let ra = ae.check_reliable(cur_ts);
let rb = be.check_reliable(cur_ts);
if ra != rb {
if ra {
return core::cmp::Ordering::Less;
} else {
return core::cmp::Ordering::Greater;
}
}

// latency is the next metric, closer nodes first
let a_latency = match ae.peer_stats().latency.as_ref() {
None => {
// treat unknown latency as slow
return core::cmp::Ordering::Greater;
}
Some(l) => l,
};
let b_latency = match be.peer_stats().latency.as_ref() {
None => {
// treat unknown latency as slow
return core::cmp::Ordering::Less;
}
Some(l) => l,
};
// Sort by average latency
a_latency.average.cmp(&b_latency.average)
})
})
},
// transform,
transform,
);
out
}

pub fn find_closest_nodes<F, T, O>(
&self,
node_id: DHTKey,
filter: F,
mut transform: T,
) -> Vec<O>
where
F: FnMut(DHTKey, Option<Arc<BucketEntry>>) -> bool,
T: FnMut(DHTKey, Option<Arc<BucketEntry>>) -> O,
{
let cur_ts = intf::get_timestamp();
let node_count = {
let c = self.unlocked_inner.config.get();
c.network.dht.max_find_node_count as usize
};
let out = self.find_peers_with_sort_and_filter(
node_count,
cur_ts,
// filter
filter,
// sort
|(a_key, a_entry), (b_key, b_entry)| {
// same nodes are always the same
if a_key == b_key {
return core::cmp::Ordering::Equal;
}

// reliable nodes come first, pessimistically treating our own node as unreliable
let ra = a_entry
.as_ref()
.map_or(false, |x| x.with(|x| x.check_reliable(cur_ts)));
let rb = b_entry
.as_ref()
.map_or(false, |x| x.with(|x| x.check_reliable(cur_ts)));
if ra != rb {
if ra {
return core::cmp::Ordering::Less;
} else {
return core::cmp::Ordering::Greater;
}
}

// distance is the next metric, closer nodes first
let da = distance(a_key, &node_id);
let db = distance(b_key, &node_id);
da.cmp(&db)
},
// transform,
&mut transform,
);
log_rtab!(">> find_closest_nodes: node count = {}", out.len());
out
}

// Build a map of protocols to low level ports
// This way we can get the set of protocols required to keep our NAT mapping alive for keepalive pings
// Only one protocol per low level protocol/port combination is required
// For example, if WS/WSS and TCP protocols are on the same low-level TCP port, only TCP keepalives will be required
// and we do not need to do WS/WSS keepalive as well. If they are on different ports, then we will need WS/WSS keepalives too.
pub fn get_mapped_port_info(&self) -> MappedPortInfo {
let mut low_level_protocol_ports =
BTreeSet::<(LowLevelProtocolType, AddressType, u16)>::new();
let mut protocol_to_port =
BTreeMap::<(ProtocolType, AddressType), (LowLevelProtocolType, u16)>::new();
let our_dids = self.all_filtered_dial_info_details(
RoutingDomain::PublicInternet.into(),
&DialInfoFilter::all(),
);
for did in our_dids {
low_level_protocol_ports.insert((
did.dial_info.protocol_type().low_level_protocol_type(),
did.dial_info.address_type(),
did.dial_info.socket_address().port(),
));
protocol_to_port.insert(
(did.dial_info.protocol_type(), did.dial_info.address_type()),
(
did.dial_info.protocol_type().low_level_protocol_type(),
did.dial_info.socket_address().port(),
),
);
}
MappedPortInfo {
low_level_protocol_ports,
protocol_to_port,
}
}

fn make_public_internet_relay_node_filter(&self) -> impl Fn(&BucketEntryInner) -> bool {
// Get all our outbound protocol/address types
let outbound_dif = self
.network_manager()
.get_outbound_dial_info_filter(RoutingDomain::PublicInternet);
let mapped_port_info = self.get_mapped_port_info();

move |e: &BucketEntryInner| {
// Ensure this node is not on the local network
if e.has_node_info(RoutingDomain::LocalNetwork.into()) {
return false;
}

// Disqualify nodes that don't cover all our inbound ports for tcp and udp
// as we need to be able to use the relay for keepalives for all nat mappings
let mut low_level_protocol_ports = mapped_port_info.low_level_protocol_ports.clone();

let can_serve_as_relay = e
.node_info(RoutingDomain::PublicInternet)
.map(|n| {
let dids =
n.all_filtered_dial_info_details(|did| did.matches_filter(&outbound_dif));
for did in &dids {
let pt = did.dial_info.protocol_type();
let at = did.dial_info.address_type();
if let Some((llpt, port)) = mapped_port_info.protocol_to_port.get(&(pt, at))
{
low_level_protocol_ports.remove(&(*llpt, at, *port));
}
}
low_level_protocol_ports.is_empty()
})
.unwrap_or(false);
if !can_serve_as_relay {
return false;
}

true
}
}

#[instrument(level = "trace", skip(self), ret)]
pub fn find_inbound_relay(
&self,
routing_domain: RoutingDomain,
cur_ts: u64,
) -> Option<NodeRef> {
// Get relay filter function
let relay_node_filter = match routing_domain {
RoutingDomain::PublicInternet => self.make_public_internet_relay_node_filter(),
RoutingDomain::LocalNetwork => {
unimplemented!();
}
};

// Go through all entries and find fastest entry that matches filter function
let inner = self.inner.read();
let inner = &*inner;
let mut best_inbound_relay: Option<(DHTKey, Arc<BucketEntry>)> = None;

// Iterate all known nodes for candidates
Self::with_entries(inner, cur_ts, BucketEntryState::Unreliable, |k, v| {
let v2 = v.clone();
v.with(|e| {
// Ensure we have the node's status
if let Some(node_status) = e.node_status(routing_domain) {
// Ensure the node will relay
if node_status.will_relay() {
// Compare against previous candidate
if let Some(best_inbound_relay) = best_inbound_relay.as_mut() {
// Less is faster
let better = best_inbound_relay.1.with(|best| {
BucketEntryInner::cmp_fastest_reliable(cur_ts, e, best)
== std::cmp::Ordering::Less
});
// Now apply filter function and see if this node should be included
if better && relay_node_filter(e) {
*best_inbound_relay = (k, v2);
}
} else if relay_node_filter(e) {
// Always store the first candidate
best_inbound_relay = Some((k, v2));
}
}
}
});
// Don't end early, iterate through all entries
Option::<()>::None
});
// Return the best inbound relay noderef
best_inbound_relay.map(|(k, e)| NodeRef::new(self.clone(), k, e, None))
}

#[instrument(level = "trace", skip(self), ret)]
pub fn register_find_node_answer(&self, peers: Vec<PeerInfo>) -> Vec<NodeRef> {
let node_id = self.node_id();

// register nodes we'd found
let mut out = Vec::<NodeRef>::with_capacity(peers.len());
for p in peers {
// if our own node if is in the list then ignore it, as we don't add ourselves to our own routing table
if p.node_id.key == node_id {
continue;
}

// node can not be its own relay
if let Some(rpi) = &p.signed_node_info.node_info.relay_peer_info {
if rpi.node_id == p.node_id {
continue;
}
}

// register the node if it's new
if let Some(nr) = self.register_node_with_signed_node_info(
RoutingDomain::PublicInternet,
p.node_id.key,
p.signed_node_info.clone(),
false,
) {
out.push(nr);
}
}
out
}

#[instrument(level = "trace", skip(self), ret, err)]
pub async fn find_node(
&self,
node_ref: NodeRef,
node_id: DHTKey,
) -> EyreResult<NetworkResult<Vec<NodeRef>>> {
let rpc_processor = self.rpc_processor();

let res = network_result_try!(
rpc_processor
.clone()
.rpc_call_find_node(Destination::direct(node_ref), node_id)
.await?
);

// register nodes we'd found
Ok(NetworkResult::value(
self.register_find_node_answer(res.answer),
))
}

#[instrument(level = "trace", skip(self), ret, err)]
pub async fn find_self(&self, node_ref: NodeRef) -> EyreResult<NetworkResult<Vec<NodeRef>>> {
let node_id = self.node_id();
self.find_node(node_ref, node_id).await
}

#[instrument(level = "trace", skip(self), ret, err)]
pub async fn find_target(&self, node_ref: NodeRef) -> EyreResult<NetworkResult<Vec<NodeRef>>> {
let node_id = node_ref.node_id();
self.find_node(node_ref, node_id).await
}

#[instrument(level = "trace", skip(self))]
pub async fn reverse_find_node(&self, node_ref: NodeRef, wide: bool) {
// Ask bootstrap node to 'find' our own node so we can get some more nodes near ourselves
// and then contact those nodes to inform -them- that we exist

// Ask bootstrap server for nodes closest to our own node
let closest_nodes = network_result_value_or_log!(debug match self.find_self(node_ref.clone()).await {
Err(e) => {
log_rtab!(error
"find_self failed for {:?}: {:?}",
&node_ref, e
);
return;
}
Ok(v) => v,
} => {
return;
});

// Ask each node near us to find us as well
if wide {
for closest_nr in closest_nodes {
network_result_value_or_log!(debug match self.find_self(closest_nr.clone()).await {
Err(e) => {
log_rtab!(error
"find_self failed for {:?}: {:?}",
&closest_nr, e
);
continue;
}
Ok(v) => v,
} => {
// Do nothing with non-values
continue;
});
}
}
}
}

File diff suppressed because it is too large
@ -1,80 +1,360 @@
use super::*;
use crate::dht::*;
use crate::crypto::*;
use alloc::fmt;

// Connectionless protocols like UDP are dependent on a NAT translation timeout
// We should ping them with some frequency and 30 seconds is typical timeout
const CONNECTIONLESS_TIMEOUT_SECS: u32 = 29;

#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord)]
pub struct NodeRefFilter {
pub routing_domain_set: RoutingDomainSet,
pub dial_info_filter: DialInfoFilter,
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////

impl Default for NodeRefFilter {
fn default() -> Self {
Self::new()
}
}

impl NodeRefFilter {
pub fn new() -> Self {
Self {
routing_domain_set: RoutingDomainSet::all(),
dial_info_filter: DialInfoFilter::all(),
}
}

pub fn with_routing_domain(mut self, routing_domain: RoutingDomain) -> Self {
self.routing_domain_set = routing_domain.into();
self
}
pub fn with_routing_domain_set(mut self, routing_domain_set: RoutingDomainSet) -> Self {
self.routing_domain_set = routing_domain_set;
self
}
pub fn with_dial_info_filter(mut self, dial_info_filter: DialInfoFilter) -> Self {
self.dial_info_filter = dial_info_filter;
self
}
pub fn with_protocol_type(mut self, protocol_type: ProtocolType) -> Self {
self.dial_info_filter = self.dial_info_filter.with_protocol_type(protocol_type);
self
}
pub fn with_protocol_type_set(mut self, protocol_set: ProtocolTypeSet) -> Self {
self.dial_info_filter = self.dial_info_filter.with_protocol_type_set(protocol_set);
self
}
pub fn with_address_type(mut self, address_type: AddressType) -> Self {
self.dial_info_filter = self.dial_info_filter.with_address_type(address_type);
self
}
pub fn with_address_type_set(mut self, address_set: AddressTypeSet) -> Self {
self.dial_info_filter = self.dial_info_filter.with_address_type_set(address_set);
self
}
pub fn filtered(mut self, other_filter: &NodeRefFilter) -> Self {
self.routing_domain_set &= other_filter.routing_domain_set;
self.dial_info_filter = self
.dial_info_filter
.filtered(&other_filter.dial_info_filter);
self
}
pub fn is_dead(&self) -> bool {
self.dial_info_filter.is_dead() || self.routing_domain_set.is_empty()
}
}
|
||||
|
||||
pub struct NodeRef {
|
||||
pub struct NodeRefBaseCommon {
|
||||
routing_table: RoutingTable,
|
||||
node_id: DHTKey,
|
||||
entry: Arc<BucketEntry>,
|
||||
filter: Option<NodeRefFilter>,
|
||||
sequencing: Sequencing,
|
||||
#[cfg(feature = "tracking")]
|
||||
track_id: usize,
|
||||
}
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
pub trait NodeRefBase: Sized {
|
||||
// Common field access
|
||||
fn common(&self) -> &NodeRefBaseCommon;
|
||||
fn common_mut(&mut self) -> &mut NodeRefBaseCommon;
|
||||
|
||||
// Implementation-specific operators
|
||||
fn operate<T, F>(&self, f: F) -> T
|
||||
where
|
||||
F: FnOnce(&RoutingTableInner, &BucketEntryInner) -> T;
|
||||
fn operate_mut<T, F>(&self, f: F) -> T
|
||||
where
|
||||
F: FnOnce(&mut RoutingTableInner, &mut BucketEntryInner) -> T;
|
||||
|
||||
// Filtering
|
||||
fn filter_ref(&self) -> Option<&NodeRefFilter> {
|
||||
self.common().filter.as_ref()
|
||||
}
|
||||
|
||||
fn take_filter(&mut self) -> Option<NodeRefFilter> {
|
||||
self.common_mut().filter.take()
|
||||
}
|
||||
|
||||
fn set_filter(&mut self, filter: Option<NodeRefFilter>) {
|
||||
self.common_mut().filter = filter
|
||||
}
|
||||
|
||||
fn set_sequencing(&mut self, sequencing: Sequencing) {
|
||||
self.common_mut().sequencing = sequencing;
|
||||
}
|
||||
fn sequencing(&self) -> Sequencing {
|
||||
self.common().sequencing
|
||||
}
|
||||
|
||||
fn merge_filter(&mut self, filter: NodeRefFilter) {
|
||||
let common_mut = self.common_mut();
|
||||
if let Some(self_filter) = common_mut.filter.take() {
|
||||
common_mut.filter = Some(self_filter.filtered(&filter));
|
||||
} else {
|
||||
common_mut.filter = Some(filter);
|
||||
}
|
||||
}
|
||||
|
||||
fn is_filter_dead(&self) -> bool {
|
||||
if let Some(filter) = &self.common().filter {
|
||||
filter.is_dead()
|
||||
} else {
|
||||
false
|
||||
}
|
||||
}
|
||||
|
||||
fn routing_domain_set(&self) -> RoutingDomainSet {
|
||||
self.common()
|
||||
.filter
|
||||
.as_ref()
|
||||
.map(|f| f.routing_domain_set)
|
||||
.unwrap_or(RoutingDomainSet::all())
|
||||
}
|
||||
|
||||
fn dial_info_filter(&self) -> DialInfoFilter {
|
||||
self.common()
|
||||
.filter
|
||||
.as_ref()
|
||||
.map(|f| f.dial_info_filter.clone())
|
||||
.unwrap_or(DialInfoFilter::all())
|
||||
}
|
||||
|
||||
fn best_routing_domain(&self) -> Option<RoutingDomain> {
|
||||
self.operate(|_rti, e| {
|
||||
e.best_routing_domain(
|
||||
self.common()
|
||||
.filter
|
||||
.as_ref()
|
||||
.map(|f| f.routing_domain_set)
|
||||
.unwrap_or(RoutingDomainSet::all()),
|
||||
)
|
||||
})
|
||||
}
|
||||
|
||||
// Accessors
|
||||
fn routing_table(&self) -> RoutingTable {
|
||||
self.common().routing_table.clone()
|
||||
}
|
||||
fn node_id(&self) -> DHTKey {
|
||||
self.common().node_id
|
||||
}
|
||||
fn has_updated_since_last_network_change(&self) -> bool {
|
||||
self.operate(|_rti, e| e.has_updated_since_last_network_change())
|
||||
}
|
||||
fn set_updated_since_last_network_change(&self) {
|
||||
self.operate_mut(|_rti, e| e.set_updated_since_last_network_change(true));
|
||||
}
|
||||
fn update_node_status(&self, node_status: NodeStatus) {
|
||||
self.operate_mut(|_rti, e| {
|
||||
e.update_node_status(node_status);
|
||||
});
|
||||
}
|
||||
fn min_max_version(&self) -> Option<VersionRange> {
|
||||
self.operate(|_rti, e| e.min_max_version())
|
||||
}
|
||||
fn set_min_max_version(&self, min_max_version: VersionRange) {
|
||||
self.operate_mut(|_rti, e| e.set_min_max_version(min_max_version))
|
||||
}
|
||||
fn state(&self, cur_ts: u64) -> BucketEntryState {
|
||||
self.operate(|_rti, e| e.state(cur_ts))
|
||||
}
|
||||
fn peer_stats(&self) -> PeerStats {
|
||||
self.operate(|_rti, e| e.peer_stats().clone())
|
||||
}
|
||||
|
||||
// Per-RoutingDomain accessors
|
||||
fn make_peer_info(&self, routing_domain: RoutingDomain) -> Option<PeerInfo> {
|
||||
self.operate(|_rti, e| e.make_peer_info(self.node_id(), routing_domain))
|
||||
}
|
||||
fn node_info(&self, routing_domain: RoutingDomain) -> Option<NodeInfo> {
|
||||
self.operate(|_rti, e| e.node_info(routing_domain).cloned())
|
||||
}
|
||||
fn signed_node_info_has_valid_signature(&self, routing_domain: RoutingDomain) -> bool {
|
||||
self.operate(|_rti, e| {
|
||||
e.signed_node_info(routing_domain)
|
||||
.map(|sni| sni.has_valid_signature())
|
||||
.unwrap_or(false)
|
||||
})
|
||||
}
|
||||
fn has_seen_our_node_info(&self, routing_domain: RoutingDomain) -> bool {
|
||||
self.operate(|_rti, e| e.has_seen_our_node_info(routing_domain))
|
||||
}
|
||||
fn set_seen_our_node_info(&self, routing_domain: RoutingDomain) {
|
||||
self.operate_mut(|_rti, e| e.set_seen_our_node_info(routing_domain, true));
|
||||
}
|
||||
fn network_class(&self, routing_domain: RoutingDomain) -> Option<NetworkClass> {
|
||||
self.operate(|_rt, e| e.node_info(routing_domain).map(|n| n.network_class))
|
||||
}
|
||||
fn outbound_protocols(&self, routing_domain: RoutingDomain) -> Option<ProtocolTypeSet> {
|
||||
self.operate(|_rt, e| e.node_info(routing_domain).map(|n| n.outbound_protocols))
|
||||
}
|
||||
fn address_types(&self, routing_domain: RoutingDomain) -> Option<AddressTypeSet> {
|
||||
self.operate(|_rt, e| e.node_info(routing_domain).map(|n| n.address_types))
|
||||
}
|
||||
fn node_info_outbound_filter(&self, routing_domain: RoutingDomain) -> DialInfoFilter {
|
||||
let mut dif = DialInfoFilter::all();
|
||||
if let Some(outbound_protocols) = self.outbound_protocols(routing_domain) {
|
||||
dif = dif.with_protocol_type_set(outbound_protocols);
|
||||
}
|
||||
if let Some(address_types) = self.address_types(routing_domain) {
|
||||
dif = dif.with_address_type_set(address_types);
|
||||
}
|
||||
dif
|
||||
}
|
||||
fn relay(&self, routing_domain: RoutingDomain) -> Option<NodeRef> {
|
||||
self.operate_mut(|rti, e| {
|
||||
e.signed_node_info(routing_domain)
|
||||
.and_then(|n| n.relay_peer_info())
|
||||
.and_then(|t| {
|
||||
// If relay is ourselves, then return None, because we can't relay through ourselves
|
||||
// and to contact this node we should have had an existing inbound connection
|
||||
if t.node_id.key == rti.unlocked_inner.node_id {
|
||||
return None;
|
||||
}
|
||||
|
||||
// Register relay node and return noderef
|
||||
rti.register_node_with_signed_node_info(
|
||||
self.routing_table(),
|
||||
routing_domain,
|
||||
t.node_id.key,
|
||||
t.signed_node_info,
|
||||
false,
|
||||
)
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
// Filtered accessors
|
||||
fn first_filtered_dial_info_detail(&self) -> Option<DialInfoDetail> {
|
||||
let routing_domain_set = self.routing_domain_set();
|
||||
let dial_info_filter = self.dial_info_filter();
|
||||
|
||||
let (sort, dial_info_filter) = match self.common().sequencing {
|
||||
Sequencing::NoPreference => (None, dial_info_filter),
|
||||
Sequencing::PreferOrdered => (
|
||||
Some(DialInfoDetail::ordered_sequencing_sort),
|
||||
dial_info_filter,
|
||||
),
|
||||
Sequencing::EnsureOrdered => (
|
||||
Some(DialInfoDetail::ordered_sequencing_sort),
|
||||
dial_info_filter.filtered(
|
||||
&DialInfoFilter::all().with_protocol_type_set(ProtocolType::all_ordered_set()),
|
||||
),
|
||||
),
|
||||
};
|
||||
|
||||
self.operate(|_rt, e| {
|
||||
for routing_domain in routing_domain_set {
|
||||
if let Some(ni) = e.node_info(routing_domain) {
|
||||
let filter = |did: &DialInfoDetail| did.matches_filter(&dial_info_filter);
|
||||
if let Some(did) = ni.first_filtered_dial_info_detail(sort, filter) {
|
||||
return Some(did);
|
||||
}
|
||||
}
|
||||
}
|
||||
None
|
||||
})
|
||||
}
|
||||
|
||||
fn all_filtered_dial_info_details<F>(&self) -> Vec<DialInfoDetail> {
|
||||
let routing_domain_set = self.routing_domain_set();
|
||||
let dial_info_filter = self.dial_info_filter();
|
||||
|
||||
let (sort, dial_info_filter) = match self.common().sequencing {
|
||||
Sequencing::NoPreference => (None, dial_info_filter),
|
||||
Sequencing::PreferOrdered => (
|
||||
Some(DialInfoDetail::ordered_sequencing_sort),
|
||||
dial_info_filter,
|
||||
),
|
||||
Sequencing::EnsureOrdered => (
|
||||
Some(DialInfoDetail::ordered_sequencing_sort),
|
||||
dial_info_filter.filtered(
|
||||
&DialInfoFilter::all().with_protocol_type_set(ProtocolType::all_ordered_set()),
|
||||
),
|
||||
),
|
||||
};
|
||||
|
||||
let mut out = Vec::new();
|
||||
self.operate(|_rt, e| {
|
||||
for routing_domain in routing_domain_set {
|
||||
if let Some(ni) = e.node_info(routing_domain) {
|
||||
let filter = |did: &DialInfoDetail| did.matches_filter(&dial_info_filter);
|
||||
if let Some(did) = ni.first_filtered_dial_info_detail(sort, filter) {
|
||||
out.push(did);
|
||||
}
|
||||
}
|
||||
}
|
||||
});
|
||||
out.remove_duplicates();
|
||||
out
|
||||
}
|
||||
|
||||
fn last_connection(&self) -> Option<ConnectionDescriptor> {
|
||||
// Get the last connections and the last time we saw anything with this connection
|
||||
// Filtered first and then sorted by most recent
|
||||
self.operate(|rti, e| {
|
||||
let last_connections = e.last_connections(rti, self.common().filter.clone());
|
||||
|
||||
// Do some checks to ensure these are possibly still 'live'
|
||||
for (last_connection, last_seen) in last_connections {
|
||||
// Should we check the connection table?
|
||||
if last_connection.protocol_type().is_connection_oriented() {
|
||||
// Look the connection up in the connection manager and see if it's still there
|
||||
let connection_manager =
|
||||
rti.unlocked_inner.network_manager.connection_manager();
|
||||
if connection_manager.get_connection(last_connection).is_some() {
|
||||
return Some(last_connection);
|
||||
}
|
||||
} else {
|
||||
// If this is not connection oriented, then we check our last seen time
|
||||
// to see if this mapping has expired (beyond our timeout)
|
||||
let cur_ts = intf::get_timestamp();
|
||||
if (last_seen + (CONNECTIONLESS_TIMEOUT_SECS as u64 * 1_000_000u64)) >= cur_ts {
|
||||
return Some(last_connection);
|
||||
}
|
||||
}
|
||||
}
|
||||
None
|
||||
})
|
||||
}
|
||||
|
||||
fn clear_last_connections(&self) {
|
||||
self.operate_mut(|_rti, e| e.clear_last_connections())
|
||||
}
|
||||
|
||||
fn set_last_connection(&self, connection_descriptor: ConnectionDescriptor, ts: u64) {
|
||||
self.operate_mut(|rti, e| {
|
||||
e.set_last_connection(connection_descriptor, ts);
|
||||
rti.touch_recent_peer(self.common().node_id, connection_descriptor);
|
||||
})
|
||||
}
|
||||
|
||||
fn has_any_dial_info(&self) -> bool {
|
||||
self.operate(|_rti, e| {
|
||||
for rtd in RoutingDomain::all() {
|
||||
if let Some(sni) = e.signed_node_info(rtd) {
|
||||
if sni.has_any_dial_info() {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
}
|
||||
false
|
||||
})
|
||||
}
|
||||
|
||||
fn stats_question_sent(&self, ts: u64, bytes: u64, expects_answer: bool) {
|
||||
self.operate_mut(|rti, e| {
|
||||
rti.transfer_stats_accounting().add_up(bytes);
|
||||
e.question_sent(ts, bytes, expects_answer);
|
||||
})
|
||||
}
|
||||
fn stats_question_rcvd(&self, ts: u64, bytes: u64) {
|
||||
self.operate_mut(|rti, e| {
|
||||
rti.transfer_stats_accounting().add_down(bytes);
|
||||
e.question_rcvd(ts, bytes);
|
||||
})
|
||||
}
|
||||
fn stats_answer_sent(&self, bytes: u64) {
|
||||
self.operate_mut(|rti, e| {
|
||||
rti.transfer_stats_accounting().add_up(bytes);
|
||||
e.answer_sent(bytes);
|
||||
})
|
||||
}
|
||||
fn stats_answer_rcvd(&self, send_ts: u64, recv_ts: u64, bytes: u64) {
|
||||
self.operate_mut(|rti, e| {
|
||||
rti.transfer_stats_accounting().add_down(bytes);
|
||||
rti.latency_stats_accounting()
|
||||
.record_latency(recv_ts - send_ts);
|
||||
e.answer_rcvd(send_ts, recv_ts, bytes);
|
||||
})
|
||||
}
|
||||
fn stats_question_lost(&self) {
|
||||
self.operate_mut(|_rti, e| {
|
||||
e.question_lost();
|
||||
})
|
||||
}
|
||||
fn stats_failed_to_send(&self, ts: u64, expects_answer: bool) {
|
||||
self.operate_mut(|_rti, e| {
|
||||
e.failed_to_send(ts, expects_answer);
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
/// Reference to a routing table entry
|
||||
/// Keeps entry in the routing table until all references are gone
|
||||
pub struct NodeRef {
|
||||
common: NodeRefBaseCommon,
|
||||
}
|
||||
|
||||
impl NodeRef {
|
||||
pub fn new(
|
||||
routing_table: RoutingTable,
|
||||
@ -85,52 +365,15 @@ impl NodeRef {
|
||||
entry.ref_count.fetch_add(1u32, Ordering::Relaxed);
|
||||
|
||||
Self {
|
||||
common: NodeRefBaseCommon {
|
||||
routing_table,
|
||||
node_id,
|
||||
entry,
|
||||
filter,
|
||||
sequencing: Sequencing::NoPreference,
|
||||
#[cfg(feature = "tracking")]
|
||||
track_id: entry.track(),
|
||||
}
|
||||
}
|
||||
|
||||
// Operate on entry accessors
|
||||
|
||||
pub(super) fn operate<T, F>(&self, f: F) -> T
|
||||
where
|
||||
F: FnOnce(&RoutingTableInner, &BucketEntryInner) -> T,
|
||||
{
|
||||
let inner = &*self.routing_table.inner.read();
|
||||
self.entry.with(|e| f(inner, e))
|
||||
}
|
||||
|
||||
pub(super) fn operate_mut<T, F>(&self, f: F) -> T
|
||||
where
|
||||
F: FnOnce(&mut RoutingTableInner, &mut BucketEntryInner) -> T,
|
||||
{
|
||||
let inner = &mut *self.routing_table.inner.write();
|
||||
self.entry.with_mut(|e| f(inner, e))
|
||||
}
|
||||
|
||||
// Filtering
|
||||
|
||||
pub fn filter_ref(&self) -> Option<&NodeRefFilter> {
|
||||
self.filter.as_ref()
|
||||
}
|
||||
|
||||
pub fn take_filter(&mut self) -> Option<NodeRefFilter> {
|
||||
self.filter.take()
|
||||
}
|
||||
|
||||
pub fn set_filter(&mut self, filter: Option<NodeRefFilter>) {
|
||||
self.filter = filter
|
||||
}
|
||||
|
||||
pub fn merge_filter(&mut self, filter: NodeRefFilter) {
|
||||
if let Some(self_filter) = self.filter.take() {
|
||||
self.filter = Some(self_filter.filtered(&filter));
|
||||
} else {
|
||||
self.filter = Some(filter);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@ -140,281 +383,73 @@ impl NodeRef {
|
||||
out
|
||||
}
|
||||
|
||||
pub fn is_filter_dead(&self) -> bool {
|
||||
if let Some(filter) = &self.filter {
|
||||
filter.is_dead()
|
||||
} else {
|
||||
false
|
||||
pub fn locked<'a>(&self, rti: &'a RoutingTableInner) -> NodeRefLocked<'a> {
|
||||
NodeRefLocked::new(rti, self.clone())
|
||||
}
|
||||
pub fn locked_mut<'a>(&self, rti: &'a mut RoutingTableInner) -> NodeRefLockedMut<'a> {
|
||||
NodeRefLockedMut::new(rti, self.clone())
|
||||
}
|
||||
}
|
||||
|
||||
pub fn routing_domain_set(&self) -> RoutingDomainSet {
|
||||
self.filter
|
||||
.as_ref()
|
||||
.map(|f| f.routing_domain_set)
|
||||
.unwrap_or(RoutingDomainSet::all())
|
||||
impl NodeRefBase for NodeRef {
|
||||
fn common(&self) -> &NodeRefBaseCommon {
|
||||
&self.common
|
||||
}
|
||||
|
||||
pub fn dial_info_filter(&self) -> DialInfoFilter {
|
||||
self.filter
|
||||
.as_ref()
|
||||
.map(|f| f.dial_info_filter.clone())
|
||||
.unwrap_or(DialInfoFilter::all())
|
||||
fn common_mut(&mut self) -> &mut NodeRefBaseCommon {
|
||||
&mut self.common
|
||||
}
|
||||
|
||||
pub fn best_routing_domain(&self) -> Option<RoutingDomain> {
|
||||
self.operate(|_rti, e| {
|
||||
e.best_routing_domain(
|
||||
self.filter
|
||||
.as_ref()
|
||||
.map(|f| f.routing_domain_set)
|
||||
.unwrap_or(RoutingDomainSet::all()),
|
||||
)
|
||||
})
|
||||
fn operate<T, F>(&self, f: F) -> T
|
||||
where
|
||||
F: FnOnce(&RoutingTableInner, &BucketEntryInner) -> T,
|
||||
{
|
||||
let inner = &*self.common.routing_table.inner.read();
|
||||
self.common.entry.with(inner, f)
|
||||
}
|
||||
|
||||
// Accessors
|
||||
pub fn routing_table(&self) -> RoutingTable {
|
||||
self.routing_table.clone()
|
||||
}
|
||||
pub fn node_id(&self) -> DHTKey {
|
||||
self.node_id
|
||||
}
|
||||
pub fn has_updated_since_last_network_change(&self) -> bool {
|
||||
self.operate(|_rti, e| e.has_updated_since_last_network_change())
|
||||
}
|
||||
pub fn set_updated_since_last_network_change(&self) {
|
||||
self.operate_mut(|_rti, e| e.set_updated_since_last_network_change(true));
|
||||
}
|
||||
pub fn update_node_status(&self, node_status: NodeStatus) {
|
||||
self.operate_mut(|_rti, e| {
|
||||
e.update_node_status(node_status);
|
||||
});
|
||||
}
|
||||
pub fn min_max_version(&self) -> Option<(u8, u8)> {
|
||||
self.operate(|_rti, e| e.min_max_version())
|
||||
}
|
||||
pub fn set_min_max_version(&self, min_max_version: (u8, u8)) {
|
||||
self.operate_mut(|_rti, e| e.set_min_max_version(min_max_version))
|
||||
}
|
||||
pub fn state(&self, cur_ts: u64) -> BucketEntryState {
|
||||
self.operate(|_rti, e| e.state(cur_ts))
|
||||
}
|
||||
pub fn peer_stats(&self) -> PeerStats {
|
||||
self.operate(|_rti, e| e.peer_stats().clone())
|
||||
}
|
||||
|
||||
// Per-RoutingDomain accessors
|
||||
pub fn make_peer_info(&self, routing_domain: RoutingDomain) -> Option<PeerInfo> {
|
||||
self.operate(|_rti, e| e.make_peer_info(self.node_id(), routing_domain))
|
||||
}
|
||||
pub fn signed_node_info_has_valid_signature(&self, routing_domain: RoutingDomain) -> bool {
|
||||
self.operate(|_rti, e| {
|
||||
e.signed_node_info(routing_domain)
|
||||
.map(|sni| sni.has_valid_signature())
|
||||
.unwrap_or(false)
|
||||
})
|
||||
}
|
||||
pub fn has_seen_our_node_info(&self, routing_domain: RoutingDomain) -> bool {
|
||||
self.operate(|_rti, e| e.has_seen_our_node_info(routing_domain))
|
||||
}
|
||||
pub fn set_seen_our_node_info(&self, routing_domain: RoutingDomain) {
|
||||
self.operate_mut(|_rti, e| e.set_seen_our_node_info(routing_domain, true));
|
||||
}
|
||||
pub fn network_class(&self, routing_domain: RoutingDomain) -> Option<NetworkClass> {
|
||||
self.operate(|_rt, e| e.node_info(routing_domain).map(|n| n.network_class))
|
||||
}
|
||||
pub fn outbound_protocols(&self, routing_domain: RoutingDomain) -> Option<ProtocolTypeSet> {
|
||||
self.operate(|_rt, e| e.node_info(routing_domain).map(|n| n.outbound_protocols))
|
||||
}
|
||||
pub fn address_types(&self, routing_domain: RoutingDomain) -> Option<AddressTypeSet> {
|
||||
self.operate(|_rt, e| e.node_info(routing_domain).map(|n| n.address_types))
|
||||
}
|
||||
pub fn node_info_outbound_filter(&self, routing_domain: RoutingDomain) -> DialInfoFilter {
|
||||
let mut dif = DialInfoFilter::all();
|
||||
if let Some(outbound_protocols) = self.outbound_protocols(routing_domain) {
|
||||
dif = dif.with_protocol_type_set(outbound_protocols);
|
||||
}
|
||||
if let Some(address_types) = self.address_types(routing_domain) {
|
||||
dif = dif.with_address_type_set(address_types);
|
||||
}
|
||||
dif
|
||||
}
|
||||
pub fn relay(&self, routing_domain: RoutingDomain) -> Option<NodeRef> {
|
||||
let target_rpi = self.operate(|_rti, e| {
|
||||
e.node_info(routing_domain)
|
||||
.map(|n| n.relay_peer_info.as_ref().map(|pi| pi.as_ref().clone()))
|
||||
})?;
|
||||
target_rpi.and_then(|t| {
|
||||
// If relay is ourselves, then return None, because we can't relay through ourselves
|
||||
// and to contact this node we should have had an existing inbound connection
|
||||
if t.node_id.key == self.routing_table.node_id() {
|
||||
return None;
|
||||
}
|
||||
|
||||
// Register relay node and return noderef
|
||||
self.routing_table.register_node_with_signed_node_info(
|
||||
routing_domain,
|
||||
t.node_id.key,
|
||||
t.signed_node_info,
|
||||
false,
|
||||
)
|
||||
})
|
||||
}
|
||||
|
||||
// Filtered accessors
|
||||
pub fn first_filtered_dial_info_detail(&self) -> Option<DialInfoDetail> {
|
||||
let routing_domain_set = self.routing_domain_set();
|
||||
let dial_info_filter = self.dial_info_filter();
|
||||
|
||||
self.operate(|_rt, e| {
|
||||
for routing_domain in routing_domain_set {
|
||||
if let Some(ni) = e.node_info(routing_domain) {
|
||||
let filter = |did: &DialInfoDetail| did.matches_filter(&dial_info_filter);
|
||||
if let Some(did) = ni.first_filtered_dial_info_detail(filter) {
|
||||
return Some(did);
|
||||
}
|
||||
}
|
||||
}
|
||||
None
|
||||
})
|
||||
}
|
||||
|
||||
pub fn all_filtered_dial_info_details<F>(&self) -> Vec<DialInfoDetail> {
|
||||
let routing_domain_set = self.routing_domain_set();
|
||||
let dial_info_filter = self.dial_info_filter();
|
||||
|
||||
let mut out = Vec::new();
|
||||
self.operate(|_rt, e| {
|
||||
for routing_domain in routing_domain_set {
|
||||
if let Some(ni) = e.node_info(routing_domain) {
|
||||
let filter = |did: &DialInfoDetail| did.matches_filter(&dial_info_filter);
|
||||
if let Some(did) = ni.first_filtered_dial_info_detail(filter) {
|
||||
out.push(did);
|
||||
}
|
||||
}
|
||||
}
|
||||
});
|
||||
out.remove_duplicates();
|
||||
out
|
||||
}
|
||||
|
||||
pub fn last_connection(&self) -> Option<ConnectionDescriptor> {
|
||||
// Get the last connection and the last time we saw anything with this connection
|
||||
let (last_connection, last_seen) =
|
||||
self.operate(|rti, e| e.last_connection(rti, self.filter.clone()))?;
|
||||
|
||||
// Should we check the connection table?
|
||||
if last_connection.protocol_type().is_connection_oriented() {
|
||||
// Look the connection up in the connection manager and see if it's still there
|
||||
let connection_manager = self.routing_table.network_manager().connection_manager();
|
||||
connection_manager.get_connection(last_connection)?;
|
||||
} else {
|
||||
// If this is not connection oriented, then we check our last seen time
|
||||
// to see if this mapping has expired (beyond our timeout)
|
||||
let cur_ts = intf::get_timestamp();
|
||||
if (last_seen + (CONNECTIONLESS_TIMEOUT_SECS as u64 * 1_000_000u64)) < cur_ts {
|
||||
return None;
|
||||
}
|
||||
}
|
||||
Some(last_connection)
|
||||
}
|
||||
|
||||
pub fn clear_last_connections(&self) {
|
||||
self.operate_mut(|_rti, e| e.clear_last_connections())
|
||||
}
|
||||
|
||||
pub fn set_last_connection(&self, connection_descriptor: ConnectionDescriptor, ts: u64) {
|
||||
self.operate_mut(|_rti, e| e.set_last_connection(connection_descriptor, ts));
|
||||
self.routing_table
|
||||
.touch_recent_peer(self.node_id(), connection_descriptor);
|
||||
}
|
||||
|
||||
pub fn has_any_dial_info(&self) -> bool {
|
||||
self.operate(|_rti, e| {
|
||||
for rtd in RoutingDomain::all() {
|
||||
if let Some(ni) = e.node_info(rtd) {
|
||||
if ni.has_any_dial_info() {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
}
|
||||
false
|
||||
})
|
||||
}
|
||||
|
||||
pub fn stats_question_sent(&self, ts: u64, bytes: u64, expects_answer: bool) {
|
||||
self.operate_mut(|rti, e| {
|
||||
rti.self_transfer_stats_accounting.add_up(bytes);
|
||||
e.question_sent(ts, bytes, expects_answer);
|
||||
})
|
||||
}
|
||||
pub fn stats_question_rcvd(&self, ts: u64, bytes: u64) {
|
||||
self.operate_mut(|rti, e| {
|
||||
rti.self_transfer_stats_accounting.add_down(bytes);
|
||||
e.question_rcvd(ts, bytes);
|
||||
})
|
||||
}
|
||||
pub fn stats_answer_sent(&self, bytes: u64) {
|
||||
self.operate_mut(|rti, e| {
|
||||
rti.self_transfer_stats_accounting.add_up(bytes);
|
||||
e.answer_sent(bytes);
|
||||
})
|
||||
}
|
||||
pub fn stats_answer_rcvd(&self, send_ts: u64, recv_ts: u64, bytes: u64) {
|
||||
self.operate_mut(|rti, e| {
|
||||
rti.self_transfer_stats_accounting.add_down(bytes);
|
||||
rti.self_latency_stats_accounting
|
||||
.record_latency(recv_ts - send_ts);
|
||||
e.answer_rcvd(send_ts, recv_ts, bytes);
|
||||
})
|
||||
}
|
||||
pub fn stats_question_lost(&self) {
|
||||
self.operate_mut(|_rti, e| {
|
||||
e.question_lost();
|
||||
})
|
||||
}
|
||||
pub fn stats_failed_to_send(&self, ts: u64, expects_answer: bool) {
|
||||
self.operate_mut(|_rti, e| {
|
||||
e.failed_to_send(ts, expects_answer);
|
||||
})
|
||||
fn operate_mut<T, F>(&self, f: F) -> T
|
||||
where
|
||||
F: FnOnce(&mut RoutingTableInner, &mut BucketEntryInner) -> T,
|
||||
{
|
||||
let inner = &mut *self.common.routing_table.inner.write();
|
||||
self.common.entry.with_mut(inner, f)
|
||||
}
|
||||
}
|
||||
|
||||
impl Clone for NodeRef {
|
||||
fn clone(&self) -> Self {
|
||||
self.entry.ref_count.fetch_add(1u32, Ordering::Relaxed);
|
||||
self.common
|
||||
.entry
|
||||
.ref_count
|
||||
.fetch_add(1u32, Ordering::Relaxed);
|
||||
|
||||
Self {
|
||||
routing_table: self.routing_table.clone(),
|
||||
node_id: self.node_id,
|
||||
entry: self.entry.clone(),
|
||||
filter: self.filter.clone(),
|
||||
common: NodeRefBaseCommon {
|
||||
routing_table: self.common.routing_table.clone(),
|
||||
node_id: self.common.node_id,
|
||||
entry: self.common.entry.clone(),
|
||||
filter: self.common.filter.clone(),
|
||||
sequencing: self.common.sequencing,
|
||||
#[cfg(feature = "tracking")]
|
||||
track_id: e.track(),
|
||||
track_id: self.common.entry.write().track(),
|
||||
},
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl PartialEq for NodeRef {
|
||||
fn eq(&self, other: &Self) -> bool {
|
||||
self.node_id == other.node_id
|
||||
}
|
||||
}
|
||||
|
||||
impl Eq for NodeRef {}
|
||||
|
||||
impl fmt::Display for NodeRef {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
write!(f, "{}", self.node_id.encode())
|
||||
write!(f, "{}", self.common.node_id.encode())
|
||||
}
|
||||
}
|
||||
|
||||
impl fmt::Debug for NodeRef {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
f.debug_struct("NodeRef")
|
||||
.field("node_id", &self.node_id)
|
||||
.field("filter", &self.filter)
|
||||
.field("node_id", &self.common.node_id)
|
||||
.field("filter", &self.common.filter)
|
||||
.field("sequencing", &self.common.sequencing)
|
||||
.finish()
|
||||
}
|
||||
}
|
||||
@ -422,12 +457,138 @@ impl fmt::Debug for NodeRef {
|
||||
impl Drop for NodeRef {
|
||||
fn drop(&mut self) {
|
||||
#[cfg(feature = "tracking")]
|
||||
self.operate(|e| e.untrack(self.track_id));
|
||||
self.common.entry.write().untrack(self.track_id);
|
||||
|
||||
// drop the noderef and queue a bucket kick if it was the last one
|
||||
let new_ref_count = self.entry.ref_count.fetch_sub(1u32, Ordering::Relaxed) - 1;
|
||||
let new_ref_count = self
|
||||
.common
|
||||
.entry
|
||||
.ref_count
|
||||
.fetch_sub(1u32, Ordering::Relaxed)
|
||||
- 1;
|
||||
if new_ref_count == 0 {
|
||||
self.routing_table.queue_bucket_kick(self.node_id);
|
||||
self.common
|
||||
.routing_table
|
||||
.queue_bucket_kick(self.common.node_id);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
/// Locked reference to a routing table entry
|
||||
/// For internal use inside the RoutingTable module where you have
|
||||
/// already locked a RoutingTableInner
|
||||
/// Keeps entry in the routing table until all references are gone
|
||||
pub struct NodeRefLocked<'a> {
|
||||
inner: Mutex<&'a RoutingTableInner>,
|
||||
nr: NodeRef,
|
||||
}
|
||||
|
||||
impl<'a> NodeRefLocked<'a> {
|
||||
pub fn new(inner: &'a RoutingTableInner, nr: NodeRef) -> Self {
|
||||
Self {
|
||||
inner: Mutex::new(inner),
|
||||
nr,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a> NodeRefBase for NodeRefLocked<'a> {
|
||||
fn common(&self) -> &NodeRefBaseCommon {
|
||||
&self.nr.common
|
||||
}
|
||||
|
||||
fn common_mut(&mut self) -> &mut NodeRefBaseCommon {
|
||||
&mut self.nr.common
|
||||
}
|
||||
|
||||
fn operate<T, F>(&self, f: F) -> T
|
||||
where
|
||||
F: FnOnce(&RoutingTableInner, &BucketEntryInner) -> T,
|
||||
{
|
||||
let inner = &*self.inner.lock();
|
||||
self.nr.common.entry.with(inner, f)
|
||||
}
|
||||
|
||||
fn operate_mut<T, F>(&self, _f: F) -> T
|
||||
where
|
||||
F: FnOnce(&mut RoutingTableInner, &mut BucketEntryInner) -> T,
|
||||
{
|
||||
panic!("need to locked_mut() for this operation")
|
||||
}
|
||||
}
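// --- Illustrative sketch (not part of the original source): inside the
// routing table, where a RoutingTableInner read lock is already held, a
// NodeRef is wrapped with locked() so entry operations reuse that lock
// instead of re-locking the routing table.
fn example_locked_access(rti: &RoutingTableInner, nr: NodeRef) -> Option<NodeInfo> {
    // Wrap the NodeRef; operate() now uses the already-locked inner
    let nr_locked = nr.locked(rti);
    nr_locked.node_info(RoutingDomain::PublicInternet)
}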
|
||||
|
||||
impl<'a> fmt::Display for NodeRefLocked<'a> {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
write!(f, "{}", self.nr)
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a> fmt::Debug for NodeRefLocked<'a> {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
f.debug_struct("NodeRefLocked")
|
||||
.field("nr", &self.nr)
|
||||
.finish()
|
||||
}
|
||||
}
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
/// Mutable locked reference to a routing table entry
|
||||
/// For internal use inside the RoutingTable module where you have
|
||||
/// already locked a RoutingTableInner
|
||||
/// Keeps entry in the routing table until all references are gone
|
||||
pub struct NodeRefLockedMut<'a> {
|
||||
inner: Mutex<&'a mut RoutingTableInner>,
|
||||
nr: NodeRef,
|
||||
}
|
||||
|
||||
impl<'a> NodeRefLockedMut<'a> {
|
||||
pub fn new(inner: &'a mut RoutingTableInner, nr: NodeRef) -> Self {
|
||||
Self {
|
||||
inner: Mutex::new(inner),
|
||||
nr,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a> NodeRefBase for NodeRefLockedMut<'a> {
|
||||
fn common(&self) -> &NodeRefBaseCommon {
|
||||
&self.nr.common
|
||||
}
|
||||
|
||||
fn common_mut(&mut self) -> &mut NodeRefBaseCommon {
|
||||
&mut self.nr.common
|
||||
}
|
||||
|
||||
fn operate<T, F>(&self, f: F) -> T
|
||||
where
|
||||
F: FnOnce(&RoutingTableInner, &BucketEntryInner) -> T,
|
||||
{
|
||||
let inner = &*self.inner.lock();
|
||||
self.nr.common.entry.with(inner, f)
|
||||
}
|
||||
|
||||
fn operate_mut<T, F>(&self, f: F) -> T
|
||||
where
|
||||
F: FnOnce(&mut RoutingTableInner, &mut BucketEntryInner) -> T,
|
||||
{
|
||||
let inner = &mut *self.inner.lock();
|
||||
self.nr.common.entry.with_mut(inner, f)
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a> fmt::Display for NodeRefLockedMut<'a> {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
write!(f, "{}", self.nr)
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a> fmt::Debug for NodeRefLockedMut<'a> {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
f.debug_struct("NodeRefLockedMut")
|
||||
.field("nr", &self.nr)
|
||||
.finish()
|
||||
}
|
||||
}
|
||||
|
veilid-core/src/routing_table/node_ref_filter.rs (new file, 61 lines)
@ -0,0 +1,61 @@
|
||||
use super::*;
|
||||
|
||||
#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord)]
|
||||
pub struct NodeRefFilter {
|
||||
pub routing_domain_set: RoutingDomainSet,
|
||||
pub dial_info_filter: DialInfoFilter,
|
||||
}
|
||||
|
||||
impl Default for NodeRefFilter {
|
||||
fn default() -> Self {
|
||||
Self::new()
|
||||
}
|
||||
}
|
||||
|
||||
impl NodeRefFilter {
|
||||
pub fn new() -> Self {
|
||||
Self {
|
||||
routing_domain_set: RoutingDomainSet::all(),
|
||||
dial_info_filter: DialInfoFilter::all(),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn with_routing_domain(mut self, routing_domain: RoutingDomain) -> Self {
|
||||
self.routing_domain_set = routing_domain.into();
|
||||
self
|
||||
}
|
||||
pub fn with_routing_domain_set(mut self, routing_domain_set: RoutingDomainSet) -> Self {
|
||||
self.routing_domain_set = routing_domain_set;
|
||||
self
|
||||
}
|
||||
pub fn with_dial_info_filter(mut self, dial_info_filter: DialInfoFilter) -> Self {
|
||||
self.dial_info_filter = dial_info_filter;
|
||||
self
|
||||
}
|
||||
pub fn with_protocol_type(mut self, protocol_type: ProtocolType) -> Self {
|
||||
self.dial_info_filter = self.dial_info_filter.with_protocol_type(protocol_type);
|
||||
self
|
||||
}
|
||||
pub fn with_protocol_type_set(mut self, protocol_set: ProtocolTypeSet) -> Self {
|
||||
self.dial_info_filter = self.dial_info_filter.with_protocol_type_set(protocol_set);
|
||||
self
|
||||
}
|
||||
pub fn with_address_type(mut self, address_type: AddressType) -> Self {
|
||||
self.dial_info_filter = self.dial_info_filter.with_address_type(address_type);
|
||||
self
|
||||
}
|
||||
pub fn with_address_type_set(mut self, address_set: AddressTypeSet) -> Self {
|
||||
self.dial_info_filter = self.dial_info_filter.with_address_type_set(address_set);
|
||||
self
|
||||
}
|
||||
pub fn filtered(mut self, other_filter: &NodeRefFilter) -> Self {
|
||||
self.routing_domain_set &= other_filter.routing_domain_set;
|
||||
self.dial_info_filter = self
|
||||
.dial_info_filter
|
||||
.filtered(&other_filter.dial_info_filter);
|
||||
self
|
||||
}
|
||||
pub fn is_dead(&self) -> bool {
|
||||
self.dial_info_filter.is_dead() || self.routing_domain_set.is_empty()
|
||||
}
|
||||
}
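// --- Illustrative usage sketch (not part of the original source): the builder
// methods above compose a filter that restricts a NodeRef to a single routing
// domain and a single protocol type.
fn example_filter() -> NodeRefFilter {
    NodeRefFilter::new()
        .with_routing_domain(RoutingDomain::PublicInternet)
        .with_protocol_type(ProtocolType::UDP)
}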
|
veilid-core/src/routing_table/privacy.rs (new file, 174 lines)
@ -0,0 +1,174 @@
|
||||
use super::*;
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////////////////////////////
|
||||
// Compiled Privacy Objects
|
||||
|
||||
/// An encrypted private/safety route hop
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct RouteHopData {
|
||||
/// The nonce used in the encryption ENC(Xn,DH(PKn,SKapr))
|
||||
pub nonce: Nonce,
|
||||
/// The encrypted blob
|
||||
pub blob: Vec<u8>,
|
||||
}
|
||||
|
||||
/// How to find a route node
|
||||
#[derive(Clone, Debug)]
|
||||
pub enum RouteNode {
|
||||
/// Route node is optimized, no contact method information as this node id has been seen before
|
||||
NodeId(NodeId),
|
||||
/// Route node with full contact method information to ensure the peer is reachable
|
||||
PeerInfo(PeerInfo),
|
||||
}
|
||||
impl fmt::Display for RouteNode {
|
||||
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||
write!(
|
||||
f,
|
||||
"{}",
|
||||
match self {
|
||||
RouteNode::NodeId(x) => x.key.encode(),
|
||||
RouteNode::PeerInfo(pi) => pi.node_id.key.encode(),
|
||||
}
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
/// An unencrypted private/safety route hop
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct RouteHop {
|
||||
/// The location of the hop
|
||||
pub node: RouteNode,
|
||||
/// The encrypted blob to pass to the next hop as its data (None for stubs)
|
||||
pub next_hop: Option<RouteHopData>,
|
||||
}
|
||||
|
||||
/// The kind of hops a private route can have
|
||||
#[derive(Clone, Debug)]
|
||||
pub enum PrivateRouteHops {
|
||||
/// The first hop of a private route, unencrypted, route_hops == total hop count
|
||||
FirstHop(RouteHop),
|
||||
/// Private route internal node. Has > 0 private route hops left but < total hop count
|
||||
Data(RouteHopData),
|
||||
/// Private route has ended (hop count = 0)
|
||||
Empty,
|
||||
}
|
||||
|
||||
/// A private route for receiver privacy
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct PrivateRoute {
|
||||
/// The public key used for the entire route
|
||||
pub public_key: DHTKey,
|
||||
pub hop_count: u8,
|
||||
pub hops: PrivateRouteHops,
|
||||
}
|
||||
|
||||
impl PrivateRoute {
|
||||
/// Empty private route is the form used when receiving the last hop
|
||||
pub fn new_empty(public_key: DHTKey) -> Self {
|
||||
Self {
|
||||
public_key,
|
||||
hop_count: 0,
|
||||
hops: PrivateRouteHops::Empty,
|
||||
}
|
||||
}
|
||||
/// Stub route is the form used when no privacy is required, but you need to specify the destination for a safety route
|
||||
pub fn new_stub(public_key: DHTKey, node: RouteNode) -> Self {
|
||||
Self {
|
||||
public_key,
|
||||
hop_count: 1,
|
||||
hops: PrivateRouteHops::FirstHop(RouteHop {
|
||||
node,
|
||||
next_hop: None,
|
||||
}),
|
||||
}
|
||||
}
|
||||
|
||||
/// Remove the first unencrypted hop if possible
|
||||
pub fn pop_first_hop(&mut self) -> Option<RouteNode> {
|
||||
match &mut self.hops {
|
||||
PrivateRouteHops::FirstHop(first_hop) => {
|
||||
let first_hop_node = first_hop.node.clone();
|
||||
|
||||
// Reduce hop count
|
||||
if self.hop_count > 0 {
|
||||
self.hop_count -= 1;
|
||||
} else {
|
||||
error!("hop count should not be 0 for first hop");
|
||||
}
|
||||
|
||||
// Go to next hop
|
||||
self.hops = match first_hop.next_hop.take() {
|
||||
Some(rhd) => PrivateRouteHops::Data(rhd),
|
||||
None => PrivateRouteHops::Empty,
|
||||
};
|
||||
|
||||
return Some(first_hop_node);
|
||||
}
|
||||
PrivateRouteHops::Data(_) => return None,
|
||||
PrivateRouteHops::Empty => return None,
|
||||
}
|
||||
}
|
||||
}
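// --- Illustrative sketch (not part of the original source): a stub private
// route carries a single unencrypted hop and no next-hop data, so popping the
// first hop leaves the route empty. `public_key` and `node_id` are placeholders.
fn example_stub_route(public_key: DHTKey, node_id: NodeId) {
    let mut pr = PrivateRoute::new_stub(public_key, RouteNode::NodeId(node_id));
    assert_eq!(pr.hop_count, 1);
    // Popping the only hop returns its RouteNode and empties the route
    let first = pr.pop_first_hop();
    assert!(first.is_some());
    assert_eq!(pr.hop_count, 0);
    assert!(matches!(pr.hops, PrivateRouteHops::Empty));
}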
|
||||
|
||||
impl fmt::Display for PrivateRoute {
|
||||
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||
write!(
|
||||
f,
|
||||
"PR({:?}+{}{})",
|
||||
self.public_key,
|
||||
self.hop_count,
|
||||
match &self.hops {
|
||||
PrivateRouteHops::FirstHop(fh) => {
|
||||
format!("->{}", fh.node)
|
||||
}
|
||||
PrivateRouteHops::Data(_) => {
|
||||
"->?".to_owned()
|
||||
}
|
||||
PrivateRouteHops::Empty => {
|
||||
"".to_owned()
|
||||
}
|
||||
}
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug)]
|
||||
pub enum SafetyRouteHops {
|
||||
/// Has >= 1 safety route hops
|
||||
Data(RouteHopData),
|
||||
/// Has 0 safety route hops
|
||||
Private(PrivateRoute),
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct SafetyRoute {
|
||||
pub public_key: DHTKey,
|
||||
pub hop_count: u8,
|
||||
pub hops: SafetyRouteHops,
|
||||
}
|
||||
|
||||
impl SafetyRoute {
|
||||
pub fn new_stub(public_key: DHTKey, private_route: PrivateRoute) -> Self {
|
||||
assert!(matches!(private_route.hops, PrivateRouteHops::Data(_)));
|
||||
Self {
|
||||
public_key,
|
||||
hop_count: 0,
|
||||
hops: SafetyRouteHops::Private(private_route),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl fmt::Display for SafetyRoute {
|
||||
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||
write!(
|
||||
f,
|
||||
"SR({:?}+{}{})",
|
||||
self.public_key,
|
||||
self.hop_count,
|
||||
match &self.hops {
|
||||
SafetyRouteHops::Data(_) => "".to_owned(),
|
||||
SafetyRouteHops::Private(p) => format!("->{}", p),
|
||||
}
|
||||
)
|
||||
}
|
||||
}
|
veilid-core/src/routing_table/route_spec_store.rs (new file, 1515 lines)
File diff suppressed because it is too large
@ -3,8 +3,20 @@ use super::*;
|
||||
enum RoutingDomainChange {
|
||||
ClearDialInfoDetails,
|
||||
ClearRelayNode,
|
||||
SetRelayNode { relay_node: NodeRef },
|
||||
AddDialInfoDetail { dial_info_detail: DialInfoDetail },
|
||||
SetRelayNode {
|
||||
relay_node: NodeRef,
|
||||
},
|
||||
AddDialInfoDetail {
|
||||
dial_info_detail: DialInfoDetail,
|
||||
},
|
||||
SetupNetwork {
|
||||
outbound_protocols: ProtocolTypeSet,
|
||||
inbound_protocols: ProtocolTypeSet,
|
||||
address_types: AddressTypeSet,
|
||||
},
|
||||
SetNetworkClass {
|
||||
network_class: Option<NetworkClass>,
|
||||
},
|
||||
}
|
||||
|
||||
pub struct RoutingDomainEditor {
|
||||
@ -67,31 +79,54 @@ impl RoutingDomainEditor {
|
||||
|
||||
Ok(())
|
||||
}
|
||||
#[instrument(level = "debug", skip(self))]
|
||||
pub fn setup_network(
|
||||
&mut self,
|
||||
outbound_protocols: ProtocolTypeSet,
|
||||
inbound_protocols: ProtocolTypeSet,
|
||||
address_types: AddressTypeSet,
|
||||
) {
|
||||
self.changes.push(RoutingDomainChange::SetupNetwork {
|
||||
outbound_protocols,
|
||||
inbound_protocols,
|
||||
address_types,
|
||||
})
|
||||
}
|
||||
|
||||
#[instrument(level = "debug", skip(self))]
|
||||
pub fn set_network_class(&mut self, network_class: Option<NetworkClass>) {
|
||||
self.changes
|
||||
.push(RoutingDomainChange::SetNetworkClass { network_class })
|
||||
}
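// --- Illustrative usage sketch (not part of the original source): changes
// queued through the editor methods above are applied together by commit().
// `edit_routing_domain` is assumed to be the RoutingTable accessor that
// produces a RoutingDomainEditor; ProtocolTypeSet/AddressTypeSet are assumed
// to be EnumSet types with an all() constructor.
async fn example_edit(routing_table: RoutingTable) {
    let mut editor = routing_table.edit_routing_domain(RoutingDomain::PublicInternet);
    editor.setup_network(
        ProtocolTypeSet::all(), // outbound protocols
        ProtocolTypeSet::all(), // inbound protocols
        AddressTypeSet::all(),  // address types
    );
    editor.set_network_class(Some(NetworkClass::InboundCapable));
    // Applies all queued changes under a single write lock and, if anything
    // changed, clears caches and triggers node info updates
    editor.commit().await;
}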
|
||||
|
||||
#[instrument(level = "debug", skip(self))]
|
||||
pub async fn commit(self) {
|
||||
// No locking if we have nothing to do
|
||||
if self.changes.is_empty() {
|
||||
return;
|
||||
}
|
||||
|
||||
let mut changed = false;
|
||||
{
|
||||
let node_id = self.routing_table.node_id();
|
||||
|
||||
let mut inner = self.routing_table.inner.write();
|
||||
let inner = &mut *inner;
|
||||
RoutingTable::with_routing_domain_mut(inner, self.routing_domain, |detail| {
|
||||
inner.with_routing_domain_mut(self.routing_domain, |detail| {
|
||||
for change in self.changes {
|
||||
match change {
|
||||
RoutingDomainChange::ClearDialInfoDetails => {
|
||||
debug!("[{:?}] cleared dial info details", self.routing_domain);
|
||||
detail.clear_dial_info_details();
|
||||
detail.common_mut().clear_dial_info_details();
|
||||
changed = true;
|
||||
}
|
||||
RoutingDomainChange::ClearRelayNode => {
|
||||
debug!("[{:?}] cleared relay node", self.routing_domain);
|
||||
detail.set_relay_node(None);
|
||||
detail.common_mut().set_relay_node(None);
|
||||
changed = true;
|
||||
}
|
||||
RoutingDomainChange::SetRelayNode { relay_node } => {
|
||||
debug!("[{:?}] set relay node: {}", self.routing_domain, relay_node);
|
||||
detail.set_relay_node(Some(relay_node));
|
||||
detail.common_mut().set_relay_node(Some(relay_node));
|
||||
changed = true;
|
||||
}
|
||||
RoutingDomainChange::AddDialInfoDetail { dial_info_detail } => {
|
||||
@ -99,27 +134,85 @@ impl RoutingDomainEditor {
|
||||
"[{:?}] add dial info detail: {:?}",
|
||||
self.routing_domain, dial_info_detail
|
||||
);
|
||||
detail.add_dial_info_detail(dial_info_detail.clone());
|
||||
detail
|
||||
.common_mut()
|
||||
.add_dial_info_detail(dial_info_detail.clone());
|
||||
|
||||
info!(
|
||||
"{:?} Dial Info: {}",
|
||||
"{:?} Dial Info: {}@{}",
|
||||
self.routing_domain,
|
||||
NodeDialInfo {
|
||||
node_id: NodeId::new(node_id),
|
||||
dial_info: dial_info_detail.dial_info
|
||||
}
|
||||
.to_string(),
|
||||
NodeId::new(node_id),
|
||||
dial_info_detail.dial_info
|
||||
);
|
||||
changed = true;
|
||||
}
|
||||
RoutingDomainChange::SetupNetwork {
|
||||
outbound_protocols,
|
||||
inbound_protocols,
|
||||
address_types,
|
||||
} => {
|
||||
let old_outbound_protocols = detail.common().outbound_protocols();
|
||||
let old_inbound_protocols = detail.common().inbound_protocols();
|
||||
let old_address_types = detail.common().address_types();
|
||||
|
||||
let this_changed = old_outbound_protocols != outbound_protocols
|
||||
|| old_inbound_protocols != inbound_protocols
|
||||
|| old_address_types != address_types;
|
||||
|
||||
debug!(
|
||||
"[{:?}] setup network: {:?} {:?} {:?}",
|
||||
self.routing_domain,
|
||||
outbound_protocols,
|
||||
inbound_protocols,
|
||||
address_types
|
||||
);
|
||||
|
||||
detail.common_mut().setup_network(
|
||||
outbound_protocols,
|
||||
inbound_protocols,
|
||||
address_types,
|
||||
);
|
||||
if this_changed {
|
||||
changed = true;
|
||||
}
|
||||
}
|
||||
RoutingDomainChange::SetNetworkClass { network_class } => {
|
||||
let old_network_class = detail.common().network_class();
|
||||
|
||||
let this_changed = old_network_class != network_class;
|
||||
|
||||
debug!(
|
||||
"[{:?}] set network class: {:?}",
|
||||
self.routing_domain, network_class,
|
||||
);
|
||||
|
||||
detail.common_mut().set_network_class(network_class);
|
||||
if this_changed {
|
||||
changed = true;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
if changed {
|
||||
// Clear our 'peer info' cache, the peerinfo for this routing domain will get regenerated next time it is asked for
|
||||
detail.common_mut().clear_cache()
|
||||
}
|
||||
});
|
||||
if changed {
|
||||
RoutingTable::reset_all_seen_our_node_info(inner, self.routing_domain);
|
||||
RoutingTable::reset_all_updated_since_last_network_change(inner);
|
||||
// Mark that nothing in the routing table has seen our new node info
|
||||
inner.reset_all_seen_our_node_info(self.routing_domain);
|
||||
// Reset the 'updated since last network change' flag on all entries
|
||||
inner.reset_all_updated_since_last_network_change();
|
||||
}
|
||||
}
|
||||
// Clear the routespecstore cache if our PublicInternet dial info has changed
|
||||
if changed {
|
||||
if self.routing_domain == RoutingDomain::PublicInternet {
|
||||
let rss = self.routing_table.route_spec_store();
|
||||
rss.reset();
|
||||
}
|
||||
}
|
||||
// Send our updated node info to all the nodes in the routing table
|
||||
if changed && self.send_node_info_updates {
|
||||
let network_manager = self.routing_table.unlocked_inner.network_manager.clone();
|
||||
network_manager
|
||||
|
@ -1,62 +1,422 @@
|
||||
use super::*;
|
||||
|
||||
/// Mechanism required to contact another node
|
||||
#[derive(Clone, Debug)]
|
||||
pub enum ContactMethod {
|
||||
/// Node is not reachable by any means
|
||||
Unreachable,
|
||||
/// Connection should have already existed
|
||||
Existing,
|
||||
/// Contact the node directly
|
||||
Direct(DialInfo),
|
||||
/// Request via signal the node connect back directly (relay, target)
|
||||
SignalReverse(DHTKey, DHTKey),
|
||||
/// Request via signal the node negotiate a hole punch (relay, target_node)
|
||||
SignalHolePunch(DHTKey, DHTKey),
|
||||
/// Must use an inbound relay to reach the node
|
||||
InboundRelay(DHTKey),
|
||||
/// Must use outbound relay to reach the node
|
||||
OutboundRelay(DHTKey),
|
||||
}
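// --- Illustrative sketch (not part of the original source): a caller would
// typically dispatch on the contact method chosen for a destination node.
// The bodies are placeholders for whatever send/signal helpers the caller has.
fn example_dispatch(cm: ContactMethod) {
    match cm {
        ContactMethod::Direct(_dial_info) => { /* connect or send directly */ }
        ContactMethod::SignalReverse(_relay, _target) => { /* ask target to connect back */ }
        ContactMethod::SignalHolePunch(_relay, _target) => { /* negotiate a UDP hole punch */ }
        ContactMethod::InboundRelay(_relay) | ContactMethod::OutboundRelay(_relay) => {
            /* route the message through the relay */
        }
        ContactMethod::Existing => { /* reuse an already-established connection */ }
        ContactMethod::Unreachable => { /* give up or defer */ }
    }
}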
|
||||
|
||||
#[derive(Debug)]
|
||||
pub struct RoutingDomainDetailCommon {
|
||||
routing_domain: RoutingDomain,
|
||||
network_class: Option<NetworkClass>,
|
||||
outbound_protocols: ProtocolTypeSet,
|
||||
inbound_protocols: ProtocolTypeSet,
|
||||
address_types: AddressTypeSet,
|
||||
relay_node: Option<NodeRef>,
|
||||
dial_info_details: Vec<DialInfoDetail>,
|
||||
// caches
|
||||
cached_peer_info: Mutex<Option<PeerInfo>>,
|
||||
}
|
||||
|
||||
impl RoutingDomainDetailCommon {
|
||||
pub fn new(routing_domain: RoutingDomain) -> Self {
|
||||
Self {
|
||||
routing_domain,
|
||||
network_class: Default::default(),
|
||||
outbound_protocols: Default::default(),
|
||||
inbound_protocols: Default::default(),
|
||||
address_types: Default::default(),
|
||||
relay_node: Default::default(),
|
||||
dial_info_details: Default::default(),
|
||||
cached_peer_info: Mutex::new(Default::default()),
|
||||
}
|
||||
}
|
||||
|
||||
// Set from network manager
|
||||
pub(super) fn setup_network(
|
||||
&mut self,
|
||||
outbound_protocols: ProtocolTypeSet,
|
||||
inbound_protocols: ProtocolTypeSet,
|
||||
address_types: AddressTypeSet,
|
||||
) {
|
||||
self.outbound_protocols = outbound_protocols;
|
||||
self.inbound_protocols = inbound_protocols;
|
||||
self.address_types = address_types;
|
||||
self.clear_cache();
|
||||
}
|
||||
|
||||
pub(super) fn set_network_class(&mut self, network_class: Option<NetworkClass>) {
|
||||
self.network_class = network_class;
|
||||
self.clear_cache();
|
||||
}
|
||||
pub fn network_class(&self) -> Option<NetworkClass> {
|
||||
self.network_class
|
||||
}
|
||||
pub fn outbound_protocols(&self) -> ProtocolTypeSet {
|
||||
self.outbound_protocols
|
||||
}
|
||||
pub fn inbound_protocols(&self) -> ProtocolTypeSet {
|
||||
self.inbound_protocols
|
||||
}
|
||||
pub fn address_types(&self) -> AddressTypeSet {
|
||||
self.address_types
|
||||
}
|
||||
pub fn relay_node(&self) -> Option<NodeRef> {
|
||||
self.relay_node.clone()
|
||||
}
|
||||
pub(super) fn set_relay_node(&mut self, opt_relay_node: Option<NodeRef>) {
|
||||
self.relay_node = opt_relay_node.map(|nr| {
|
||||
nr.filtered_clone(NodeRefFilter::new().with_routing_domain(self.routing_domain))
|
||||
});
|
||||
self.clear_cache();
|
||||
}
|
||||
pub fn dial_info_details(&self) -> &Vec<DialInfoDetail> {
|
||||
&self.dial_info_details
|
||||
}
|
||||
pub(super) fn clear_dial_info_details(&mut self) {
|
||||
self.dial_info_details.clear();
|
||||
self.clear_cache();
|
||||
}
|
||||
pub(super) fn add_dial_info_detail(&mut self, did: DialInfoDetail) {
|
||||
self.dial_info_details.push(did);
|
||||
self.dial_info_details.sort();
|
||||
self.clear_cache();
|
||||
}
|
||||
|
||||
pub fn has_valid_own_node_info(&self) -> bool {
|
||||
self.network_class.unwrap_or(NetworkClass::Invalid) != NetworkClass::Invalid
|
||||
}
|
||||
|
||||
fn make_peer_info(&self, rti: &RoutingTableInner) -> PeerInfo {
|
||||
let node_info = NodeInfo {
|
||||
network_class: self.network_class.unwrap_or(NetworkClass::Invalid),
|
||||
outbound_protocols: self.outbound_protocols,
|
||||
address_types: self.address_types,
|
||||
min_version: MIN_CRYPTO_VERSION,
|
||||
max_version: MAX_CRYPTO_VERSION,
|
||||
dial_info_detail_list: self.dial_info_details.clone(),
|
||||
};
|
||||
|
||||
let relay_info = self
|
||||
.relay_node
|
||||
.as_ref()
|
||||
.and_then(|rn| {
|
||||
let opt_relay_pi = rn.locked(rti).make_peer_info(self.routing_domain);
|
||||
if let Some(relay_pi) = opt_relay_pi {
|
||||
match relay_pi.signed_node_info {
|
||||
SignedNodeInfo::Direct(d) => Some((relay_pi.node_id, d)),
|
||||
SignedNodeInfo::Relayed(_) => {
|
||||
warn!("relay node should not have a relay itself! if this happens, a relay updated its signed node info and became a relay, which should cause the relay to be dropped");
|
||||
None
|
||||
},
|
||||
}
|
||||
} else {
|
||||
None
|
||||
}
|
||||
});
|
||||
|
||||
let signed_node_info = match relay_info {
|
||||
Some((relay_id, relay_sdni)) => SignedNodeInfo::Relayed(
|
||||
SignedRelayedNodeInfo::with_secret(
|
||||
NodeId::new(rti.unlocked_inner.node_id),
|
||||
node_info,
|
||||
relay_id,
|
||||
relay_sdni,
|
||||
&rti.unlocked_inner.node_id_secret,
|
||||
)
|
||||
.unwrap(),
|
||||
),
|
||||
None => SignedNodeInfo::Direct(
|
||||
SignedDirectNodeInfo::with_secret(
|
||||
NodeId::new(rti.unlocked_inner.node_id),
|
||||
node_info,
|
||||
&rti.unlocked_inner.node_id_secret,
|
||||
)
|
||||
.unwrap()
|
||||
),
|
||||
};
|
||||
|
||||
PeerInfo::new(NodeId::new(rti.unlocked_inner.node_id), signed_node_info)
|
||||
}
|
||||
|
||||
pub fn with_peer_info<F, R>(&self, rti: &RoutingTableInner, f: F) -> R
|
||||
where
|
||||
F: FnOnce(&PeerInfo) -> R,
|
||||
{
|
||||
let mut cpi = self.cached_peer_info.lock();
|
||||
if cpi.is_none() {
|
||||
// Regenerate peer info
|
||||
let pi = self.make_peer_info(rti);
|
||||
|
||||
// Cache the peer info
|
||||
*cpi = Some(pi);
|
||||
}
|
||||
f(cpi.as_ref().unwrap())
|
||||
}
|
||||
|
||||
pub fn inbound_dial_info_filter(&self) -> DialInfoFilter {
|
||||
DialInfoFilter::all()
|
||||
.with_protocol_type_set(self.inbound_protocols)
|
||||
.with_address_type_set(self.address_types)
|
||||
}
|
||||
pub fn outbound_dial_info_filter(&self) -> DialInfoFilter {
|
||||
DialInfoFilter::all()
|
||||
.with_protocol_type_set(self.outbound_protocols)
|
||||
.with_address_type_set(self.address_types)
|
||||
}
|
||||
|
||||
pub(super) fn clear_cache(&self) {
|
||||
*self.cached_peer_info.lock() = None;
|
||||
}
|
||||
}
|
||||
|
||||
/// General trait for all routing domains
|
||||
pub trait RoutingDomainDetail {
|
||||
// Common accessors
|
||||
fn common(&self) -> &RoutingDomainDetailCommon;
|
||||
fn common_mut(&mut self) -> &mut RoutingDomainDetailCommon;
|
||||
|
||||
/// Can this routing domain contain a particular address
|
||||
fn can_contain_address(&self, address: Address) -> bool;
|
||||
fn relay_node(&self) -> Option<NodeRef>;
|
||||
fn set_relay_node(&mut self, opt_relay_node: Option<NodeRef>);
|
||||
fn dial_info_details(&self) -> &Vec<DialInfoDetail>;
|
||||
fn clear_dial_info_details(&mut self);
|
||||
fn add_dial_info_detail(&mut self, did: DialInfoDetail);
|
||||
|
||||
/// Get the contact method required for node A to reach node B in this routing domain
|
||||
/// Routing table must be locked for reading to use this function
|
||||
fn get_contact_method(
|
||||
&self,
|
||||
rti: &RoutingTableInner,
|
||||
peer_a: &PeerInfo,
|
||||
peer_b: &PeerInfo,
|
||||
dial_info_filter: DialInfoFilter,
|
||||
sequencing: Sequencing,
|
||||
) -> ContactMethod;
|
||||
}
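// --- Illustrative sketch (not part of the original source): asking a routing
// domain how node A should contact node B, with no dial info restrictions and
// no sequencing preference.
fn example_contact_method(
    detail: &dyn RoutingDomainDetail,
    rti: &RoutingTableInner,
    peer_a: &PeerInfo,
    peer_b: &PeerInfo,
) -> ContactMethod {
    detail.get_contact_method(
        rti,
        peer_a,
        peer_b,
        DialInfoFilter::all(),
        Sequencing::NoPreference,
    )
}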
|
||||
|
||||
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
/// Public Internet routing domain internals
|
||||
#[derive(Debug, Default)]
|
||||
#[derive(Debug)]
|
||||
pub struct PublicInternetRoutingDomainDetail {
|
||||
/// An optional node we relay through for this domain
|
||||
relay_node: Option<NodeRef>,
|
||||
/// The dial infos on this domain we can be reached by
|
||||
dial_info_details: Vec<DialInfoDetail>,
|
||||
/// Common implementation for all routing domains
|
||||
common: RoutingDomainDetailCommon,
|
||||
}
|
||||
|
||||
impl Default for PublicInternetRoutingDomainDetail {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
common: RoutingDomainDetailCommon::new(RoutingDomain::PublicInternet),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn first_filtered_dial_info_detail(
|
||||
from_node: &NodeInfo,
|
||||
to_node: &NodeInfo,
|
||||
dial_info_filter: &DialInfoFilter,
|
||||
sequencing: Sequencing,
|
||||
) -> Option<DialInfoDetail> {
|
||||
let dial_info_filter = dial_info_filter.clone().filtered(
|
||||
&DialInfoFilter::all()
|
||||
.with_address_type_set(from_node.address_types)
|
||||
.with_protocol_type_set(from_node.outbound_protocols),
|
||||
);
|
||||
|
||||
// Get first filtered dialinfo
|
||||
let (sort, dial_info_filter) = match sequencing {
|
||||
Sequencing::NoPreference => (None, dial_info_filter),
|
||||
Sequencing::PreferOrdered => (
|
||||
Some(DialInfoDetail::ordered_sequencing_sort),
|
||||
dial_info_filter,
|
||||
),
|
||||
Sequencing::EnsureOrdered => (
|
||||
Some(DialInfoDetail::ordered_sequencing_sort),
|
||||
dial_info_filter.filtered(
|
||||
&DialInfoFilter::all().with_protocol_type_set(ProtocolType::all_ordered_set()),
|
||||
),
|
||||
),
|
||||
};
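// Note (added for clarity): NoPreference keeps the dial info unsorted and
// unrestricted; PreferOrdered sorts connection-oriented (ordered) dial info
// first but still allows UDP; EnsureOrdered additionally filters out any
// protocol types that are not ordered.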
|
||||
// If the filter is dead then we won't be able to connect
|
||||
if dial_info_filter.is_dead() {
|
||||
return None;
|
||||
}
|
||||
|
||||
let direct_filter = |did: &DialInfoDetail| did.matches_filter(&dial_info_filter);
|
||||
|
||||
// Get the best match dial info for node B if we have it
|
||||
to_node.first_filtered_dial_info_detail(sort, direct_filter)
|
||||
}
|
||||
|
||||
impl RoutingDomainDetail for PublicInternetRoutingDomainDetail {
|
||||
fn common(&self) -> &RoutingDomainDetailCommon {
|
||||
&self.common
|
||||
}
|
||||
fn common_mut(&mut self) -> &mut RoutingDomainDetailCommon {
|
||||
&mut self.common
|
||||
}
|
||||
fn can_contain_address(&self, address: Address) -> bool {
|
||||
address.is_global()
|
||||
}
|
||||
fn relay_node(&self) -> Option<NodeRef> {
|
||||
self.relay_node.clone()
|
||||
fn get_contact_method(
|
||||
&self,
|
||||
_rti: &RoutingTableInner,
|
||||
peer_a: &PeerInfo,
|
||||
peer_b: &PeerInfo,
|
||||
dial_info_filter: DialInfoFilter,
|
||||
sequencing: Sequencing,
|
||||
) -> ContactMethod {
|
||||
// Get the nodeinfos for convenience
|
||||
let node_a = peer_a.signed_node_info.node_info();
|
||||
let node_b = peer_b.signed_node_info.node_info();
|
||||
|
||||
// Get the best match dial info for node B if we have it
|
||||
if let Some(target_did) =
|
||||
first_filtered_dial_info_detail(node_a, node_b, &dial_info_filter, sequencing)
|
||||
{
|
||||
// Do we need to signal before going inbound?
|
||||
if !target_did.class.requires_signal() {
|
||||
// Go direct without signaling
|
||||
return ContactMethod::Direct(target_did.dial_info);
|
||||
}
|
||||
fn set_relay_node(&mut self, opt_relay_node: Option<NodeRef>) {
|
||||
self.relay_node = opt_relay_node.map(|nr| {
|
||||
nr.filtered_clone(
|
||||
NodeRefFilter::new().with_routing_domain(RoutingDomain::PublicInternet),
|
||||
|
||||
// Get the target's inbound relay, it must have one or it is not reachable
|
||||
if let Some(node_b_relay) = peer_b.signed_node_info.relay_info() {
|
||||
let node_b_relay_id = peer_b.signed_node_info.relay_id().unwrap();
|
||||
// Note that relay_peer_info could be node_a, in which case a connection already exists
|
||||
// and we shouldn't have even gotten here
|
||||
if node_b_relay_id.key == peer_a.node_id.key {
|
||||
return ContactMethod::Existing;
|
||||
}
|
||||
|
||||
// Can node A reach the inbound relay directly?
|
||||
if first_filtered_dial_info_detail(
|
||||
node_a,
|
||||
node_b_relay,
|
||||
&dial_info_filter,
|
||||
sequencing,
|
||||
)
|
||||
})
|
||||
.is_some()
|
||||
{
|
||||
// Can node A receive anything inbound ever?
|
||||
if matches!(node_a.network_class, NetworkClass::InboundCapable) {
|
||||
///////// Reverse connection
|
||||
|
||||
// Get the best match dial info for an reverse inbound connection from node B to node A
|
||||
if let Some(reverse_did) = first_filtered_dial_info_detail(
|
||||
node_b,
|
||||
node_a,
|
||||
&dial_info_filter,
|
||||
sequencing,
|
||||
) {
|
||||
// Ensure we aren't on the same public IP address (no hairpin nat)
|
||||
if reverse_did.dial_info.to_ip_addr()
|
||||
!= target_did.dial_info.to_ip_addr()
|
||||
{
|
||||
// Can we receive a direct reverse connection?
|
||||
if !reverse_did.class.requires_signal() {
|
||||
return ContactMethod::SignalReverse(
|
||||
node_b_relay_id.key,
|
||||
peer_b.node_id.key,
|
||||
);
|
||||
}
|
||||
fn dial_info_details(&self) -> &Vec<DialInfoDetail> {
|
||||
&self.dial_info_details
|
||||
}
|
||||
fn clear_dial_info_details(&mut self) {
|
||||
self.dial_info_details.clear();
|
||||
}
|
||||
fn add_dial_info_detail(&mut self, did: DialInfoDetail) {
|
||||
self.dial_info_details.push(did);
|
||||
self.dial_info_details.sort();
|
||||
|
||||
///////// UDP hole-punch
|
||||
|
||||
// Does node B have a direct udp dialinfo node A can reach?
|
||||
let udp_dial_info_filter = dial_info_filter
|
||||
.clone()
|
||||
.filtered(&DialInfoFilter::all().with_protocol_type(ProtocolType::UDP));
|
||||
if let Some(target_udp_did) = first_filtered_dial_info_detail(
|
||||
node_a,
|
||||
node_b,
|
||||
&udp_dial_info_filter,
|
||||
sequencing,
|
||||
) {
|
||||
// Does node A have a direct udp dialinfo that node B can reach?
|
||||
if let Some(reverse_udp_did) = first_filtered_dial_info_detail(
|
||||
node_b,
|
||||
node_a,
|
||||
&udp_dial_info_filter,
|
||||
sequencing,
|
||||
) {
|
||||
// Ensure we aren't on the same public IP address (no hairpin nat)
|
||||
if reverse_udp_did.dial_info.to_ip_addr()
|
||||
!= target_udp_did.dial_info.to_ip_addr()
|
||||
{
|
||||
// The target and ourselves have a udp dialinfo that they can reach
|
||||
return ContactMethod::SignalHolePunch(
|
||||
node_b_relay_id.key,
|
||||
peer_a.node_id.key,
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
// Otherwise we have to inbound relay
|
||||
}
|
||||
|
||||
return ContactMethod::InboundRelay(node_b_relay_id.key);
|
||||
}
|
||||
}
|
||||
}
|
||||
// If the node B has no direct dial info, it needs to have an inbound relay
|
||||
else if let Some(node_b_relay) = peer_b.signed_node_info.relay_info() {
|
||||
let node_b_relay_id = peer_b.signed_node_info.relay_id().unwrap();
|
||||
|
||||
// Can we reach the full relay?
|
||||
if first_filtered_dial_info_detail(
|
||||
node_a,
|
||||
&node_b_relay,
|
||||
&dial_info_filter,
|
||||
sequencing,
|
||||
)
|
||||
.is_some()
|
||||
{
|
||||
return ContactMethod::InboundRelay(node_b_relay_id.key);
|
||||
}
|
||||
}
|
||||
|
||||
// If node A can't reach the node by other means, it may need to use its own relay
|
||||
if let Some(node_a_relay_id) = peer_a.signed_node_info.relay_id() {
|
||||
return ContactMethod::OutboundRelay(node_a_relay_id.key);
|
||||
}
|
||||
|
||||
ContactMethod::Unreachable
|
||||
}
|
||||
}
|
||||
|
||||
/// Local Network routing domain internals
#[derive(Debug, Default)]
pub struct LocalInternetRoutingDomainDetail {
/// An optional node we relay through for this domain
relay_node: Option<NodeRef>,
/// The dial infos on this domain we can be reached by
dial_info_details: Vec<DialInfoDetail>,
#[derive(Debug)]
pub struct LocalNetworkRoutingDomainDetail {
/// The local networks this domain will communicate with
local_networks: Vec<(IpAddr, IpAddr)>,
/// Common implementation for all routing domains
common: RoutingDomainDetailCommon,
}

impl LocalInternetRoutingDomainDetail {
impl Default for LocalNetworkRoutingDomainDetail {
fn default() -> Self {
Self {
local_networks: Default::default(),
common: RoutingDomainDetailCommon::new(RoutingDomain::LocalNetwork),
}
}
}

impl LocalNetworkRoutingDomainDetail {
pub fn set_local_networks(&mut self, mut local_networks: Vec<(IpAddr, IpAddr)>) -> bool {
local_networks.sort();
if local_networks == self.local_networks {
@ -67,7 +427,13 @@ impl LocalInternetRoutingDomainDetail {
}
}

impl RoutingDomainDetail for LocalInternetRoutingDomainDetail {
impl RoutingDomainDetail for LocalNetworkRoutingDomainDetail {
fn common(&self) -> &RoutingDomainDetailCommon {
&self.common
}
fn common_mut(&mut self) -> &mut RoutingDomainDetailCommon {
&mut self.common
}
fn can_contain_address(&self, address: Address) -> bool {
let ip = address.to_ip_addr();
for localnet in &self.local_networks {
@ -77,22 +443,48 @@ impl RoutingDomainDetail for LocalInternetRoutingDomainDetail {
}
false
}
fn relay_node(&self) -> Option<NodeRef> {
self.relay_node.clone()

fn get_contact_method(
&self,
_rti: &RoutingTableInner,
peer_a: &PeerInfo,
peer_b: &PeerInfo,
dial_info_filter: DialInfoFilter,
sequencing: Sequencing,
) -> ContactMethod {
// Scope the filter down to protocols node A can do outbound
let dial_info_filter = dial_info_filter.filtered(
&DialInfoFilter::all()
.with_address_type_set(peer_a.signed_node_info.node_info().address_types)
.with_protocol_type_set(peer_a.signed_node_info.node_info().outbound_protocols),
);

// Get first filtered dialinfo
let (sort, dial_info_filter) = match sequencing {
Sequencing::NoPreference => (None, dial_info_filter),
Sequencing::PreferOrdered => (
Some(DialInfoDetail::ordered_sequencing_sort),
dial_info_filter,
),
Sequencing::EnsureOrdered => (
Some(DialInfoDetail::ordered_sequencing_sort),
dial_info_filter.filtered(
&DialInfoFilter::all().with_protocol_type_set(ProtocolType::all_ordered_set()),
),
),
};
// If the filter is dead then we won't be able to connect
if dial_info_filter.is_dead() {
return ContactMethod::Unreachable;
}
fn set_relay_node(&mut self, opt_relay_node: Option<NodeRef>) {
self.relay_node = opt_relay_node.map(|nr| {
nr.filtered_clone(NodeRefFilter::new().with_routing_domain(RoutingDomain::LocalNetwork))
});

let filter = |did: &DialInfoDetail| did.matches_filter(&dial_info_filter);

let opt_target_did = peer_b.signed_node_info.node_info().first_filtered_dial_info_detail(sort, filter);
if let Some(target_did) = opt_target_did {
return ContactMethod::Direct(target_did.dial_info);
}
fn dial_info_details(&self) -> &Vec<DialInfoDetail> {
&self.dial_info_details
}
fn clear_dial_info_details(&mut self) {
self.dial_info_details.clear();
}
fn add_dial_info_detail(&mut self, did: DialInfoDetail) {
self.dial_info_details.push(did);
self.dial_info_details.sort();

ContactMethod::Unreachable
}
}
999
veilid-core/src/routing_table/routing_table_inner.rs
Normal file
@ -0,0 +1,999 @@
use super::*;
|
||||
|
||||
const RECENT_PEERS_TABLE_SIZE: usize = 64;
|
||||
|
||||
//////////////////////////////////////////////////////////////////////////
|
||||
|
||||
#[derive(Debug, Clone, Copy)]
|
||||
pub struct RecentPeersEntry {
|
||||
pub last_connection: ConnectionDescriptor,
|
||||
}
|
||||
|
||||
/// RoutingTable rwlock-internal data
|
||||
pub struct RoutingTableInner {
|
||||
/// Extra pointer to unlocked members to simplify access
|
||||
pub(super) unlocked_inner: Arc<RoutingTableUnlockedInner>,
|
||||
/// Routing table buckets that hold entries
|
||||
pub(super) buckets: Vec<Bucket>,
|
||||
/// A fast counter for the number of entries in the table, total
|
||||
pub(super) bucket_entry_count: usize,
|
||||
/// The public internet routing domain
|
||||
pub(super) public_internet_routing_domain: PublicInternetRoutingDomainDetail,
|
||||
/// The dial info we use on the local network
|
||||
pub(super) local_network_routing_domain: LocalNetworkRoutingDomainDetail,
|
||||
/// Interim accounting mechanism for this node's RPC latency to any other node
|
||||
pub(super) self_latency_stats_accounting: LatencyStatsAccounting,
|
||||
/// Interim accounting mechanism for the total bandwidth to/from this node
|
||||
pub(super) self_transfer_stats_accounting: TransferStatsAccounting,
|
||||
/// Statistics about the total bandwidth to/from this node
|
||||
pub(super) self_transfer_stats: TransferStatsDownUp,
|
||||
/// Peers we have recently communicated with
|
||||
pub(super) recent_peers: LruCache<DHTKey, RecentPeersEntry>,
|
||||
/// Storage for private/safety RouteSpecs
|
||||
pub(super) route_spec_store: Option<RouteSpecStore>,
|
||||
}
|
||||
|
||||
impl RoutingTableInner {
|
||||
pub(super) fn new(unlocked_inner: Arc<RoutingTableUnlockedInner>) -> RoutingTableInner {
|
||||
RoutingTableInner {
|
||||
unlocked_inner,
|
||||
buckets: Vec::new(),
|
||||
public_internet_routing_domain: PublicInternetRoutingDomainDetail::default(),
|
||||
local_network_routing_domain: LocalNetworkRoutingDomainDetail::default(),
|
||||
bucket_entry_count: 0,
|
||||
self_latency_stats_accounting: LatencyStatsAccounting::new(),
|
||||
self_transfer_stats_accounting: TransferStatsAccounting::new(),
|
||||
self_transfer_stats: TransferStatsDownUp::default(),
|
||||
recent_peers: LruCache::new(RECENT_PEERS_TABLE_SIZE),
|
||||
route_spec_store: None,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn network_manager(&self) -> NetworkManager {
|
||||
self.unlocked_inner.network_manager.clone()
|
||||
}
|
||||
pub fn rpc_processor(&self) -> RPCProcessor {
|
||||
self.network_manager().rpc_processor()
|
||||
}
|
||||
|
||||
pub fn node_id(&self) -> DHTKey {
|
||||
self.unlocked_inner.node_id
|
||||
}
|
||||
|
||||
pub fn node_id_secret(&self) -> DHTKeySecret {
|
||||
self.unlocked_inner.node_id_secret
|
||||
}
|
||||
|
||||
pub fn config(&self) -> VeilidConfig {
|
||||
self.unlocked_inner.config.clone()
|
||||
}
|
||||
|
||||
pub fn transfer_stats_accounting(&mut self) -> &mut TransferStatsAccounting {
|
||||
&mut self.self_transfer_stats_accounting
|
||||
}
|
||||
pub fn latency_stats_accounting(&mut self) -> &mut LatencyStatsAccounting {
|
||||
&mut self.self_latency_stats_accounting
|
||||
}
|
||||
|
||||
pub fn routing_domain_for_address(&self, address: Address) -> Option<RoutingDomain> {
|
||||
for rd in RoutingDomain::all() {
|
||||
let can_contain = self.with_routing_domain(rd, |rdd| rdd.can_contain_address(address));
|
||||
if can_contain {
|
||||
return Some(rd);
|
||||
}
|
||||
}
|
||||
None
|
||||
}
|
||||
|
||||
pub fn with_routing_domain<F, R>(&self, domain: RoutingDomain, f: F) -> R
|
||||
where
|
||||
F: FnOnce(&dyn RoutingDomainDetail) -> R,
|
||||
{
|
||||
match domain {
|
||||
RoutingDomain::PublicInternet => f(&self.public_internet_routing_domain),
|
||||
RoutingDomain::LocalNetwork => f(&self.local_network_routing_domain),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn with_routing_domain_mut<F, R>(&mut self, domain: RoutingDomain, f: F) -> R
|
||||
where
|
||||
F: FnOnce(&mut dyn RoutingDomainDetail) -> R,
|
||||
{
|
||||
match domain {
|
||||
RoutingDomain::PublicInternet => f(&mut self.public_internet_routing_domain),
|
||||
RoutingDomain::LocalNetwork => f(&mut self.local_network_routing_domain),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn relay_node(&self, domain: RoutingDomain) -> Option<NodeRef> {
|
||||
self.with_routing_domain(domain, |rd| rd.common().relay_node())
|
||||
}
|
||||
|
||||
pub fn has_dial_info(&self, domain: RoutingDomain) -> bool {
|
||||
self.with_routing_domain(domain, |rd| !rd.common().dial_info_details().is_empty())
|
||||
}
|
||||
|
||||
pub fn dial_info_details(&self, domain: RoutingDomain) -> Vec<DialInfoDetail> {
|
||||
self.with_routing_domain(domain, |rd| rd.common().dial_info_details().clone())
|
||||
}
|
||||
|
||||
pub fn first_filtered_dial_info_detail(
|
||||
&self,
|
||||
routing_domain_set: RoutingDomainSet,
|
||||
filter: &DialInfoFilter,
|
||||
) -> Option<DialInfoDetail> {
|
||||
for routing_domain in routing_domain_set {
|
||||
let did = self.with_routing_domain(routing_domain, |rd| {
|
||||
for did in rd.common().dial_info_details() {
|
||||
if did.matches_filter(filter) {
|
||||
return Some(did.clone());
|
||||
}
|
||||
}
|
||||
None
|
||||
});
|
||||
if did.is_some() {
|
||||
return did;
|
||||
}
|
||||
}
|
||||
None
|
||||
}
|
||||
|
||||
pub fn all_filtered_dial_info_details(
|
||||
&self,
|
||||
routing_domain_set: RoutingDomainSet,
|
||||
filter: &DialInfoFilter,
|
||||
) -> Vec<DialInfoDetail> {
|
||||
let mut ret = Vec::new();
|
||||
for routing_domain in routing_domain_set {
|
||||
self.with_routing_domain(routing_domain, |rd| {
|
||||
for did in rd.common().dial_info_details() {
|
||||
if did.matches_filter(filter) {
|
||||
ret.push(did.clone());
|
||||
}
|
||||
}
|
||||
});
|
||||
}
|
||||
ret.remove_duplicates();
|
||||
ret
|
||||
}
|
||||
|
||||
pub fn ensure_dial_info_is_valid(&self, domain: RoutingDomain, dial_info: &DialInfo) -> bool {
|
||||
let address = dial_info.socket_address().address();
|
||||
let can_contain_address =
|
||||
self.with_routing_domain(domain, |rd| rd.can_contain_address(address));
|
||||
|
||||
if !can_contain_address {
|
||||
log_rtab!(debug "can not add dial info to this routing domain");
|
||||
return false;
|
||||
}
|
||||
if !dial_info.is_valid() {
|
||||
log_rtab!(debug
|
||||
"shouldn't be registering invalid addresses: {:?}",
|
||||
dial_info
|
||||
);
|
||||
return false;
|
||||
}
|
||||
true
|
||||
}
|
||||
|
||||
pub fn node_info_is_valid_in_routing_domain(
|
||||
&self,
|
||||
routing_domain: RoutingDomain,
|
||||
node_info: &NodeInfo,
|
||||
) -> bool {
|
||||
// Should not be passing around nodeinfo with an invalid network class
|
||||
if matches!(node_info.network_class, NetworkClass::Invalid) {
|
||||
return false;
|
||||
}
|
||||
// Ensure all of the dial info works in this routing domain
|
||||
for did in &node_info.dial_info_detail_list {
|
||||
if !self.ensure_dial_info_is_valid(routing_domain, &did.dial_info) {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
true
|
||||
}
|
||||
|
||||
pub fn signed_node_info_is_valid_in_routing_domain(
|
||||
&self,
|
||||
routing_domain: RoutingDomain,
|
||||
signed_node_info: &SignedNodeInfo,
|
||||
) -> bool {
|
||||
if !self.node_info_is_valid_in_routing_domain(routing_domain, signed_node_info.node_info())
|
||||
{
|
||||
return false;
|
||||
}
|
||||
// Ensure the relay is also valid in this routing domain if it is provided
|
||||
if let Some(relay_ni) = signed_node_info.relay_info() {
|
||||
if !self.node_info_is_valid_in_routing_domain(routing_domain, relay_ni) {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
true
|
||||
}
|
||||
|
||||
#[instrument(level = "trace", skip(self), ret)]
|
||||
pub fn get_contact_method(
|
||||
&self,
|
||||
routing_domain: RoutingDomain,
|
||||
peer_a: &PeerInfo,
|
||||
peer_b: &PeerInfo,
|
||||
dial_info_filter: DialInfoFilter,
|
||||
sequencing: Sequencing,
|
||||
) -> ContactMethod {
|
||||
self.with_routing_domain(routing_domain, |rdd| {
|
||||
rdd.get_contact_method(self, peer_a, peer_b, dial_info_filter, sequencing)
|
||||
})
|
||||
}
|
||||
|
||||
pub fn reset_all_seen_our_node_info(&mut self, routing_domain: RoutingDomain) {
|
||||
let cur_ts = intf::get_timestamp();
|
||||
self.with_entries_mut(cur_ts, BucketEntryState::Dead, |rti, _, v| {
|
||||
v.with_mut(rti, |_rti, e| {
|
||||
e.set_seen_our_node_info(routing_domain, false);
|
||||
});
|
||||
Option::<()>::None
|
||||
});
|
||||
}
|
||||
|
||||
pub fn reset_all_updated_since_last_network_change(&mut self) {
|
||||
let cur_ts = intf::get_timestamp();
|
||||
self.with_entries_mut(cur_ts, BucketEntryState::Dead, |rti, _, v| {
|
||||
v.with_mut(rti, |_rti, e| {
|
||||
e.set_updated_since_last_network_change(false)
|
||||
});
|
||||
Option::<()>::None
|
||||
});
|
||||
}
|
||||
|
||||
/// Return a copy of our node's peerinfo
|
||||
pub fn get_own_peer_info(&self, routing_domain: RoutingDomain) -> PeerInfo {
|
||||
self.with_routing_domain(routing_domain, |rdd| {
|
||||
rdd.common().with_peer_info(self, |pi| pi.clone())
|
||||
})
|
||||
}
|
||||
|
||||
/// Return our currently registered network class
|
||||
pub fn has_valid_own_node_info(&self, routing_domain: RoutingDomain) -> bool {
|
||||
self.with_routing_domain(routing_domain, |rdd| rdd.common().has_valid_own_node_info())
|
||||
}
|
||||
|
||||
/// Return the domain's currently registered network class
|
||||
pub fn get_network_class(&self, routing_domain: RoutingDomain) -> Option<NetworkClass> {
|
||||
self.with_routing_domain(routing_domain, |rdd| rdd.common().network_class())
|
||||
}
|
||||
|
||||
/// Return the domain's filter for what we can receivein the form of a dial info filter
|
||||
pub fn get_inbound_dial_info_filter(&self, routing_domain: RoutingDomain) -> DialInfoFilter {
|
||||
self.with_routing_domain(routing_domain, |rdd| {
|
||||
rdd.common().inbound_dial_info_filter()
|
||||
})
|
||||
}
|
||||
|
||||
/// Return the domain's filter for what we can receive in the form of a node ref filter
|
||||
pub fn get_inbound_node_ref_filter(&self, routing_domain: RoutingDomain) -> NodeRefFilter {
|
||||
let dif = self.get_inbound_dial_info_filter(routing_domain);
|
||||
NodeRefFilter::new()
|
||||
.with_routing_domain(routing_domain)
|
||||
.with_dial_info_filter(dif)
|
||||
}
|
||||
|
||||
/// Return the domain's filter for what we can send out in the form of a dial info filter
|
||||
pub fn get_outbound_dial_info_filter(&self, routing_domain: RoutingDomain) -> DialInfoFilter {
|
||||
self.with_routing_domain(routing_domain, |rdd| {
|
||||
rdd.common().outbound_dial_info_filter()
|
||||
})
|
||||
}
|
||||
/// Return the domain's filter for what we can receive in the form of a node ref filter
|
||||
pub fn get_outbound_node_ref_filter(&self, routing_domain: RoutingDomain) -> NodeRefFilter {
|
||||
let dif = self.get_outbound_dial_info_filter(routing_domain);
|
||||
NodeRefFilter::new()
|
||||
.with_routing_domain(routing_domain)
|
||||
.with_dial_info_filter(dif)
|
||||
}
|
||||
|
||||
fn bucket_depth(index: usize) -> usize {
|
||||
match index {
|
||||
0 => 256,
|
||||
1 => 128,
|
||||
2 => 64,
|
||||
3 => 32,
|
||||
4 => 16,
|
||||
5 => 8,
|
||||
6 => 4,
|
||||
7 => 4,
|
||||
8 => 4,
|
||||
9 => 4,
|
||||
_ => 4,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn init_buckets(&mut self, routing_table: RoutingTable) {
|
||||
// Size the buckets (one per bit)
|
||||
self.buckets.clear();
|
||||
self.buckets.reserve(DHT_KEY_LENGTH * 8);
|
||||
for _ in 0..DHT_KEY_LENGTH * 8 {
|
||||
let bucket = Bucket::new(routing_table.clone());
|
||||
self.buckets.push(bucket);
|
||||
}
|
||||
}
|
||||
|
||||
pub fn configure_local_network_routing_domain(
|
||||
&mut self,
|
||||
local_networks: Vec<(IpAddr, IpAddr)>,
|
||||
) {
|
||||
log_net!(debug "configure_local_network_routing_domain: {:#?}", local_networks);
|
||||
|
||||
let changed = self
|
||||
.local_network_routing_domain
|
||||
.set_local_networks(local_networks);
|
||||
|
||||
// If the local network topology has changed, nuke the existing local node info and let new local discovery happen
|
||||
if changed {
|
||||
let cur_ts = intf::get_timestamp();
|
||||
self.with_entries_mut(cur_ts, BucketEntryState::Dead, |rti, _, e| {
|
||||
e.with_mut(rti, |_rti, e| {
|
||||
e.clear_signed_node_info(RoutingDomain::LocalNetwork);
|
||||
e.set_seen_our_node_info(RoutingDomain::LocalNetwork, false);
|
||||
e.set_updated_since_last_network_change(false);
|
||||
});
|
||||
Option::<()>::None
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
/// Attempt to empty the routing table
|
||||
/// should only be performed when there are no node_refs (detached)
|
||||
pub fn purge_buckets(&mut self) {
|
||||
log_rtab!(
|
||||
"Starting routing table buckets purge. Table currently has {} nodes",
|
||||
self.bucket_entry_count
|
||||
);
|
||||
for bucket in &mut self.buckets {
|
||||
bucket.kick(0);
|
||||
}
|
||||
log_rtab!(debug
|
||||
"Routing table buckets purge complete. Routing table now has {} nodes",
|
||||
self.bucket_entry_count
|
||||
);
|
||||
}
|
||||
|
||||
/// Attempt to remove last_connections from entries
|
||||
pub fn purge_last_connections(&mut self) {
|
||||
log_rtab!(
|
||||
"Starting routing table last_connections purge. Table currently has {} nodes",
|
||||
self.bucket_entry_count
|
||||
);
|
||||
for bucket in &self.buckets {
|
||||
for entry in bucket.entries() {
|
||||
entry.1.with_mut_inner(|e| {
|
||||
e.clear_last_connections();
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
log_rtab!(debug
|
||||
"Routing table last_connections purge complete. Routing table now has {} nodes",
|
||||
self.bucket_entry_count
|
||||
);
|
||||
}
|
||||
|
||||
/// Attempt to settle buckets and remove entries down to the desired number
|
||||
/// which may not be possible due extant NodeRefs
|
||||
pub fn kick_bucket(&mut self, idx: usize) {
|
||||
let bucket = &mut self.buckets[idx];
|
||||
let bucket_depth = Self::bucket_depth(idx);
|
||||
|
||||
if let Some(dead_node_ids) = bucket.kick(bucket_depth) {
|
||||
// Remove counts
|
||||
self.bucket_entry_count -= dead_node_ids.len();
|
||||
log_rtab!(debug "Routing table now has {} nodes", self.bucket_entry_count);
|
||||
|
||||
// Now purge the routing table inner vectors
|
||||
//let filter = |k: &DHTKey| dead_node_ids.contains(k);
|
||||
//inner.closest_reliable_nodes.retain(filter);
|
||||
//inner.fastest_reliable_nodes.retain(filter);
|
||||
//inner.closest_nodes.retain(filter);
|
||||
//inner.fastest_nodes.retain(filter);
|
||||
}
|
||||
}
|
||||
|
||||
pub fn find_bucket_index(&self, node_id: DHTKey) -> usize {
|
||||
distance(&node_id, &self.unlocked_inner.node_id)
|
||||
.first_nonzero_bit()
|
||||
.unwrap()
|
||||
}
|
||||
|
||||
pub fn get_entry_count(
|
||||
&self,
|
||||
routing_domain_set: RoutingDomainSet,
|
||||
min_state: BucketEntryState,
|
||||
) -> usize {
|
||||
let mut count = 0usize;
|
||||
let cur_ts = intf::get_timestamp();
|
||||
self.with_entries(cur_ts, min_state, |rti, _, e| {
|
||||
if e.with(rti, |_rti, e| e.best_routing_domain(routing_domain_set))
|
||||
.is_some()
|
||||
{
|
||||
count += 1;
|
||||
}
|
||||
Option::<()>::None
|
||||
});
|
||||
count
|
||||
}
|
||||
|
||||
pub fn with_entries<T, F: FnMut(&RoutingTableInner, DHTKey, Arc<BucketEntry>) -> Option<T>>(
|
||||
&self,
|
||||
cur_ts: u64,
|
||||
min_state: BucketEntryState,
|
||||
mut f: F,
|
||||
) -> Option<T> {
|
||||
let mut entryvec = Vec::with_capacity(self.bucket_entry_count);
|
||||
for bucket in &self.buckets {
|
||||
for entry in bucket.entries() {
|
||||
if entry.1.with(self, |_rti, e| e.state(cur_ts) >= min_state) {
|
||||
entryvec.push((*entry.0, entry.1.clone()));
|
||||
}
|
||||
}
|
||||
}
|
||||
for entry in entryvec {
|
||||
if let Some(out) = f(self, entry.0, entry.1) {
|
||||
return Some(out);
|
||||
}
|
||||
}
|
||||
|
||||
None
|
||||
}
|
||||
|
||||
pub fn with_entries_mut<
|
||||
T,
|
||||
F: FnMut(&mut RoutingTableInner, DHTKey, Arc<BucketEntry>) -> Option<T>,
|
||||
>(
|
||||
&mut self,
|
||||
cur_ts: u64,
|
||||
min_state: BucketEntryState,
|
||||
mut f: F,
|
||||
) -> Option<T> {
|
||||
let mut entryvec = Vec::with_capacity(self.bucket_entry_count);
|
||||
for bucket in &self.buckets {
|
||||
for entry in bucket.entries() {
|
||||
if entry.1.with(self, |_rti, e| e.state(cur_ts) >= min_state) {
|
||||
entryvec.push((*entry.0, entry.1.clone()));
|
||||
}
|
||||
}
|
||||
}
|
||||
for entry in entryvec {
|
||||
if let Some(out) = f(self, entry.0, entry.1) {
|
||||
return Some(out);
|
||||
}
|
||||
}
|
||||
|
||||
None
|
||||
}
|
||||
|
||||
pub fn get_nodes_needing_updates(
|
||||
&self,
|
||||
outer_self: RoutingTable,
|
||||
routing_domain: RoutingDomain,
|
||||
cur_ts: u64,
|
||||
all: bool,
|
||||
) -> Vec<NodeRef> {
|
||||
let mut node_refs = Vec::<NodeRef>::with_capacity(self.bucket_entry_count);
|
||||
self.with_entries(cur_ts, BucketEntryState::Unreliable, |rti, k, v| {
|
||||
// Only update nodes that haven't seen our node info yet
|
||||
if all || !v.with(rti, |_rti, e| e.has_seen_our_node_info(routing_domain)) {
|
||||
node_refs.push(NodeRef::new(
|
||||
outer_self.clone(),
|
||||
k,
|
||||
v,
|
||||
Some(NodeRefFilter::new().with_routing_domain(routing_domain)),
|
||||
));
|
||||
}
|
||||
Option::<()>::None
|
||||
});
|
||||
node_refs
|
||||
}
|
||||
|
||||
pub fn get_nodes_needing_ping(
|
||||
&self,
|
||||
outer_self: RoutingTable,
|
||||
routing_domain: RoutingDomain,
|
||||
cur_ts: u64,
|
||||
) -> Vec<NodeRef> {
|
||||
// Collect relay nodes
|
||||
let opt_relay_id = self.with_routing_domain(routing_domain, |rd| {
|
||||
rd.common().relay_node().map(|rn| rn.node_id())
|
||||
});
|
||||
|
||||
// Collect all entries that are 'needs_ping' and have some node info making them reachable somehow
|
||||
let mut node_refs = Vec::<NodeRef>::with_capacity(self.bucket_entry_count);
|
||||
self.with_entries(cur_ts, BucketEntryState::Unreliable, |rti, k, v| {
|
||||
if v.with(rti, |_rti, e| {
|
||||
e.has_node_info(routing_domain.into())
|
||||
&& e.needs_ping(cur_ts, opt_relay_id == Some(k))
|
||||
}) {
|
||||
node_refs.push(NodeRef::new(
|
||||
outer_self.clone(),
|
||||
k,
|
||||
v,
|
||||
Some(NodeRefFilter::new().with_routing_domain(routing_domain)),
|
||||
));
|
||||
}
|
||||
Option::<()>::None
|
||||
});
|
||||
node_refs
|
||||
}
|
||||
|
||||
pub fn get_all_nodes(&self, outer_self: RoutingTable, cur_ts: u64) -> Vec<NodeRef> {
|
||||
let mut node_refs = Vec::<NodeRef>::with_capacity(self.bucket_entry_count);
|
||||
self.with_entries(cur_ts, BucketEntryState::Unreliable, |_rti, k, v| {
|
||||
node_refs.push(NodeRef::new(outer_self.clone(), k, v, None));
|
||||
Option::<()>::None
|
||||
});
|
||||
node_refs
|
||||
}
|
||||
|
||||
/// Create a node reference, possibly creating a bucket entry
|
||||
/// the 'update_func' closure is called on the node, and, if created,
|
||||
/// in a locked fashion as to ensure the bucket entry state is always valid
|
||||
pub fn create_node_ref<F>(
|
||||
&mut self,
|
||||
outer_self: RoutingTable,
|
||||
node_id: DHTKey,
|
||||
update_func: F,
|
||||
) -> Option<NodeRef>
|
||||
where
|
||||
F: FnOnce(&mut RoutingTableInner, &mut BucketEntryInner),
|
||||
{
|
||||
// Ensure someone isn't trying register this node itself
|
||||
if node_id == self.node_id() {
|
||||
log_rtab!(debug "can't register own node");
|
||||
return None;
|
||||
}
|
||||
|
||||
// Look up existing entry
|
||||
let idx = self.find_bucket_index(node_id);
|
||||
let noderef = {
|
||||
let bucket = &self.buckets[idx];
|
||||
let entry = bucket.entry(&node_id);
|
||||
entry.map(|e| NodeRef::new(outer_self.clone(), node_id, e, None))
|
||||
};
|
||||
|
||||
// If one doesn't exist, insert into bucket, possibly evicting a bucket member
|
||||
let noderef = match noderef {
|
||||
None => {
|
||||
// Make new entry
|
||||
self.bucket_entry_count += 1;
|
||||
let cnt = self.bucket_entry_count;
|
||||
let bucket = &mut self.buckets[idx];
|
||||
let nr = bucket.add_entry(node_id);
|
||||
|
||||
// Update the entry
|
||||
let entry = bucket.entry(&node_id).unwrap();
|
||||
entry.with_mut(self, update_func);
|
||||
|
||||
// Kick the bucket
|
||||
self.unlocked_inner.kick_queue.lock().insert(idx);
|
||||
log_rtab!(debug "Routing table now has {} nodes, {} live", cnt, self.get_entry_count(RoutingDomainSet::all(), BucketEntryState::Unreliable));
|
||||
|
||||
nr
|
||||
}
|
||||
Some(nr) => {
|
||||
// Update the entry
|
||||
let bucket = &mut self.buckets[idx];
|
||||
let entry = bucket.entry(&node_id).unwrap();
|
||||
entry.with_mut(self, update_func);
|
||||
|
||||
nr
|
||||
}
|
||||
};
|
||||
|
||||
Some(noderef)
|
||||
}
|
||||
|
||||
/// Resolve an existing routing table entry and return a reference to it
|
||||
pub fn lookup_node_ref(&self, outer_self: RoutingTable, node_id: DHTKey) -> Option<NodeRef> {
|
||||
if node_id == self.unlocked_inner.node_id {
|
||||
log_rtab!(error "can't look up own node id in routing table");
|
||||
return None;
|
||||
}
|
||||
let idx = self.find_bucket_index(node_id);
|
||||
let bucket = &self.buckets[idx];
|
||||
bucket
|
||||
.entry(&node_id)
|
||||
.map(|e| NodeRef::new(outer_self, node_id, e, None))
|
||||
}
|
||||
|
||||
/// Resolve an existing routing table entry and return a filtered reference to it
|
||||
pub fn lookup_and_filter_noderef(
|
||||
&self,
|
||||
outer_self: RoutingTable,
|
||||
node_id: DHTKey,
|
||||
routing_domain_set: RoutingDomainSet,
|
||||
dial_info_filter: DialInfoFilter,
|
||||
) -> Option<NodeRef> {
|
||||
let nr = self.lookup_node_ref(outer_self, node_id)?;
|
||||
Some(
|
||||
nr.filtered_clone(
|
||||
NodeRefFilter::new()
|
||||
.with_dial_info_filter(dial_info_filter)
|
||||
.with_routing_domain_set(routing_domain_set),
|
||||
),
|
||||
)
|
||||
}
|
||||
|
||||
/// Resolve an existing routing table entry and call a function on its entry without using a noderef
|
||||
pub fn with_node_entry<F, R>(&self, node_id: DHTKey, f: F) -> Option<R>
|
||||
where
|
||||
F: FnOnce(Arc<BucketEntry>) -> R,
|
||||
{
|
||||
if node_id == self.unlocked_inner.node_id {
|
||||
log_rtab!(error "can't look up own node id in routing table");
|
||||
return None;
|
||||
}
|
||||
let idx = self.find_bucket_index(node_id);
|
||||
let bucket = &self.buckets[idx];
|
||||
if let Some(e) = bucket.entry(&node_id) {
|
||||
return Some(f(e));
|
||||
}
|
||||
None
|
||||
}
|
||||
|
||||
/// Shortcut function to add a node to our routing table if it doesn't exist
|
||||
/// and add the dial info we have for it. Returns a noderef filtered to
|
||||
/// the routing domain in which this node was registered for convenience.
|
||||
pub fn register_node_with_signed_node_info(
|
||||
&mut self,
|
||||
outer_self: RoutingTable,
|
||||
routing_domain: RoutingDomain,
|
||||
node_id: DHTKey,
|
||||
signed_node_info: SignedNodeInfo,
|
||||
allow_invalid: bool,
|
||||
) -> Option<NodeRef> {
|
||||
// validate signed node info is not something malicious
|
||||
if node_id == self.node_id() {
|
||||
log_rtab!(debug "can't register own node id in routing table");
|
||||
return None;
|
||||
}
|
||||
if let Some(relay_id) = signed_node_info.relay_id() {
|
||||
if relay_id.key == node_id {
|
||||
log_rtab!(debug "node can not be its own relay");
|
||||
return None;
|
||||
}
|
||||
}
|
||||
if !allow_invalid {
|
||||
// verify signature
|
||||
if !signed_node_info.has_valid_signature() {
|
||||
log_rtab!(debug "signed node info for {} has invalid signature", node_id);
|
||||
return None;
|
||||
}
|
||||
// verify signed node info is valid in this routing domain
|
||||
if !self.signed_node_info_is_valid_in_routing_domain(routing_domain, &signed_node_info)
|
||||
{
|
||||
log_rtab!(debug "signed node info for {} not valid in the {:?} routing domain", node_id, routing_domain);
|
||||
return None;
|
||||
}
|
||||
}
|
||||
|
||||
self.create_node_ref(outer_self, node_id, |_rti, e| {
|
||||
e.update_signed_node_info(routing_domain, signed_node_info);
|
||||
})
|
||||
.map(|mut nr| {
|
||||
nr.set_filter(Some(
|
||||
NodeRefFilter::new().with_routing_domain(routing_domain),
|
||||
));
|
||||
nr
|
||||
})
|
||||
}
|
||||
|
||||
/// Shortcut function to add a node to our routing table if it doesn't exist
|
||||
/// and add the last peer address we have for it, since that's pretty common
|
||||
pub fn register_node_with_existing_connection(
|
||||
&mut self,
|
||||
outer_self: RoutingTable,
|
||||
node_id: DHTKey,
|
||||
descriptor: ConnectionDescriptor,
|
||||
timestamp: u64,
|
||||
) -> Option<NodeRef> {
|
||||
let out = self.create_node_ref(outer_self, node_id, |_rti, e| {
|
||||
// this node is live because it literally just connected to us
|
||||
e.touch_last_seen(timestamp);
|
||||
});
|
||||
if let Some(nr) = &out {
|
||||
// set the most recent node address for connection finding and udp replies
|
||||
nr.locked_mut(self)
|
||||
.set_last_connection(descriptor, timestamp);
|
||||
}
|
||||
out
|
||||
}
|
||||
|
||||
//////////////////////////////////////////////////////////////////////
|
||||
// Routing Table Health Metrics
|
||||
|
||||
pub fn get_routing_table_health(&self) -> RoutingTableHealth {
|
||||
let mut health = RoutingTableHealth::default();
|
||||
let cur_ts = intf::get_timestamp();
|
||||
for bucket in &self.buckets {
|
||||
for (_, v) in bucket.entries() {
|
||||
match v.with(self, |_rti, e| e.state(cur_ts)) {
|
||||
BucketEntryState::Reliable => {
|
||||
health.reliable_entry_count += 1;
|
||||
}
|
||||
BucketEntryState::Unreliable => {
|
||||
health.unreliable_entry_count += 1;
|
||||
}
|
||||
BucketEntryState::Dead => {
|
||||
health.dead_entry_count += 1;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
health
|
||||
}
|
||||
|
||||
pub fn touch_recent_peer(&mut self, node_id: DHTKey, last_connection: ConnectionDescriptor) {
|
||||
self.recent_peers
|
||||
.insert(node_id, RecentPeersEntry { last_connection });
|
||||
}
|
||||
|
||||
//////////////////////////////////////////////////////////////////////
|
||||
// Find Nodes
|
||||
|
||||
// Retrieve the fastest nodes in the routing table matching an entry filter
|
||||
pub fn find_fast_public_nodes_filtered(
|
||||
&self,
|
||||
outer_self: RoutingTable,
|
||||
node_count: usize,
|
||||
mut filters: VecDeque<RoutingTableEntryFilter>,
|
||||
) -> Vec<NodeRef> {
|
||||
let public_node_filter = Box::new(
|
||||
|rti: &RoutingTableInner, _k: DHTKey, v: Option<Arc<BucketEntry>>| {
|
||||
let entry = v.unwrap();
|
||||
entry.with(rti, |_rti, e| {
|
||||
// skip nodes on local network
|
||||
if e.node_info(RoutingDomain::LocalNetwork).is_some() {
|
||||
return false;
|
||||
}
|
||||
// skip nodes not on public internet
|
||||
if e.node_info(RoutingDomain::PublicInternet).is_none() {
|
||||
return false;
|
||||
}
|
||||
true
|
||||
})
|
||||
},
|
||||
) as RoutingTableEntryFilter;
|
||||
filters.push_front(public_node_filter);
|
||||
|
||||
self.find_fastest_nodes(
|
||||
node_count,
|
||||
filters,
|
||||
|_rti: &RoutingTableInner, k: DHTKey, v: Option<Arc<BucketEntry>>| {
|
||||
NodeRef::new(outer_self.clone(), k, v.unwrap().clone(), None)
|
||||
},
|
||||
)
|
||||
}
|
||||
|
||||
pub fn filter_has_valid_signed_node_info(
|
||||
&self,
|
||||
routing_domain: RoutingDomain,
|
||||
has_valid_own_node_info: bool,
|
||||
v: Option<Arc<BucketEntry>>,
|
||||
) -> bool {
|
||||
match v {
|
||||
None => has_valid_own_node_info,
|
||||
Some(entry) => entry.with(self, |_rti, e| {
|
||||
e.signed_node_info(routing_domain.into())
|
||||
.map(|sni| sni.has_valid_signature())
|
||||
.unwrap_or(false)
|
||||
}),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn transform_to_peer_info(
|
||||
&self,
|
||||
routing_domain: RoutingDomain,
|
||||
own_peer_info: PeerInfo,
|
||||
k: DHTKey,
|
||||
v: Option<Arc<BucketEntry>>,
|
||||
) -> PeerInfo {
|
||||
match v {
|
||||
None => own_peer_info,
|
||||
Some(entry) => entry.with(self, |_rti, e| e.make_peer_info(k, routing_domain).unwrap()),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn find_peers_with_sort_and_filter<C, T, O>(
|
||||
&self,
|
||||
node_count: usize,
|
||||
cur_ts: u64,
|
||||
mut filters: VecDeque<RoutingTableEntryFilter>,
|
||||
mut compare: C,
|
||||
mut transform: T,
|
||||
) -> Vec<O>
|
||||
where
|
||||
C: for<'a, 'b> FnMut(
|
||||
&'a RoutingTableInner,
|
||||
&'b (DHTKey, Option<Arc<BucketEntry>>),
|
||||
&'b (DHTKey, Option<Arc<BucketEntry>>),
|
||||
) -> core::cmp::Ordering,
|
||||
T: for<'r> FnMut(&'r RoutingTableInner, DHTKey, Option<Arc<BucketEntry>>) -> O,
|
||||
{
|
||||
// collect all the nodes for sorting
|
||||
let mut nodes =
|
||||
Vec::<(DHTKey, Option<Arc<BucketEntry>>)>::with_capacity(self.bucket_entry_count + 1);
|
||||
|
||||
// add our own node (only one of there with the None entry)
|
||||
let mut filtered = false;
|
||||
for filter in &mut filters {
|
||||
if !filter(self, self.unlocked_inner.node_id, None) {
|
||||
filtered = true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
if !filtered {
|
||||
nodes.push((self.unlocked_inner.node_id, None));
|
||||
}
|
||||
|
||||
// add all nodes from buckets
|
||||
self.with_entries(cur_ts, BucketEntryState::Unreliable, |rti, k, v| {
|
||||
// Apply filter
|
||||
for filter in &mut filters {
|
||||
if filter(rti, k, Some(v.clone())) {
|
||||
nodes.push((k, Some(v.clone())));
|
||||
break;
|
||||
}
|
||||
}
|
||||
Option::<()>::None
|
||||
});
|
||||
|
||||
// sort by preference for returning nodes
|
||||
nodes.sort_by(|a, b| compare(self, a, b));
|
||||
|
||||
// return transformed vector for filtered+sorted nodes
|
||||
let cnt = usize::min(node_count, nodes.len());
|
||||
let mut out = Vec::<O>::with_capacity(cnt);
|
||||
for node in nodes {
|
||||
let val = transform(self, node.0, node.1);
|
||||
out.push(val);
|
||||
}
|
||||
|
||||
out
|
||||
}
|
||||
|
||||
pub fn find_fastest_nodes<T, O>(
|
||||
&self,
|
||||
node_count: usize,
|
||||
mut filters: VecDeque<RoutingTableEntryFilter>,
|
||||
transform: T,
|
||||
) -> Vec<O>
|
||||
where
|
||||
T: for<'r> FnMut(&'r RoutingTableInner, DHTKey, Option<Arc<BucketEntry>>) -> O,
|
||||
{
|
||||
let cur_ts = intf::get_timestamp();
|
||||
|
||||
// Add filter to remove dead nodes always
|
||||
let filter_dead = Box::new(
|
||||
move |rti: &RoutingTableInner, _k: DHTKey, v: Option<Arc<BucketEntry>>| {
|
||||
if let Some(entry) = &v {
|
||||
// always filter out dead nodes
|
||||
if entry.with(rti, |_rti, e| e.state(cur_ts) == BucketEntryState::Dead) {
|
||||
false
|
||||
} else {
|
||||
true
|
||||
}
|
||||
} else {
|
||||
// always filter out self peer, as it is irrelevant to the 'fastest nodes' search
|
||||
false
|
||||
}
|
||||
},
|
||||
) as RoutingTableEntryFilter;
|
||||
filters.push_front(filter_dead);
|
||||
|
||||
// Fastest sort
|
||||
let sort = |rti: &RoutingTableInner,
|
||||
(a_key, a_entry): &(DHTKey, Option<Arc<BucketEntry>>),
|
||||
(b_key, b_entry): &(DHTKey, Option<Arc<BucketEntry>>)| {
|
||||
// same nodes are always the same
|
||||
if a_key == b_key {
|
||||
return core::cmp::Ordering::Equal;
|
||||
}
|
||||
// our own node always comes last (should not happen, here for completeness)
|
||||
if a_entry.is_none() {
|
||||
return core::cmp::Ordering::Greater;
|
||||
}
|
||||
if b_entry.is_none() {
|
||||
return core::cmp::Ordering::Less;
|
||||
}
|
||||
// reliable nodes come first
|
||||
let ae = a_entry.as_ref().unwrap();
|
||||
let be = b_entry.as_ref().unwrap();
|
||||
ae.with(rti, |rti, ae| {
|
||||
be.with(rti, |_rti, be| {
|
||||
let ra = ae.check_reliable(cur_ts);
|
||||
let rb = be.check_reliable(cur_ts);
|
||||
if ra != rb {
|
||||
if ra {
|
||||
return core::cmp::Ordering::Less;
|
||||
} else {
|
||||
return core::cmp::Ordering::Greater;
|
||||
}
|
||||
}
|
||||
|
||||
// latency is the next metric, closer nodes first
|
||||
let a_latency = match ae.peer_stats().latency.as_ref() {
|
||||
None => {
|
||||
// treat unknown latency as slow
|
||||
return core::cmp::Ordering::Greater;
|
||||
}
|
||||
Some(l) => l,
|
||||
};
|
||||
let b_latency = match be.peer_stats().latency.as_ref() {
|
||||
None => {
|
||||
// treat unknown latency as slow
|
||||
return core::cmp::Ordering::Less;
|
||||
}
|
||||
Some(l) => l,
|
||||
};
|
||||
// Sort by average latency
|
||||
a_latency.average.cmp(&b_latency.average)
|
||||
})
|
||||
})
|
||||
};
|
||||
|
||||
let out =
|
||||
self.find_peers_with_sort_and_filter(node_count, cur_ts, filters, sort, transform);
|
||||
out
|
||||
}
|
||||
|
||||
pub fn find_closest_nodes<T, O>(
|
||||
&self,
|
||||
node_id: DHTKey,
|
||||
filters: VecDeque<RoutingTableEntryFilter>,
|
||||
transform: T,
|
||||
) -> Vec<O>
|
||||
where
|
||||
T: for<'r> FnMut(&'r RoutingTableInner, DHTKey, Option<Arc<BucketEntry>>) -> O,
|
||||
{
|
||||
let cur_ts = intf::get_timestamp();
|
||||
let node_count = {
|
||||
let config = self.config();
|
||||
let c = config.get();
|
||||
c.network.dht.max_find_node_count as usize
|
||||
};
|
||||
|
||||
// closest sort
|
||||
let sort = |rti: &RoutingTableInner,
|
||||
(a_key, a_entry): &(DHTKey, Option<Arc<BucketEntry>>),
|
||||
(b_key, b_entry): &(DHTKey, Option<Arc<BucketEntry>>)| {
|
||||
// same nodes are always the same
|
||||
if a_key == b_key {
|
||||
return core::cmp::Ordering::Equal;
|
||||
}
|
||||
|
||||
// reliable nodes come first, pessimistically treating our own node as unreliable
|
||||
let ra = a_entry
|
||||
.as_ref()
|
||||
.map_or(false, |x| x.with(rti, |_rti, x| x.check_reliable(cur_ts)));
|
||||
let rb = b_entry
|
||||
.as_ref()
|
||||
.map_or(false, |x| x.with(rti, |_rti, x| x.check_reliable(cur_ts)));
|
||||
if ra != rb {
|
||||
if ra {
|
||||
return core::cmp::Ordering::Less;
|
||||
} else {
|
||||
return core::cmp::Ordering::Greater;
|
||||
}
|
||||
}
|
||||
|
||||
// distance is the next metric, closer nodes first
|
||||
let da = distance(a_key, &node_id);
|
||||
let db = distance(b_key, &node_id);
|
||||
da.cmp(&db)
|
||||
};
|
||||
|
||||
let out =
|
||||
self.find_peers_with_sort_and_filter(node_count, cur_ts, filters, sort, transform);
|
||||
log_rtab!(">> find_closest_nodes: node count = {}", out.len());
|
||||
out
|
||||
}
|
||||
}
|
@ -22,8 +22,13 @@ impl RoutingTable {
);

// Roll all bucket entry transfers
for b in &mut inner.buckets {
b.roll_transfers(last_ts, cur_ts);
let entries: Vec<Arc<BucketEntry>> = inner
.buckets
.iter()
.flat_map(|b| b.entries().map(|(_k, v)| v.clone()))
.collect();
for v in entries {
v.with_mut(inner, |_rti, e| e.roll_transfers(last_ts, cur_ts));
}
Ok(())
}
@ -42,7 +47,7 @@ impl RoutingTable {
.collect();
let mut inner = self.inner.write();
for idx in kick_queue {
Self::kick_bucket(&mut *inner, idx)
inner.kick_bucket(idx)
}
Ok(())
}
@ -1,41 +0,0 @@
use crate::dht::*;
use crate::*;
use core::convert::TryInto;
use rpc_processor::*;

pub fn decode_block_id(public_key: &veilid_capnp::b_l_a_k_e3_hash::Reader) -> DHTKey {
let u0 = public_key.get_u0().to_be_bytes();
let u1 = public_key.get_u1().to_be_bytes();
let u2 = public_key.get_u2().to_be_bytes();
let u3 = public_key.get_u3().to_be_bytes();

let mut x: [u8; 32] = Default::default();
x[0..8].copy_from_slice(&u0);
x[8..16].copy_from_slice(&u1);
x[16..24].copy_from_slice(&u2);
x[24..32].copy_from_slice(&u3);

DHTKey::new(x)
}

pub fn encode_block_id(
key: &DHTKey,
builder: &mut veilid_capnp::b_l_a_k_e3_hash::Builder,
) -> Result<(), RPCError> {
if !key.valid {
return Err(RPCError::protocol("invalid key"));
}
builder.set_u0(u64::from_be_bytes(
key.bytes[0..8].try_into().map_err(RPCError::internal)?,
));
builder.set_u1(u64::from_be_bytes(
key.bytes[8..16].try_into().map_err(RPCError::internal)?,
));
builder.set_u2(u64::from_be_bytes(
key.bytes[16..24].try_into().map_err(RPCError::internal)?,
));
builder.set_u3(u64::from_be_bytes(
key.bytes[24..32].try_into().map_err(RPCError::internal)?,
));
Ok(())
}
@ -1,9 +1,9 @@
use crate::dht::*;
use crate::crypto::*;
use crate::*;
use core::convert::TryInto;
use rpc_processor::*;

pub fn decode_public_key(public_key: &veilid_capnp::curve25519_public_key::Reader) -> DHTKey {
pub fn decode_dht_key(public_key: &veilid_capnp::key256::Reader) -> DHTKey {
let u0 = public_key.get_u0().to_be_bytes();
let u1 = public_key.get_u1().to_be_bytes();
let u2 = public_key.get_u2().to_be_bytes();
@ -18,13 +18,10 @@ pub fn decode_public_key(public_key: &veilid_capnp::curve25519_public_key::Reade
DHTKey::new(x)
}

pub fn encode_public_key(
pub fn encode_dht_key(
key: &DHTKey,
builder: &mut veilid_capnp::curve25519_public_key::Builder,
builder: &mut veilid_capnp::key256::Builder,
) -> Result<(), RPCError> {
if !key.valid {
return Err(RPCError::protocol("invalid key"));
}
builder.set_u0(u64::from_be_bytes(
key.bytes[0..8]
.try_into()
@ -1,14 +1,7 @@
use crate::*;
use rpc_processor::*;

pub fn encode_signature(
sig: &DHTSignature,
builder: &mut veilid_capnp::ed25519_signature::Builder,
) {
if !sig.valid {
panic!("don't encode invalid signatures");
}

pub fn encode_signature(sig: &DHTSignature, builder: &mut veilid_capnp::signature512::Builder) {
let sig = &sig.bytes;

builder.set_u0(u64::from_be_bytes(
@ -37,7 +30,7 @@ pub fn encode_signature(
));
}

pub fn decode_signature(reader: &veilid_capnp::ed25519_signature::Reader) -> DHTSignature {
pub fn decode_signature(reader: &veilid_capnp::signature512::Reader) -> DHTSignature {
let u0 = reader.get_u0().to_be_bytes();
let u1 = reader.get_u1().to_be_bytes();
let u2 = reader.get_u2().to_be_bytes();
@ -1,11 +1,11 @@
mod address;
mod address_type_set;
mod block_id;
mod dht_key;
mod dht_signature;
mod dial_info;
mod dial_info_class;
mod dial_info_detail;
mod network_class;
mod node_dial_info;
mod node_info;
mod node_status;
mod nonce;
@ -13,11 +13,11 @@ mod operations;
mod peer_info;
mod private_safety_route;
mod protocol_type_set;
mod public_key;
mod sender_info;
mod signal_info;
mod signature;
mod signed_direct_node_info;
mod signed_node_info;
mod signed_relayed_node_info;
mod socket_address;
mod tunnel;
mod value_data;
@ -25,12 +25,12 @@ mod value_key;

pub use address::*;
pub use address_type_set::*;
pub use block_id::*;
pub use dht_key::*;
pub use dht_signature::*;
pub use dial_info::*;
pub use dial_info_class::*;
pub use dial_info_detail::*;
pub use network_class::*;
pub use node_dial_info::*;
pub use node_info::*;
pub use node_status::*;
pub use nonce::*;
@ -38,11 +38,11 @@ pub use operations::*;
pub use peer_info::*;
pub use private_safety_route::*;
pub use protocol_type_set::*;
pub use public_key::*;
pub use sender_info::*;
pub use signal_info::*;
pub use signature::*;
pub use signed_direct_node_info::*;
pub use signed_node_info::*;
pub use signed_relayed_node_info::*;
pub use socket_address::*;
pub use tunnel::*;
pub use value_data::*;
@ -5,7 +5,7 @@ pub fn encode_network_class(network_class: NetworkClass) -> veilid_capnp::Networ
NetworkClass::InboundCapable => veilid_capnp::NetworkClass::InboundCapable,
NetworkClass::OutboundOnly => veilid_capnp::NetworkClass::OutboundOnly,
NetworkClass::WebApp => veilid_capnp::NetworkClass::WebApp,
NetworkClass::Invalid => panic!("invalid network class should not be encoded"),
NetworkClass::Invalid => veilid_capnp::NetworkClass::Invalid,
}
}

@ -14,5 +14,6 @@ pub fn decode_network_class(network_class: veilid_capnp::NetworkClass) -> Networ
veilid_capnp::NetworkClass::InboundCapable => NetworkClass::InboundCapable,
veilid_capnp::NetworkClass::OutboundOnly => NetworkClass::OutboundOnly,
veilid_capnp::NetworkClass::WebApp => NetworkClass::WebApp,
veilid_capnp::NetworkClass::Invalid => NetworkClass::Invalid,
}
}
@ -1,29 +0,0 @@
use crate::*;
use rpc_processor::*;

pub fn encode_node_dial_info(
ndis: &NodeDialInfo,
builder: &mut veilid_capnp::node_dial_info::Builder,
) -> Result<(), RPCError> {
let mut ni_builder = builder.reborrow().init_node_id();
encode_public_key(&ndis.node_id.key, &mut ni_builder)?;
let mut di_builder = builder.reborrow().init_dial_info();
encode_dial_info(&ndis.dial_info, &mut di_builder)?;
Ok(())
}

pub fn decode_node_dial_info(
reader: &veilid_capnp::node_dial_info::Reader,
) -> Result<NodeDialInfo, RPCError> {
let node_id = decode_public_key(&reader.get_node_id().map_err(RPCError::map_protocol(
"invalid public key in node_dial_info",
))?);
let dial_info = decode_dial_info(&reader.get_dial_info().map_err(RPCError::map_protocol(
"invalid dial_info in node_dial_info",
))?)?;

Ok(NodeDialInfo {
node_id: NodeId::new(node_id),
dial_info,
})
}
@ -31,18 +31,10 @@ pub fn encode_node_info(
encode_dial_info_detail(&node_info.dial_info_detail_list[idx], &mut did_builder)?;
}

if let Some(rpi) = &node_info.relay_peer_info {
let mut rpi_builder = builder.reborrow().init_relay_peer_info();
encode_peer_info(rpi, &mut rpi_builder)?;
}

Ok(())
}

pub fn decode_node_info(
reader: &veilid_capnp::node_info::Reader,
allow_relay_peer_info: bool,
) -> Result<NodeInfo, RPCError> {
pub fn decode_node_info(reader: &veilid_capnp::node_info::Reader) -> Result<NodeInfo, RPCError> {
let network_class = decode_network_class(
reader
.reborrow()
@ -81,22 +73,6 @@ pub fn decode_node_info(
dial_info_detail_list.push(decode_dial_info_detail(&did)?)
}

let relay_peer_info = if allow_relay_peer_info {
if reader.has_relay_peer_info() {
Some(Box::new(decode_peer_info(
&reader
.reborrow()
.get_relay_peer_info()
.map_err(RPCError::protocol)?,
false,
)?))
} else {
None
}
} else {
None
};

Ok(NodeInfo {
network_class,
outbound_protocols,
@ -104,6 +80,5 @@ pub fn decode_node_info(
min_version,
max_version,
dial_info_detail_list,
relay_peer_info,
})
}
@ -1,10 +1,7 @@
use crate::*;
use rpc_processor::*;

pub fn encode_nonce(
nonce: &Nonce,
builder: &mut veilid_capnp::x_cha_cha20_poly1305_nonce::Builder,
) {
pub fn encode_nonce(nonce: &Nonce, builder: &mut veilid_capnp::nonce24::Builder) {
builder.set_u0(u64::from_be_bytes(
nonce[0..8].try_into().expect("slice with incorrect length"),
));
@ -20,7 +17,7 @@ pub fn encode_nonce(
));
}

pub fn decode_nonce(reader: &veilid_capnp::x_cha_cha20_poly1305_nonce::Reader) -> Nonce {
pub fn decode_nonce(reader: &veilid_capnp::nonce24::Reader) -> Nonce {
let u0 = reader.get_u0().to_be_bytes();
let u1 = reader.get_u1().to_be_bytes();
let u2 = reader.get_u2().to_be_bytes();
@ -11,9 +11,6 @@ impl RPCAnswer {
pub fn new(detail: RPCAnswerDetail) -> Self {
Self { detail }
}
// pub fn detail(&self) -> &RPCAnswerDetail {
// &self.detail
// }
pub fn into_detail(self) -> RPCAnswerDetail {
self.detail
}
@ -35,6 +32,7 @@ impl RPCAnswer {
pub enum RPCAnswerDetail {
StatusA(RPCOperationStatusA),
FindNodeA(RPCOperationFindNodeA),
AppCallA(RPCOperationAppCallA),
GetValueA(RPCOperationGetValueA),
SetValueA(RPCOperationSetValueA),
WatchValueA(RPCOperationWatchValueA),
@ -50,6 +48,7 @@ impl RPCAnswerDetail {
match self {
RPCAnswerDetail::StatusA(_) => "StatusA",
RPCAnswerDetail::FindNodeA(_) => "FindNodeA",
RPCAnswerDetail::AppCallA(_) => "AppCallA",
RPCAnswerDetail::GetValueA(_) => "GetValueA",
RPCAnswerDetail::SetValueA(_) => "SetValueA",
RPCAnswerDetail::WatchValueA(_) => "WatchValueA",
@ -76,6 +75,11 @@ impl RPCAnswerDetail {
let out = RPCOperationFindNodeA::decode(&op_reader)?;
RPCAnswerDetail::FindNodeA(out)
}
veilid_capnp::answer::detail::AppCallA(r) => {
let op_reader = r.map_err(RPCError::protocol)?;
let out = RPCOperationAppCallA::decode(&op_reader)?;
RPCAnswerDetail::AppCallA(out)
}
veilid_capnp::answer::detail::GetValueA(r) => {
let op_reader = r.map_err(RPCError::protocol)?;
let out = RPCOperationGetValueA::decode(&op_reader)?;
@ -126,6 +130,7 @@ impl RPCAnswerDetail {
match self {
RPCAnswerDetail::StatusA(d) => d.encode(&mut builder.reborrow().init_status_a()),
RPCAnswerDetail::FindNodeA(d) => d.encode(&mut builder.reborrow().init_find_node_a()),
RPCAnswerDetail::AppCallA(d) => d.encode(&mut builder.reborrow().init_app_call_a()),
RPCAnswerDetail::GetValueA(d) => d.encode(&mut builder.reborrow().init_get_value_a()),
RPCAnswerDetail::SetValueA(d) => d.encode(&mut builder.reborrow().init_set_value_a()),
RPCAnswerDetail::WatchValueA(d) => {
@ -1,5 +1,7 @@
mod answer;
mod operation;
mod operation_app_call;
mod operation_app_message;
mod operation_cancel_tunnel;
mod operation_complete_tunnel;
mod operation_find_block;
@ -22,6 +24,8 @@ mod statement;

pub use answer::*;
pub use operation::*;
pub use operation_app_call::*;
pub use operation_app_message::*;
pub use operation_cancel_tunnel::*;
pub use operation_complete_tunnel::*;
pub use operation_find_block::*;
@ -19,7 +19,7 @@ impl RPCOperationKind {

pub fn decode(
kind_reader: &veilid_capnp::operation::kind::Reader,
sender_node_id: &DHTKey,
opt_sender_node_id: Option<&DHTKey>,
) -> Result<Self, RPCError> {
let which_reader = kind_reader.which().map_err(RPCError::protocol)?;
let out = match which_reader {
@ -30,7 +30,7 @@ impl RPCOperationKind {
}
veilid_capnp::operation::kind::Which::Statement(r) => {
let q_reader = r.map_err(RPCError::protocol)?;
let out = RPCStatement::decode(&q_reader, sender_node_id)?;
let out = RPCStatement::decode(&q_reader, opt_sender_node_id)?;
RPCOperationKind::Statement(out)
}
veilid_capnp::operation::kind::Which::Answer(r) => {
@ -111,22 +111,26 @@ impl RPCOperation {

pub fn decode(
operation_reader: &veilid_capnp::operation::Reader,
sender_node_id: &DHTKey,
opt_sender_node_id: Option<&DHTKey>,
) -> Result<Self, RPCError> {
let op_id = operation_reader.get_op_id();

let sender_node_info = if operation_reader.has_sender_node_info() {
if let Some(sender_node_id) = opt_sender_node_id {
let sni_reader = operation_reader
.get_sender_node_info()
.map_err(RPCError::protocol)?;
let sni = decode_signed_node_info(&sni_reader, sender_node_id, true)?;
let sni = decode_signed_node_info(&sni_reader, sender_node_id)?;
Some(sni)
} else {
None
}
} else {
None
};

let kind_reader = operation_reader.get_kind();
let kind = RPCOperationKind::decode(&kind_reader, sender_node_id)?;
let kind = RPCOperationKind::decode(&kind_reader, opt_sender_node_id)?;

Ok(RPCOperation {
op_id,
@ -0,0 +1,44 @@
use crate::*;
use rpc_processor::*;

#[derive(Debug, Clone)]
pub struct RPCOperationAppCallQ {
pub message: Vec<u8>,
}

impl RPCOperationAppCallQ {
pub fn decode(
reader: &veilid_capnp::operation_app_call_q::Reader,
) -> Result<RPCOperationAppCallQ, RPCError> {
let message = reader.get_message().map_err(RPCError::protocol)?.to_vec();
Ok(RPCOperationAppCallQ { message })
}
pub fn encode(
&self,
builder: &mut veilid_capnp::operation_app_call_q::Builder,
) -> Result<(), RPCError> {
builder.set_message(&self.message);
Ok(())
}
}

#[derive(Debug, Clone)]
pub struct RPCOperationAppCallA {
pub message: Vec<u8>,
}

impl RPCOperationAppCallA {
pub fn decode(
reader: &veilid_capnp::operation_app_call_a::Reader,
) -> Result<RPCOperationAppCallA, RPCError> {
let message = reader.get_message().map_err(RPCError::protocol)?.to_vec();
Ok(RPCOperationAppCallA { message })
}
pub fn encode(
&self,
builder: &mut veilid_capnp::operation_app_call_a::Builder,
) -> Result<(), RPCError> {
builder.set_message(&self.message);
Ok(())
}
}
@ -0,0 +1,23 @@
|
||||
use crate::*;
|
||||
use rpc_processor::*;
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct RPCOperationAppMessage {
|
||||
pub message: Vec<u8>,
|
||||
}
|
||||
|
||||
impl RPCOperationAppMessage {
|
||||
pub fn decode(
|
||||
reader: &veilid_capnp::operation_app_message::Reader,
|
||||
) -> Result<RPCOperationAppMessage, RPCError> {
|
||||
let message = reader.get_message().map_err(RPCError::protocol)?.to_vec();
|
||||
Ok(RPCOperationAppMessage { message })
|
||||
}
|
||||
pub fn encode(
|
||||
&self,
|
||||
builder: &mut veilid_capnp::operation_app_message::Builder,
|
||||
) -> Result<(), RPCError> {
|
||||
builder.set_message(&self.message);
|
||||
Ok(())
|
||||
}
|
||||
}
|
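The new AppCall and AppMessage types are thin wrappers around a byte payload. A minimal round-trip sketch, assuming the usual capnp plumbing (the builder setup below is an illustrative assumption, not part of this change):

// Sketch only: encode an AppCallQ into a fresh capnp message and decode it back.
let q = RPCOperationAppCallQ { message: b"ping".to_vec() };

let mut msg = ::capnp::message::Builder::new_default();
let mut q_builder = msg.init_root::<veilid_capnp::operation_app_call_q::Builder>();
q.encode(&mut q_builder)?;

let decoded = RPCOperationAppCallQ::decode(&q_builder.into_reader())?;
assert_eq!(decoded.message, b"ping".to_vec());
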
@ -11,7 +11,7 @@ impl RPCOperationFindBlockQ {
reader: &veilid_capnp::operation_find_block_q::Reader,
) -> Result<RPCOperationFindBlockQ, RPCError> {
let bi_reader = reader.get_block_id().map_err(RPCError::protocol)?;
let block_id = decode_block_id(&bi_reader);
let block_id = decode_dht_key(&bi_reader);

Ok(RPCOperationFindBlockQ { block_id })
}
@ -20,7 +20,7 @@ impl RPCOperationFindBlockQ {
builder: &mut veilid_capnp::operation_find_block_q::Builder,
) -> Result<(), RPCError> {
let mut bi_builder = builder.reborrow().init_block_id();
encode_block_id(&self.block_id, &mut bi_builder)?;
encode_dht_key(&self.block_id, &mut bi_builder)?;

Ok(())
}
@ -47,7 +47,7 @@ impl RPCOperationFindBlockA {
.map_err(RPCError::map_internal("too many suppliers"))?,
);
for s in suppliers_reader.iter() {
let peer_info = decode_peer_info(&s, true)?;
let peer_info = decode_peer_info(&s)?;
suppliers.push(peer_info);
}

@ -59,7 +59,7 @@ impl RPCOperationFindBlockA {
.map_err(RPCError::map_internal("too many peers"))?,
);
for p in peers_reader.iter() {
let peer_info = decode_peer_info(&p, true)?;
let peer_info = decode_peer_info(&p)?;
peers.push(peer_info);
}

@ -11,7 +11,7 @@ impl RPCOperationFindNodeQ {
reader: &veilid_capnp::operation_find_node_q::Reader,
) -> Result<RPCOperationFindNodeQ, RPCError> {
let ni_reader = reader.get_node_id().map_err(RPCError::protocol)?;
let node_id = decode_public_key(&ni_reader);
let node_id = decode_dht_key(&ni_reader);
Ok(RPCOperationFindNodeQ { node_id })
}
pub fn encode(
@ -19,7 +19,7 @@ impl RPCOperationFindNodeQ {
builder: &mut veilid_capnp::operation_find_node_q::Builder,
) -> Result<(), RPCError> {
let mut ni_builder = builder.reborrow().init_node_id();
encode_public_key(&self.node_id, &mut ni_builder)?;
encode_dht_key(&self.node_id, &mut ni_builder)?;
Ok(())
}
}
@ -41,7 +41,7 @@ impl RPCOperationFindNodeA {
.map_err(RPCError::map_internal("too many peers"))?,
);
for p in peers_reader.iter() {
let peer_info = decode_peer_info(&p, true)?;
let peer_info = decode_peer_info(&p)?;
peers.push(peer_info);
}

@ -48,7 +48,7 @@ impl RPCOperationGetValueA {
.map_err(RPCError::map_internal("too many peers"))?,
);
for p in peers_reader.iter() {
let peer_info = decode_peer_info(&p, true)?;
let peer_info = decode_peer_info(&p)?;
peers.push(peer_info);
}

@ -9,10 +9,16 @@ pub struct RPCOperationNodeInfoUpdate {
impl RPCOperationNodeInfoUpdate {
pub fn decode(
reader: &veilid_capnp::operation_node_info_update::Reader,
sender_node_id: &DHTKey,
opt_sender_node_id: Option<&DHTKey>,
) -> Result<RPCOperationNodeInfoUpdate, RPCError> {
if opt_sender_node_id.is_none() {
return Err(RPCError::protocol(
"can't decode node info update without sender node id",
));
}
let sender_node_id = opt_sender_node_id.unwrap();
let sni_reader = reader.get_signed_node_info().map_err(RPCError::protocol)?;
let signed_node_info = decode_signed_node_info(&sni_reader, sender_node_id, true)?;
let signed_node_info = decode_signed_node_info(&sni_reader, sender_node_id)?;

Ok(RPCOperationNodeInfoUpdate { signed_node_info })
}

@ -3,14 +3,16 @@ use rpc_processor::*;

#[derive(Debug, Clone)]
pub struct RoutedOperation {
pub version: u8,
pub signatures: Vec<DHTSignature>,
pub nonce: Nonce,
pub data: Vec<u8>,
}

impl RoutedOperation {
pub fn new(nonce: Nonce, data: Vec<u8>) -> Self {
pub fn new(version: u8, nonce: Nonce, data: Vec<u8>) -> Self {
Self {
version,
signatures: Vec::new(),
nonce,
data,
@ -32,11 +34,13 @@ impl RoutedOperation {
signatures.push(sig);
}

let version = reader.get_version();
let n_reader = reader.get_nonce().map_err(RPCError::protocol)?;
let nonce = decode_nonce(&n_reader);
let data = reader.get_data().map_err(RPCError::protocol)?.to_vec();

Ok(RoutedOperation {
version,
signatures,
nonce,
data,
@ -47,6 +51,7 @@ impl RoutedOperation {
&self,
builder: &mut veilid_capnp::routed_operation::Builder,
) -> Result<(), RPCError> {
builder.reborrow().set_version(self.version);
let mut sigs_builder = builder.reborrow().init_signatures(
self.signatures
.len()

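Since RoutedOperation now carries the envelope version explicitly, construction changes accordingly. A small sketch, where the nonce and payload values are placeholders rather than anything from the diff:

// Sketch only: `nonce` and `encrypted_route_data` come from wherever the caller
// already obtains them; 0u8 stands in for the envelope version in use.
let routed_op = RoutedOperation::new(0u8, nonce, encrypted_route_data);

// The version round-trips through encode()/decode(), while signatures start
// empty and are appended as each hop signs the payload.
assert!(routed_op.signatures.is_empty());
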
@ -53,7 +53,7 @@ impl RPCOperationSetValueA {
.map_err(RPCError::map_internal("too many peers"))?,
);
for p in peers_reader.iter() {
let peer_info = decode_peer_info(&p, true)?;
let peer_info = decode_peer_info(&p)?;
peers.push(peer_info);
}

@ -3,42 +3,59 @@ use rpc_processor::*;

#[derive(Debug, Clone)]
pub struct RPCOperationStatusQ {
pub node_status: NodeStatus,
pub node_status: Option<NodeStatus>,
}

impl RPCOperationStatusQ {
pub fn decode(
reader: &veilid_capnp::operation_status_q::Reader,
) -> Result<RPCOperationStatusQ, RPCError> {
let node_status = if reader.has_node_status() {
let ns_reader = reader.get_node_status().map_err(RPCError::protocol)?;
let node_status = decode_node_status(&ns_reader)?;
Some(node_status)
} else {
None
};
Ok(RPCOperationStatusQ { node_status })
}
pub fn encode(
&self,
builder: &mut veilid_capnp::operation_status_q::Builder,
) -> Result<(), RPCError> {
if let Some(ns) = &self.node_status {
let mut ns_builder = builder.reborrow().init_node_status();
encode_node_status(&self.node_status, &mut ns_builder)?;
encode_node_status(&ns, &mut ns_builder)?;
}
Ok(())
}
}

#[derive(Debug, Clone)]
pub struct RPCOperationStatusA {
pub node_status: NodeStatus,
pub sender_info: SenderInfo,
pub node_status: Option<NodeStatus>,
pub sender_info: Option<SenderInfo>,
}

impl RPCOperationStatusA {
pub fn decode(
reader: &veilid_capnp::operation_status_a::Reader,
) -> Result<RPCOperationStatusA, RPCError> {
let node_status = if reader.has_node_status() {
let ns_reader = reader.get_node_status().map_err(RPCError::protocol)?;
let node_status = decode_node_status(&ns_reader)?;
Some(node_status)
} else {
None
};

let sender_info = if reader.has_sender_info() {
let si_reader = reader.get_sender_info().map_err(RPCError::protocol)?;
let sender_info = decode_sender_info(&si_reader)?;
Some(sender_info)
} else {
None
};

Ok(RPCOperationStatusA {
node_status,
@ -49,10 +66,14 @@ impl RPCOperationStatusA {
&self,
builder: &mut veilid_capnp::operation_status_a::Builder,
) -> Result<(), RPCError> {
if let Some(ns) = &self.node_status {
let mut ns_builder = builder.reborrow().init_node_status();
encode_node_status(&self.node_status, &mut ns_builder)?;
encode_node_status(&ns, &mut ns_builder)?;
}
if let Some(si) = &self.sender_info {
let mut si_builder = builder.reborrow().init_sender_info();
encode_sender_info(&self.sender_info, &mut si_builder)?;
encode_sender_info(&si, &mut si_builder)?;
}
Ok(())
}
}

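With node_status and sender_info now optional, a status exchange can omit either side of the report. A short illustrative sketch (the values are placeholders, and the motivation comment is an assumption, not stated in the diff):

// Sketch only: a status question that deliberately withholds the local node status,
// e.g. when the sender prefers not to reveal it.
let status_q = RPCOperationStatusQ { node_status: None };

// encode() only initializes the capnp fields that are Some, and decode() maps
// absent fields back to None, so omission survives the round trip.
let status_a = RPCOperationStatusA {
    node_status: None,
    sender_info: None,
};
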
@ -11,7 +11,7 @@ impl RPCOperationSupplyBlockQ {
reader: &veilid_capnp::operation_supply_block_q::Reader,
) -> Result<RPCOperationSupplyBlockQ, RPCError> {
let bi_reader = reader.get_block_id().map_err(RPCError::protocol)?;
let block_id = decode_block_id(&bi_reader);
let block_id = decode_dht_key(&bi_reader);

Ok(RPCOperationSupplyBlockQ { block_id })
}
@ -20,7 +20,7 @@ impl RPCOperationSupplyBlockQ {
builder: &mut veilid_capnp::operation_supply_block_q::Builder,
) -> Result<(), RPCError> {
let mut bi_builder = builder.reborrow().init_block_id();
encode_block_id(&self.block_id, &mut bi_builder)?;
encode_dht_key(&self.block_id, &mut bi_builder)?;

Ok(())
}
@ -49,7 +49,7 @@ impl RPCOperationSupplyBlockA {
.map_err(RPCError::map_internal("too many peers"))?,
);
for p in peers_reader.iter() {
let peer_info = decode_peer_info(&p, true)?;
let peer_info = decode_peer_info(&p)?;
peers.push(peer_info);
}

@ -43,7 +43,7 @@ impl RPCOperationWatchValueA {
.map_err(RPCError::map_internal("too many peers"))?,
);
for p in peers_reader.iter() {
let peer_info = decode_peer_info(&p, true)?;
let peer_info = decode_peer_info(&p)?;
peers.push(peer_info);
}

@ -40,6 +40,7 @@ impl RPCQuestion {
pub enum RPCQuestionDetail {
StatusQ(RPCOperationStatusQ),
FindNodeQ(RPCOperationFindNodeQ),
AppCallQ(RPCOperationAppCallQ),
GetValueQ(RPCOperationGetValueQ),
SetValueQ(RPCOperationSetValueQ),
WatchValueQ(RPCOperationWatchValueQ),
@ -55,6 +56,7 @@ impl RPCQuestionDetail {
match self {
RPCQuestionDetail::StatusQ(_) => "StatusQ",
RPCQuestionDetail::FindNodeQ(_) => "FindNodeQ",
RPCQuestionDetail::AppCallQ(_) => "AppCallQ",
RPCQuestionDetail::GetValueQ(_) => "GetValueQ",
RPCQuestionDetail::SetValueQ(_) => "SetValueQ",
RPCQuestionDetail::WatchValueQ(_) => "WatchValueQ",
@ -81,6 +83,11 @@ impl RPCQuestionDetail {
let out = RPCOperationFindNodeQ::decode(&op_reader)?;
RPCQuestionDetail::FindNodeQ(out)
}
veilid_capnp::question::detail::Which::AppCallQ(r) => {
let op_reader = r.map_err(RPCError::protocol)?;
let out = RPCOperationAppCallQ::decode(&op_reader)?;
RPCQuestionDetail::AppCallQ(out)
}
veilid_capnp::question::detail::Which::GetValueQ(r) => {
let op_reader = r.map_err(RPCError::protocol)?;
let out = RPCOperationGetValueQ::decode(&op_reader)?;
@ -131,6 +138,7 @@ impl RPCQuestionDetail {
match self {
RPCQuestionDetail::StatusQ(d) => d.encode(&mut builder.reborrow().init_status_q()),
RPCQuestionDetail::FindNodeQ(d) => d.encode(&mut builder.reborrow().init_find_node_q()),
RPCQuestionDetail::AppCallQ(d) => d.encode(&mut builder.reborrow().init_app_call_q()),
RPCQuestionDetail::GetValueQ(d) => d.encode(&mut builder.reborrow().init_get_value_q()),
RPCQuestionDetail::SetValueQ(d) => d.encode(&mut builder.reborrow().init_set_value_q()),
RPCQuestionDetail::WatchValueQ(d) => {

@ -22,10 +22,10 @@ impl RPCStatement {
}
pub fn decode(
reader: &veilid_capnp::statement::Reader,
sender_node_id: &DHTKey,
opt_sender_node_id: Option<&DHTKey>,
) -> Result<RPCStatement, RPCError> {
let d_reader = reader.get_detail();
let detail = RPCStatementDetail::decode(&d_reader, sender_node_id)?;
let detail = RPCStatementDetail::decode(&d_reader, opt_sender_node_id)?;
Ok(RPCStatement { detail })
}
pub fn encode(&self, builder: &mut veilid_capnp::statement::Builder) -> Result<(), RPCError> {
@ -42,6 +42,7 @@ pub enum RPCStatementDetail {
ValueChanged(RPCOperationValueChanged),
Signal(RPCOperationSignal),
ReturnReceipt(RPCOperationReturnReceipt),
AppMessage(RPCOperationAppMessage),
}

impl RPCStatementDetail {
@ -53,11 +54,12 @@ impl RPCStatementDetail {
RPCStatementDetail::ValueChanged(_) => "ValueChanged",
RPCStatementDetail::Signal(_) => "Signal",
RPCStatementDetail::ReturnReceipt(_) => "ReturnReceipt",
RPCStatementDetail::AppMessage(_) => "AppMessage",
}
}
pub fn decode(
reader: &veilid_capnp::statement::detail::Reader,
sender_node_id: &DHTKey,
opt_sender_node_id: Option<&DHTKey>,
) -> Result<RPCStatementDetail, RPCError> {
let which_reader = reader.which().map_err(RPCError::protocol)?;
let out = match which_reader {
@ -73,7 +75,7 @@ impl RPCStatementDetail {
}
veilid_capnp::statement::detail::Which::NodeInfoUpdate(r) => {
let op_reader = r.map_err(RPCError::protocol)?;
let out = RPCOperationNodeInfoUpdate::decode(&op_reader, sender_node_id)?;
let out = RPCOperationNodeInfoUpdate::decode(&op_reader, opt_sender_node_id)?;
RPCStatementDetail::NodeInfoUpdate(out)
}
veilid_capnp::statement::detail::Which::ValueChanged(r) => {
@ -91,6 +93,11 @@ impl RPCStatementDetail {
let out = RPCOperationReturnReceipt::decode(&op_reader)?;
RPCStatementDetail::ReturnReceipt(out)
}
veilid_capnp::statement::detail::Which::AppMessage(r) => {
let op_reader = r.map_err(RPCError::protocol)?;
let out = RPCOperationAppMessage::decode(&op_reader)?;
RPCStatementDetail::AppMessage(out)
}
};
Ok(out)
}
@ -113,6 +120,9 @@ impl RPCStatementDetail {
RPCStatementDetail::ReturnReceipt(d) => {
d.encode(&mut builder.reborrow().init_return_receipt())
}
RPCStatementDetail::AppMessage(d) => {
d.encode(&mut builder.reborrow().init_app_message())
}
}
}
}

@ -7,17 +7,14 @@ pub fn encode_peer_info(
) -> Result<(), RPCError> {
//
let mut nid_builder = builder.reborrow().init_node_id();
encode_public_key(&peer_info.node_id.key, &mut nid_builder)?;
encode_dht_key(&peer_info.node_id.key, &mut nid_builder)?;
let mut sni_builder = builder.reborrow().init_signed_node_info();
encode_signed_node_info(&peer_info.signed_node_info, &mut sni_builder)?;

Ok(())
}

pub fn decode_peer_info(
reader: &veilid_capnp::peer_info::Reader,
allow_relay_peer_info: bool,
) -> Result<PeerInfo, RPCError> {
pub fn decode_peer_info(reader: &veilid_capnp::peer_info::Reader) -> Result<PeerInfo, RPCError> {
let nid_reader = reader
.reborrow()
.get_node_id()
@ -26,9 +23,8 @@ pub fn decode_peer_info(
.reborrow()
.get_signed_node_info()
.map_err(RPCError::protocol)?;
let node_id = NodeId::new(decode_public_key(&nid_reader));
let signed_node_info =
decode_signed_node_info(&sni_reader, &node_id.key, allow_relay_peer_info)?;
let node_id = NodeId::new(decode_dht_key(&nid_reader));
let signed_node_info = decode_signed_node_info(&sni_reader, &node_id.key)?;

Ok(PeerInfo {
node_id,

@ -2,80 +2,6 @@ use super::*;

////////////////////////////////////////////////////////////////////////////////////////////////////

#[derive(Clone, Debug)]
pub struct RouteHopData {
pub nonce: Nonce,
pub blob: Vec<u8>,
}

#[derive(Clone, Debug)]
pub struct RouteHop {
pub dial_info: NodeDialInfo,
pub next_hop: Option<RouteHopData>,
}

#[derive(Clone, Debug)]
pub struct PrivateRoute {
pub public_key: DHTKey,
pub hop_count: u8,
pub hops: Option<RouteHop>,
}

impl PrivateRoute {
pub fn new_stub(public_key: DHTKey) -> Self {
Self {
public_key,
hop_count: 0,
hops: None,
}
}
}

impl fmt::Display for PrivateRoute {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(
f,
"PR({:?}+{}{})",
self.public_key,
self.hop_count,
if let Some(hops) = &self.hops {
format!("->{}", hops.dial_info)
} else {
"".to_owned()
}
)
}
}

#[derive(Clone, Debug)]
pub enum SafetyRouteHops {
Data(RouteHopData),
Private(PrivateRoute),
}

#[derive(Clone, Debug)]
pub struct SafetyRoute {
pub public_key: DHTKey,
pub hop_count: u8,
pub hops: SafetyRouteHops,
}

impl fmt::Display for SafetyRoute {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(
f,
"SR({:?}+{}{})",
self.public_key,
self.hop_count,
match &self.hops {
SafetyRouteHops::Data(_) => "".to_owned(),
SafetyRouteHops::Private(p) => format!("->{}", p),
}
)
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////

pub fn encode_route_hop_data(
route_hop_data: &RouteHopData,
builder: &mut veilid_capnp::route_hop_data::Builder,
@ -98,62 +24,6 @@ pub fn encode_route_hop_data(
Ok(())
}

pub fn encode_route_hop(
route_hop: &RouteHop,
builder: &mut veilid_capnp::route_hop::Builder,
) -> Result<(), RPCError> {
encode_node_dial_info(
&route_hop.dial_info,
&mut builder.reborrow().init_dial_info(),
)?;
if let Some(rhd) = &route_hop.next_hop {
let mut rhd_builder = builder.reborrow().init_next_hop();
encode_route_hop_data(rhd, &mut rhd_builder)?;
}
Ok(())
}

pub fn encode_private_route(
private_route: &PrivateRoute,
builder: &mut veilid_capnp::private_route::Builder,
) -> Result<(), RPCError> {
encode_public_key(
&private_route.public_key,
&mut builder.reborrow().init_public_key(),
)?;
builder.set_hop_count(private_route.hop_count);
if let Some(rh) = &private_route.hops {
let mut rh_builder = builder.reborrow().init_first_hop();
encode_route_hop(rh, &mut rh_builder)?;
};

Ok(())
}

pub fn encode_safety_route(
safety_route: &SafetyRoute,
builder: &mut veilid_capnp::safety_route::Builder,
) -> Result<(), RPCError> {
encode_public_key(
&safety_route.public_key,
&mut builder.reborrow().init_public_key(),
)?;
builder.set_hop_count(safety_route.hop_count);
let h_builder = builder.reborrow().init_hops();
match &safety_route.hops {
SafetyRouteHops::Data(rhd) => {
let mut rhd_builder = h_builder.init_data();
encode_route_hop_data(rhd, &mut rhd_builder)?;
}
SafetyRouteHops::Private(pr) => {
let mut pr_builder = h_builder.init_private();
encode_private_route(pr, &mut pr_builder)?;
}
};

Ok(())
}

pub fn decode_route_hop_data(
reader: &veilid_capnp::route_hop_data::Reader,
) -> Result<RouteHopData, RPCError> {
@ -173,13 +43,45 @@ pub fn decode_route_hop_data(
Ok(RouteHopData { nonce, blob })
}

////////////////////////////////////////////////////////////////////////////////////////////////////

pub fn encode_route_hop(
route_hop: &RouteHop,
builder: &mut veilid_capnp::route_hop::Builder,
) -> Result<(), RPCError> {
let node_builder = builder.reborrow().init_node();
match &route_hop.node {
RouteNode::NodeId(ni) => {
let mut ni_builder = node_builder.init_node_id();
encode_dht_key(&ni.key, &mut ni_builder)?;
}
RouteNode::PeerInfo(pi) => {
let mut pi_builder = node_builder.init_peer_info();
encode_peer_info(&pi, &mut pi_builder)?;
}
}
if let Some(rhd) = &route_hop.next_hop {
let mut rhd_builder = builder.reborrow().init_next_hop();
encode_route_hop_data(rhd, &mut rhd_builder)?;
}
Ok(())
}

pub fn decode_route_hop(reader: &veilid_capnp::route_hop::Reader) -> Result<RouteHop, RPCError> {
let dial_info = decode_node_dial_info(
&reader
.reborrow()
.get_dial_info()
.map_err(RPCError::map_protocol("invalid dial info in route hop"))?,
)?;
let n_reader = reader.reborrow().get_node();
let node = match n_reader.which().map_err(RPCError::protocol)? {
veilid_capnp::route_hop::node::Which::NodeId(ni) => {
let ni_reader = ni.map_err(RPCError::protocol)?;
RouteNode::NodeId(NodeId::new(decode_dht_key(&ni_reader)))
}
veilid_capnp::route_hop::node::Which::PeerInfo(pi) => {
let pi_reader = pi.map_err(RPCError::protocol)?;
RouteNode::PeerInfo(
decode_peer_info(&pi_reader)
.map_err(RPCError::map_protocol("invalid peer info in route hop"))?,
)
}
};

let next_hop = if reader.has_next_hop() {
let rhd_reader = reader
@ -190,26 +92,55 @@ pub fn decode_route_hop(reader: &veilid_capnp::route_hop::Reader) -> Result<Rout
None
};

Ok(RouteHop {
dial_info,
next_hop,
})
Ok(RouteHop { node, next_hop })
}

////////////////////////////////////////////////////////////////////////////////////////////////////

pub fn encode_private_route(
private_route: &PrivateRoute,
builder: &mut veilid_capnp::private_route::Builder,
) -> Result<(), RPCError> {
encode_dht_key(
&private_route.public_key,
&mut builder.reborrow().init_public_key(),
)?;
builder.set_hop_count(private_route.hop_count);
let mut h_builder = builder.reborrow().init_hops();
match &private_route.hops {
PrivateRouteHops::FirstHop(first_hop) => {
let mut rh_builder = h_builder.init_first_hop();
encode_route_hop(first_hop, &mut rh_builder)?;
}
PrivateRouteHops::Data(data) => {
let mut rhd_builder = h_builder.init_data();
encode_route_hop_data(data, &mut rhd_builder)?;
}
PrivateRouteHops::Empty => {
h_builder.set_empty(());
}
};
Ok(())
}

pub fn decode_private_route(
reader: &veilid_capnp::private_route::Reader,
) -> Result<PrivateRoute, RPCError> {
let public_key = decode_public_key(&reader.get_public_key().map_err(
RPCError::map_protocol("invalid public key in private route"),
)?);
let public_key = decode_dht_key(&reader.get_public_key().map_err(RPCError::map_protocol(
"invalid public key in private route",
))?);
let hop_count = reader.get_hop_count();
let hops = if reader.has_first_hop() {
let rh_reader = reader
.get_first_hop()
.map_err(RPCError::map_protocol("invalid first hop in private route"))?;
Some(decode_route_hop(&rh_reader)?)
} else {
None

let hops = match reader.get_hops().which().map_err(RPCError::protocol)? {
veilid_capnp::private_route::hops::Which::FirstHop(rh_reader) => {
let rh_reader = rh_reader.map_err(RPCError::protocol)?;
PrivateRouteHops::FirstHop(decode_route_hop(&rh_reader)?)
}
veilid_capnp::private_route::hops::Which::Data(rhd_reader) => {
let rhd_reader = rhd_reader.map_err(RPCError::protocol)?;
PrivateRouteHops::Data(decode_route_hop_data(&rhd_reader)?)
}
veilid_capnp::private_route::hops::Which::Empty(_) => PrivateRouteHops::Empty,
};

Ok(PrivateRoute {
@ -219,10 +150,36 @@ pub fn decode_private_route(
})
}

////////////////////////////////////////////////////////////////////////////////////////////////////

pub fn encode_safety_route(
safety_route: &SafetyRoute,
builder: &mut veilid_capnp::safety_route::Builder,
) -> Result<(), RPCError> {
encode_dht_key(
&safety_route.public_key,
&mut builder.reborrow().init_public_key(),
)?;
builder.set_hop_count(safety_route.hop_count);
let h_builder = builder.reborrow().init_hops();
match &safety_route.hops {
SafetyRouteHops::Data(rhd) => {
let mut rhd_builder = h_builder.init_data();
encode_route_hop_data(rhd, &mut rhd_builder)?;
}
SafetyRouteHops::Private(pr) => {
let mut pr_builder = h_builder.init_private();
encode_private_route(pr, &mut pr_builder)?;
}
};

Ok(())
}

pub fn decode_safety_route(
reader: &veilid_capnp::safety_route::Reader,
) -> Result<SafetyRoute, RPCError> {
let public_key = decode_public_key(
let public_key = decode_dht_key(
&reader
.get_public_key()
.map_err(RPCError::map_protocol("invalid public key in safety route"))?,

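To help follow the rewritten route codec, here is a rough sketch of the data model the new encoders and decoders appear to target; the variant and field names are read off the match arms above, while the comments on intent are inferred and may not match the real definitions in veilid-core:

// Inferred shapes only, for orientation.
pub enum RouteNode {
    // A hop referenced by bare node id (to be resolved via the routing table)...
    NodeId(NodeId),
    // ...or carried with full peer info so it can be reached directly.
    PeerInfo(PeerInfo),
}

pub struct RouteHop {
    pub node: RouteNode,
    pub next_hop: Option<RouteHopData>,
}

pub enum PrivateRouteHops {
    // The next hop is still present in decoded form.
    FirstHop(RouteHop),
    // The remaining hops are an opaque blob for a later hop to decode.
    Data(RouteHopData),
    // No hops remain; the route terminates here.
    Empty,
}
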
@ -5,30 +5,21 @@ pub fn encode_sender_info(
sender_info: &SenderInfo,
builder: &mut veilid_capnp::sender_info::Builder,
) -> Result<(), RPCError> {
if let Some(socket_address) = &sender_info.socket_address {
let mut sab = builder.reborrow().init_socket_address();
encode_socket_address(socket_address, &mut sab)?;
}
encode_socket_address(&sender_info.socket_address, &mut sab)?;
Ok(())
}

pub fn decode_sender_info(
reader: &veilid_capnp::sender_info::Reader,
) -> Result<SenderInfo, RPCError> {
if !reader.has_socket_address() {
return Err(RPCError::internal("invalid socket address type"));
}
let socket_address = if reader.has_socket_address() {
Some(decode_socket_address(
&reader
let sa_reader = reader
.reborrow()
.get_socket_address()
.map_err(RPCError::map_internal(
"invalid socket address in sender_info",
))?,
)?)
} else {
None
};
))?;
let socket_address = decode_socket_address(&sa_reader)?;

Ok(SenderInfo { socket_address })
}

@ -53,7 +53,7 @@ pub fn decode_signal_info(
let pi_reader = r.get_peer_info().map_err(RPCError::map_protocol(
"invalid peer info in hole punch signal info",
))?;
let peer_info = decode_peer_info(&pi_reader, true)?;
let peer_info = decode_peer_info(&pi_reader)?;

SignalInfo::HolePunch { receipt, peer_info }
}
@ -69,7 +69,7 @@ pub fn decode_signal_info(
let pi_reader = r.get_peer_info().map_err(RPCError::map_protocol(
"invalid peer info in reverse connect signal info",
))?;
let peer_info = decode_peer_info(&pi_reader, true)?;
let peer_info = decode_peer_info(&pi_reader)?;

SignalInfo::ReverseConnect { receipt, peer_info }
}

@ -0,0 +1,46 @@
use crate::*;
use rpc_processor::*;

pub fn encode_signed_direct_node_info(
signed_direct_node_info: &SignedDirectNodeInfo,
builder: &mut veilid_capnp::signed_direct_node_info::Builder,
) -> Result<(), RPCError> {
//
let mut ni_builder = builder.reborrow().init_node_info();
encode_node_info(&signed_direct_node_info.node_info, &mut ni_builder)?;

builder
.reborrow()
.set_timestamp(signed_direct_node_info.timestamp);

let mut sig_builder = builder.reborrow().init_signature();
let Some(signature) = &signed_direct_node_info.signature else {
return Err(RPCError::internal("Should not encode SignedDirectNodeInfo without signature!"));
};
encode_signature(signature, &mut sig_builder);

Ok(())
}

pub fn decode_signed_direct_node_info(
reader: &veilid_capnp::signed_direct_node_info::Reader,
node_id: &DHTKey,
) -> Result<SignedDirectNodeInfo, RPCError> {
let ni_reader = reader
.reborrow()
.get_node_info()
.map_err(RPCError::protocol)?;
let node_info = decode_node_info(&ni_reader)?;

let sig_reader = reader
.reborrow()
.get_signature()
.map_err(RPCError::protocol)?;

let timestamp = reader.reborrow().get_timestamp();

let signature = decode_signature(&sig_reader);

SignedDirectNodeInfo::new(NodeId::new(*node_id), node_info, timestamp, signature)
.map_err(RPCError::protocol)
}

@ -5,14 +5,16 @@ pub fn encode_signed_node_info(
signed_node_info: &SignedNodeInfo,
builder: &mut veilid_capnp::signed_node_info::Builder,
) -> Result<(), RPCError> {
//
let mut ni_builder = builder.reborrow().init_node_info();
encode_node_info(&signed_node_info.node_info, &mut ni_builder)?;

let mut sig_builder = builder.reborrow().init_signature();
encode_signature(&signed_node_info.signature, &mut sig_builder);

builder.reborrow().set_timestamp(signed_node_info.timestamp);
match signed_node_info {
SignedNodeInfo::Direct(d) => {
let mut d_builder = builder.reborrow().init_direct();
encode_signed_direct_node_info(d, &mut d_builder)?;
}
SignedNodeInfo::Relayed(r) => {
let mut r_builder = builder.reborrow().init_relayed();
encode_signed_relayed_node_info(r, &mut r_builder)?;
}
}

Ok(())
}
@ -20,22 +22,20 @@ pub fn encode_signed_node_info(
pub fn decode_signed_node_info(
reader: &veilid_capnp::signed_node_info::Reader,
node_id: &DHTKey,
allow_relay_peer_info: bool,
) -> Result<SignedNodeInfo, RPCError> {
let ni_reader = reader
.reborrow()
.get_node_info()
.map_err(RPCError::protocol)?;
let node_info = decode_node_info(&ni_reader, allow_relay_peer_info)?;

let sig_reader = reader
.reborrow()
.get_signature()
.map_err(RPCError::protocol)?;
let signature = decode_signature(&sig_reader);

let timestamp = reader.reborrow().get_timestamp();

SignedNodeInfo::new(node_info, NodeId::new(*node_id), signature, timestamp)
.map_err(RPCError::protocol)
match reader
.which()
.map_err(RPCError::map_internal("invalid signal operation"))?
{
veilid_capnp::signed_node_info::Which::Direct(d) => {
let d_reader = d.map_err(RPCError::protocol)?;
let sdni = decode_signed_direct_node_info(&d_reader, node_id)?;
Ok(SignedNodeInfo::Direct(sdni))
}
veilid_capnp::signed_node_info::Which::Relayed(r) => {
let r_reader = r.map_err(RPCError::protocol)?;
let srni = decode_signed_relayed_node_info(&r_reader, node_id)?;
Ok(SignedNodeInfo::Relayed(srni))
}
}
}

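Finally, the signed node info codec now splits on a direct/relayed union. A one-glance sketch of the enum shape this implies, reconstructed from the match arms above rather than copied from the source:

// Inferred from the codec above; the real definition lives in veilid-core.
pub enum SignedNodeInfo {
    // Node info signed by a directly reachable node.
    Direct(SignedDirectNodeInfo),
    // Node info for a node reachable only through a relay.
    Relayed(SignedRelayedNodeInfo),
}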