Mirror of https://gitlab.com/veilid/veilid.git (synced 2024-10-01 01:26:08 -04:00)

Merge branch 'dev' into 'main'

Latest from dev: Private Routing

See merge request veilid/veilid!12

Commit 2f3485e9b7
@@ -1,3 +1,6 @@
+[build]
+rustflags = ["--cfg", "tokio_unstable"]
+
 [target.aarch64-unknown-linux-gnu]
 linker = "aarch64-linux-gnu-gcc"
 
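For context on the new `[build]` section: a `--cfg` flag passed through rustflags is applied to every crate in the workspace, and `--cfg tokio_unstable` is the switch Tokio checks before exposing its unstable APIs. A minimal sketch, not taken from this merge request, of how any such cfg flag is consumed by conditional compilation; the function name below is hypothetical:

```rust
// Sketch only: demonstrates that a `--cfg NAME` set via rustflags is visible
// to #[cfg(...)] attributes throughout the build. Hypothetical example.
#[cfg(tokio_unstable)]
fn report_build_cfg() {
    println!("built with --cfg tokio_unstable");
}

#[cfg(not(tokio_unstable))]
fn report_build_cfg() {
    println!("built without --cfg tokio_unstable");
}

fn main() {
    report_build_cfg();
}
```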
Cargo.lock (generated, 1544 changed lines)
File diff suppressed because it is too large.
Earthfile (40 changed lines)
@@ -7,7 +7,7 @@ FROM --platform amd64 ubuntu:16.04
 # Install build prerequisites
 deps-base:
     RUN apt-get -y update
-    RUN apt-get install -y iproute2 curl build-essential cmake libssl-dev openssl file git pkg-config libdbus-1-dev libdbus-glib-1-dev libgirepository1.0-dev libcairo2-dev
+    RUN apt-get install -y iproute2 curl build-essential cmake libssl-dev openssl file git pkg-config libdbus-1-dev libdbus-glib-1-dev libgirepository1.0-dev libcairo2-dev checkinstall unzip
 
 # Install Cap'n Proto
 deps-capnp:
@@ -15,9 +15,15 @@ deps-capnp:
     COPY scripts/earthly/install_capnproto.sh /
     RUN /bin/bash /install_capnproto.sh; rm /install_capnproto.sh
 
+# Install protoc
+deps-protoc:
+    FROM +deps-capnp
+    COPY scripts/earthly/install_protoc.sh /
+    RUN /bin/bash /install_protoc.sh; rm /install_protoc.sh
+
 # Install Rust
 deps-rust:
-    FROM +deps-capnp
+    FROM +deps-protoc
     ENV RUSTUP_HOME=/usr/local/rustup
     ENV CARGO_HOME=/usr/local/cargo
     ENV PATH=/usr/local/cargo/bin:$PATH
@@ -49,35 +55,43 @@ deps-android:
    RUN curl -o /Android/cmdline-tools.zip https://dl.google.com/android/repository/commandlinetools-linux-7583922_latest.zip
    RUN cd /Android; unzip /Android/cmdline-tools.zip
    RUN yes | /Android/cmdline-tools/bin/sdkmanager --sdk_root=/Android/Sdk build-tools\;30.0.3 ndk\;22.0.7026061 cmake\;3.18.1 platform-tools platforms\;android-30
+   RUN apt-get clean
 
-# Clean up the apt cache to save space
-deps:
-    FROM +deps-android
+# Just linux build not android
+deps-linux:
+    FROM +deps-cross
     RUN apt-get clean
 
-code:
-    FROM +deps
+# Code + Linux deps
+code-linux:
+    FROM +deps-linux
+    COPY --dir .cargo external files scripts veilid-cli veilid-core veilid-server veilid-flutter veilid-wasm Cargo.lock Cargo.toml /veilid
+    WORKDIR /veilid
+
+# Code + Linux + Android deps
+code-android:
+    FROM +deps-android
     COPY --dir .cargo external files scripts veilid-cli veilid-core veilid-server veilid-flutter veilid-wasm Cargo.lock Cargo.toml /veilid
     WORKDIR /veilid
 
 # Clippy only
 clippy:
-    FROM +code
+    FROM +code-linux
     RUN cargo clippy
 
 # Build
 build-linux-amd64:
-    FROM +code
+    FROM +code-linux
     RUN cargo build --target x86_64-unknown-linux-gnu --release
     SAVE ARTIFACT ./target/x86_64-unknown-linux-gnu AS LOCAL ./target/artifacts/x86_64-unknown-linux-gnu
 
 build-linux-arm64:
-    FROM +code
+    FROM +code-linux
     RUN cargo build --target aarch64-unknown-linux-gnu --release
     SAVE ARTIFACT ./target/aarch64-unknown-linux-gnu AS LOCAL ./target/artifacts/aarch64-unknown-linux-gnu
 
 build-android:
-    FROM +code
+    FROM +code-android
     WORKDIR /veilid/veilid-core
     ENV PATH=$PATH:/Android/Sdk/ndk/22.0.7026061/toolchains/llvm/prebuilt/linux-x86_64/bin/
     RUN cargo build --target aarch64-linux-android --release
@@ -92,11 +106,11 @@ build-android:
 
 # Unit tests
 unit-tests-linux-amd64:
-    FROM +code
+    FROM +code-linux
     RUN cargo test --target x86_64-unknown-linux-gnu --release
 
 unit-tests-linux-arm64:
-    FROM +code
+    FROM +code-linux
     RUN cargo test --target aarch64-unknown-linux-gnu --release
 
 # Package
@@ -63,7 +63,9 @@ core:
 max_timestamp_behind_ms: 10000
 max_timestamp_ahead_ms: 10000
 timeout_ms: 10000
-max_route_hop_count: 7
+max_route_hop_count: 4
+default_route_hop_count: 1
 
 dht:
 resolve_node_timeout:
 resolve_node_count: 20
@@ -228,7 +228,8 @@ rpc:
 max_timestamp_behind_ms: 10000
 max_timestamp_ahead_ms: 10000
 timeout_ms: 10000
-max_route_hop_count: 7
+max_route_hop_count: 4
+default_route_hop_count: 1
 ```
 
 #### core:network:dht
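The hop-count settings above bound how long private and safety routes can get: the ceiling drops from 7 to 4 and a new default of 1 is introduced. As a hedged sketch of how such a pair of settings is typically applied (a fallback plus a clamp), and not Veilid's actual logic, with hypothetical names:

```rust
// Hypothetical illustration: a request with no explicit hop count falls back
// to default_route_hop_count, and every request is clamped to
// max_route_hop_count. Not taken from the Veilid source.
struct RpcRouteConfig {
    max_route_hop_count: u8,
    default_route_hop_count: u8,
}

fn effective_hop_count(cfg: &RpcRouteConfig, requested: Option<u8>) -> u8 {
    requested
        .unwrap_or(cfg.default_route_hop_count)
        .min(cfg.max_route_hop_count)
}

fn main() {
    let cfg = RpcRouteConfig {
        max_route_hop_count: 4,
        default_route_hop_count: 1,
    };
    assert_eq!(effective_hop_count(&cfg, None), 1);
    assert_eq!(effective_hop_count(&cfg, Some(7)), 4);
}
```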
external/cursive (vendored, 2 changed lines)
@@ -1 +1 @@
-Subproject commit fea04c2f9bb8c4c9551ca6eb4f2cb1268551120f
+Subproject commit f1504cf37a7021454020cda5cfba815755399794

external/cursive-flexi-logger-view (vendored, 2 changed lines)
@@ -1 +1 @@
-Subproject commit fd560c499be0f34305e0d48aca7f1bc3d015a17f
+Subproject commit effa60cea24e99f294865ed325ffc57612d72785

external/hashlink (vendored, 2 changed lines)
@@ -1 +1 @@
-Subproject commit c8da3a58485c850f4029a58de99b1af83112ba8a
+Subproject commit a089b448071ef36633947693b90023c67dc8485f
@@ -1,11 +1,28 @@
 #!/bin/bash
 mkdir /tmp/capnproto-install
-cd /tmp/capnproto-install
-curl -O https://capnproto.org/capnproto-c++-0.9.1.tar.gz
-tar zxf capnproto-c++-0.9.1.tar.gz
-cd capnproto-c++-0.9.1
-./configure
+pushd /tmp/capnproto-install
+curl -O https://capnproto.org/capnproto-c++-0.10.2.tar.gz
+tar zxf capnproto-c++-0.10.2.tar.gz
+cd capnproto-c++-0.10.2
+./configure --without-openssl
 make -j6 check
-make install
-cd /
-rm -rf /tmp/capnproto-install
+if [ "$EUID" -ne 0 ]; then
+    if command -v checkinstall &> /dev/null; then
+        sudo checkinstall -y
+        cp *.deb ~
+    else
+        sudo make install
+    fi
+    popd
+    sudo rm -rf /tmp/capnproto-install
+else
+    if command -v checkinstall &> /dev/null; then
+        checkinstall -y
+        cp *.deb ~
+    else
+        make install
+    fi
+    popd
+    rm -rf /tmp/capnproto-install
+fi
scripts/earthly/install_protoc.sh (new executable file, 26 lines)
@@ -0,0 +1,26 @@
+#!/bin/bash
+VERSION=21.9
+
+mkdir /tmp/protoc-install
+pushd /tmp/protoc-install
+curl -OL https://github.com/protocolbuffers/protobuf/releases/download/v$VERSION/protoc-$VERSION-linux-x86_64.zip
+unzip protoc-$VERSION-linux-x86_64.zip
+if [ "$EUID" -ne 0 ]; then
+    if command -v checkinstall &> /dev/null; then
+        sudo checkinstall --pkgversion=$VERSION -y cp -r bin include /usr/local/
+        cp *.deb ~
+    else
+        sudo make install
+    fi
+    popd
+    sudo rm -rf /tmp/protoc-install
+else
+    if command -v checkinstall &> /dev/null; then
+        checkinstall --pkgversion=$VERSION -y cp -r bin include /usr/local/
+        cp *.deb ~
+    else
+        make install
+    fi
+    popd
+    rm -rf /tmp/protoc-install
+fi
@@ -74,7 +74,12 @@ fi
 rustup target add aarch64-linux-android armv7-linux-androideabi i686-linux-android x86_64-linux-android wasm32-unknown-unknown
 
 # install cargo packages
-cargo install wasm-bindgen-cli
+cargo install wasm-bindgen-cli wasm-pack
 
 # Ensure packages are installed
-sudo apt-get install libc6-dev-i386 libc6:i386 libncurses5:i386 libstdc++6:i386 lib32z1 libbz2-1.0:i386 openjdk-11-jdk llvm wabt capnproto
+sudo apt-get install libc6-dev-i386 libc6:i386 libncurses5:i386 libstdc++6:i386 lib32z1 libbz2-1.0:i386 openjdk-11-jdk llvm wabt checkinstall
+
+# Install capnproto using the same mechanism as our earthly build
+$SCRIPTDIR/scripts/earthly/install_capnproto.sh
+
+# Install protoc using the same mechanism as our earthly build
+$SCRIPTDIR/scripts/earthly/install_protoc.sh
@@ -90,13 +90,7 @@ fi
 rustup target add aarch64-apple-darwin aarch64-apple-ios x86_64-apple-darwin x86_64-apple-ios wasm32-unknown-unknown aarch64-linux-android armv7-linux-androideabi i686-linux-android x86_64-linux-android
 
 # install cargo packages
-cargo install wasm-bindgen-cli
+cargo install wasm-bindgen-cli wasm-pack
 
-# install bitcode compatible ios toolchain
-# echo Manual Step:
-# echo install +ios-arm64-1.57.0 toolchain for bitcode from https://github.com/getditto/rust-bitcode/releases/latest and unzip
-# echo xattr -d -r com.apple.quarantine .
-# echo ./install.sh
-
 # ensure we have command line tools
 xcode-select --install
@@ -114,5 +108,5 @@ if [ "$BREW_USER" == "" ]; then
     BREW_USER=`whoami`
 fi
 fi
-sudo -H -u $BREW_USER brew install capnp cmake wabt llvm
+sudo -H -u $BREW_USER brew install capnp cmake wabt llvm protobuf
@@ -42,7 +42,9 @@ bugsalot = "^0"
 flexi_logger = { version = "^0", features = ["use_chrono_for_offset"] }
 thiserror = "^1"
 crossbeam-channel = "^0"
-veilid-core = { path = "../veilid-core", default_features = false}
+hex = "^0"
+veilid-core = { path = "../veilid-core", default_features = false }
+json = "^0"
 
 [dev-dependencies]
 serial_test = "^0"
@@ -3,6 +3,7 @@ use crate::tools::*;
 use crate::veilid_client_capnp::*;
 use capnp::capability::Promise;
 use capnp_rpc::{pry, rpc_twoparty_capnp, twoparty, Disconnector, RpcSystem};
+use futures::future::FutureExt;
 use serde::de::DeserializeOwned;
 use std::cell::RefCell;
 use std::net::SocketAddr;
@@ -76,12 +77,21 @@ impl veilid_client::Server for VeilidClientImpl {
             VeilidUpdate::Log(log) => {
                 self.comproc.update_log(log);
             }
+            VeilidUpdate::AppMessage(msg) => {
+                self.comproc.update_app_message(msg);
+            }
+            VeilidUpdate::AppCall(call) => {
+                self.comproc.update_app_call(call);
+            }
             VeilidUpdate::Attachment(attachment) => {
                 self.comproc.update_attachment(attachment);
             }
             VeilidUpdate::Network(network) => {
                 self.comproc.update_network_status(network);
             }
+            VeilidUpdate::Config(config) => {
+                self.comproc.update_config(config);
+            }
             VeilidUpdate::Shutdown => self.comproc.update_shutdown(),
         }
 
@@ -94,7 +104,9 @@ struct ClientApiConnectionInner {
     connect_addr: Option<SocketAddr>,
     disconnector: Option<Disconnector<rpc_twoparty_capnp::Side>>,
     server: Option<Rc<RefCell<veilid_server::Client>>>,
+    server_settings: Option<String>,
     disconnect_requested: bool,
+    cancel_eventual: Eventual,
 }
 
 type Handle<T> = Rc<RefCell<T>>;
@@ -112,10 +124,21 @@ impl ClientApiConnection {
                 connect_addr: None,
                 disconnector: None,
                 server: None,
+                server_settings: None,
                 disconnect_requested: false,
+                cancel_eventual: Eventual::new(),
             })),
         }
     }
 
+    pub fn cancel(&self) {
+        let eventual = {
+            let inner = self.inner.borrow();
+            inner.cancel_eventual.clone()
+        };
+        eventual.resolve(); // don't need to await this
+    }
+
     async fn process_veilid_state<'a>(
         &'a mut self,
         veilid_state: VeilidState,
@@ -123,7 +146,7 @@ impl ClientApiConnection {
         let mut inner = self.inner.borrow_mut();
         inner.comproc.update_attachment(veilid_state.attachment);
         inner.comproc.update_network_status(veilid_state.network);
+        inner.comproc.update_config(veilid_state.config);
         Ok(())
     }
 
@@ -191,19 +214,27 @@ impl ClientApiConnection {
             .map_err(|e| format!("failed to get deserialize veilid state: {}", e))?;
         self.process_veilid_state(veilid_state).await?;
 
+        // Save server settings
+        let server_settings = response
+            .get_settings()
+            .map_err(|e| format!("failed to get initial veilid server settings: {}", e))?
+            .to_owned();
+        self.inner.borrow_mut().server_settings = Some(server_settings.clone());
+
         // Don't drop the registration, doing so will remove the client
         // object mapping from the server which we need for the update backchannel
 
         // Wait until rpc system completion or disconnect was requested
         let res = rpc_jh.await;
-        #[cfg(feature="rt-tokio")]
+        #[cfg(feature = "rt-tokio")]
         let res = res.map_err(|e| format!("join error: {}", e))?;
         res.map_err(|e| format!("client RPC system error: {}", e))
     }
 
-    async fn handle_connection(&mut self) -> Result<(), String> {
+    async fn handle_connection(&mut self, connect_addr: SocketAddr) -> Result<(), String> {
         trace!("ClientApiConnection::handle_connection");
-        let connect_addr = self.inner.borrow().connect_addr.unwrap();
+        self.inner.borrow_mut().connect_addr = Some(connect_addr);
         // Connect the TCP socket
         let stream = TcpStream::connect(connect_addr)
             .await
@@ -245,9 +276,11 @@ impl ClientApiConnection {
         // Drop the server and disconnector too (if we still have it)
         let mut inner = self.inner.borrow_mut();
         let disconnect_requested = inner.disconnect_requested;
+        inner.server_settings = None;
         inner.server = None;
         inner.disconnector = None;
         inner.disconnect_requested = false;
+        inner.connect_addr = None;
 
         if !disconnect_requested {
             // Connection lost
@@ -258,6 +291,34 @@ impl ClientApiConnection {
         }
     }
 
+    pub fn cancellable<T>(&mut self, p: Promise<T, capnp::Error>) -> Promise<T, capnp::Error>
+    where
+        T: 'static,
+    {
+        let (mut cancel_instance, cancel_eventual) = {
+            let inner = self.inner.borrow();
+            (
+                inner.cancel_eventual.instance_empty().fuse(),
+                inner.cancel_eventual.clone(),
+            )
+        };
+        let mut p = p.fuse();
+
+        Promise::from_future(async move {
+            let out = select! {
+                a = p => {
+                    a
+                },
+                _ = cancel_instance => {
+                    Err(capnp::Error::failed("cancelled".into()))
+                }
+            };
+            drop(cancel_instance);
+            cancel_eventual.reset();
+            out
+        })
+    }
+
     pub async fn server_attach(&mut self) -> Result<(), String> {
         trace!("ClientApiConnection::server_attach");
         let server = {
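The new `cancellable` wrapper above races a Cap'n Proto promise against the connection's `Eventual` cancel signal. `Eventual` is a Veilid-internal type, so as a rough standalone illustration of the same `select!`-based cancellation pattern, the sketch below uses a `futures` oneshot channel as the cancel signal. The names here are assumptions for the example, not the project's API:

```rust
use futures::channel::oneshot;
use futures::{select, FutureExt};

// Race any future against a cancel signal; whichever completes first wins.
async fn with_cancel<T>(
    fut: impl std::future::Future<Output = T> + Unpin,
    cancel: oneshot::Receiver<()>,
) -> Result<T, &'static str> {
    let mut fut = fut.fuse();
    let mut cancel = cancel.fuse();
    select! {
        v = fut => Ok(v),
        _ = cancel => Err("cancelled"),
    }
}

fn main() {
    futures::executor::block_on(async {
        // The cancel sender stays alive and never fires, so the work future wins.
        let (_cancel_tx, cancel_rx) = oneshot::channel::<()>();
        let res = with_cancel(Box::pin(async { 42 }), cancel_rx).await;
        assert_eq!(res, Ok(42));
    });
}
```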
@@ -269,7 +330,10 @@ impl ClientApiConnection {
                 .clone()
         };
         let request = server.borrow().attach_request();
-        let response = request.send().promise.await.map_err(map_to_string)?;
+        let response = self
+            .cancellable(request.send().promise)
+            .await
+            .map_err(map_to_string)?;
         let reader = response
             .get()
             .map_err(map_to_string)?
@@ -290,7 +354,10 @@ impl ClientApiConnection {
                 .clone()
         };
         let request = server.borrow().detach_request();
-        let response = request.send().promise.await.map_err(map_to_string)?;
+        let response = self
+            .cancellable(request.send().promise)
+            .await
+            .map_err(map_to_string)?;
         let reader = response
             .get()
             .map_err(map_to_string)?
@@ -311,7 +378,10 @@ impl ClientApiConnection {
                 .clone()
         };
         let request = server.borrow().shutdown_request();
-        let response = request.send().promise.await.map_err(map_to_string)?;
+        let response = self
+            .cancellable(request.send().promise)
+            .await
+            .map_err(map_to_string)?;
         response.get().map(drop).map_err(map_to_string)
     }
 
@@ -327,7 +397,10 @@ impl ClientApiConnection {
         };
         let mut request = server.borrow().debug_request();
         request.get().set_command(&what);
-        let response = request.send().promise.await.map_err(map_to_string)?;
+        let response = self
+            .cancellable(request.send().promise)
+            .await
+            .map_err(map_to_string)?;
         let reader = response
             .get()
             .map_err(map_to_string)?
@@ -355,7 +428,36 @@ impl ClientApiConnection {
         request.get().set_layer(&layer);
         let log_level_json = veilid_core::serialize_json(&log_level);
         request.get().set_log_level(&log_level_json);
-        let response = request.send().promise.await.map_err(map_to_string)?;
+        let response = self
+            .cancellable(request.send().promise)
+            .await
+            .map_err(map_to_string)?;
+        let reader = response
+            .get()
+            .map_err(map_to_string)?
+            .get_result()
+            .map_err(map_to_string)?;
+        let res: Result<(), VeilidAPIError> = decode_api_result(&reader);
+        res.map_err(map_to_string)
+    }
+
+    pub async fn server_appcall_reply(&mut self, id: u64, msg: Vec<u8>) -> Result<(), String> {
+        trace!("ClientApiConnection::appcall_reply");
+        let server = {
+            let inner = self.inner.borrow();
+            inner
+                .server
+                .as_ref()
+                .ok_or_else(|| "Not connected, ignoring change_log_level request".to_owned())?
+                .clone()
+        };
+        let mut request = server.borrow().app_call_reply_request();
+        request.get().set_id(id);
+        request.get().set_message(&msg);
+        let response = self
+            .cancellable(request.send().promise)
+            .await
+            .map_err(map_to_string)?;
         let reader = response
             .get()
             .map_err(map_to_string)?
@@ -369,9 +471,7 @@ impl ClientApiConnection {
     pub async fn connect(&mut self, connect_addr: SocketAddr) -> Result<(), String> {
         trace!("ClientApiConnection::connect");
         // Save the address to connect to
-        self.inner.borrow_mut().connect_addr = Some(connect_addr);
-
-        self.handle_connection().await
+        self.handle_connection(connect_addr).await
     }
 
     // End Client API connection
@@ -382,7 +482,6 @@ impl ClientApiConnection {
             Some(d) => {
                 self.inner.borrow_mut().disconnect_requested = true;
                 d.await.unwrap();
-                self.inner.borrow_mut().connect_addr = None;
             }
             None => {
                 debug!("disconnector doesn't exist");
@@ -49,6 +49,7 @@ struct CommandProcessorInner {
     autoreconnect: bool,
     server_addr: Option<SocketAddr>,
     connection_waker: Eventual,
+    last_call_id: Option<u64>,
 }
 
 type Handle<T> = Rc<RefCell<T>>;
@@ -70,6 +71,7 @@ impl CommandProcessor {
                 autoreconnect: settings.autoreconnect,
                 server_addr: None,
                 connection_waker: Eventual::new(),
+                last_call_id: None,
             })),
         }
     }
@@ -100,6 +102,12 @@ impl CommandProcessor {
         }
     }
 
+    pub fn cancel_command(&self) {
+        trace!("CommandProcessor::cancel_command");
+        let capi = self.capi();
+        capi.cancel();
+    }
+
     pub fn cmd_help(&self, _rest: Option<String>, callback: UICallback) -> Result<(), String> {
         trace!("CommandProcessor::cmd_help");
         self.ui().add_node_event(
@@ -111,6 +119,7 @@ attach - attach the server to the Veilid network
 detach - detach the server from the Veilid network
 debug - send a debugging command to the Veilid server
 change_log_level - change the log level for a tracing layer
+reply - reply to an AppCall not handled directly by the server
 "#
             .to_owned(),
         );
@@ -225,6 +234,66 @@ change_log_level - change the log level for a tracing layer
         Ok(())
     }
 
+    pub fn cmd_reply(&self, rest: Option<String>, callback: UICallback) -> Result<(), String> {
+        trace!("CommandProcessor::cmd_reply");
+
+        let mut capi = self.capi();
+        let ui = self.ui();
+        let some_last_id = self.inner_mut().last_call_id.take();
+        spawn_detached_local(async move {
+            let (first, second) = Self::word_split(&rest.clone().unwrap_or_default());
+            let (id, msg) = if let Some(second) = second {
+                let id = match u64::from_str(&first) {
+                    Err(e) => {
+                        ui.add_node_event(format!("invalid appcall id: {}", e));
+                        ui.send_callback(callback);
+                        return;
+                    }
+                    Ok(v) => v,
+                };
+                (id, second)
+            } else {
+                let id = match some_last_id {
+                    None => {
+                        ui.add_node_event("must specify last call id".to_owned());
+                        ui.send_callback(callback);
+                        return;
+                    }
+                    Some(v) => v,
+                };
+                (id, rest.unwrap_or_default())
+            };
+            let msg = if msg[0..1] == "#".to_owned() {
+                match hex::decode(msg[1..].as_bytes().to_vec()) {
+                    Err(e) => {
+                        ui.add_node_event(format!("invalid hex message: {}", e));
+                        ui.send_callback(callback);
+                        return;
+                    }
+                    Ok(v) => v,
+                }
+            } else {
+                msg[1..].as_bytes().to_vec()
+            };
+            let msglen = msg.len();
+            match capi.server_appcall_reply(id, msg).await {
+                Ok(()) => {
+                    ui.add_node_event(format!("reply sent to {} : {} bytes", id, msglen));
+                    ui.send_callback(callback);
+                    return;
+                }
+                Err(e) => {
+                    ui.display_string_dialog(
+                        "Server command 'appcall_reply' failed",
+                        e.to_string(),
+                        callback,
+                    );
+                }
+            }
+        });
+        Ok(())
+    }
+
     pub fn run_command(&self, command_line: &str, callback: UICallback) -> Result<(), String> {
         //
         let (cmd, rest) = Self::word_split(command_line);
@@ -238,6 +307,7 @@ change_log_level - change the log level for a tracing layer
             "detach" => self.cmd_detach(callback),
             "debug" => self.cmd_debug(rest, callback),
             "change_log_level" => self.cmd_change_log_level(rest, callback),
+            "reply" => self.cmd_reply(rest, callback),
             _ => {
                 let ui = self.ui();
                 ui.send_callback(callback);
@@ -318,6 +388,7 @@ change_log_level - change the log level for a tracing layer
     // called by client_api_connection
     // calls into ui
     ////////////////////////////////////////////
+
     pub fn update_attachment(&mut self, attachment: veilid_core::VeilidStateAttachment) {
         self.inner_mut().ui.set_attachment_state(attachment.state);
     }
@@ -330,8 +401,11 @@ change_log_level - change the log level for a tracing layer
             network.peers,
         );
     }
+    pub fn update_config(&mut self, config: veilid_core::VeilidStateConfig) {
+        self.inner_mut().ui.set_config(config.config)
+    }
 
-    pub fn update_log(&mut self, log: veilid_core::VeilidStateLog) {
+    pub fn update_log(&mut self, log: veilid_core::VeilidLog) {
         self.inner().ui.add_node_event(format!(
             "{}: {}{}",
             log.log_level,
@@ -344,6 +418,49 @@ change_log_level - change the log level for a tracing layer
         ));
     }
 
+    pub fn update_app_message(&mut self, msg: veilid_core::VeilidAppMessage) {
+        // check is message body is ascii printable
+        let mut printable = true;
+        for c in &msg.message {
+            if *c < 32 || *c > 126 {
+                printable = false;
+            }
+        }
+
+        let strmsg = if printable {
+            String::from_utf8_lossy(&msg.message).to_string()
+        } else {
+            hex::encode(&msg.message)
+        };
+
+        self.inner()
+            .ui
+            .add_node_event(format!("AppMessage ({:?}): {}", msg.sender, strmsg));
+    }
+
+    pub fn update_app_call(&mut self, call: veilid_core::VeilidAppCall) {
+        // check is message body is ascii printable
+        let mut printable = true;
+        for c in &call.message {
+            if *c < 32 || *c > 126 {
+                printable = false;
+            }
+        }
+
+        let strmsg = if printable {
+            String::from_utf8_lossy(&call.message).to_string()
+        } else {
+            format!("#{}", hex::encode(&call.message))
+        };
+
+        self.inner().ui.add_node_event(format!(
+            "AppCall ({:?}) id = {:016x} : {}",
+            call.sender, call.id, strmsg
+        ));
+
+        self.inner_mut().last_call_id = Some(call.id);
+    }
+
     pub fn update_shutdown(&mut self) {
         // Do nothing with this, we'll process shutdown when rpc connection closes
     }
@@ -381,7 +498,6 @@ change_log_level - change the log level for a tracing layer
     // calls into client_api_connection
     ////////////////////////////////////////////
     pub fn attach(&mut self) {
-        trace!("CommandProcessor::attach");
         let mut capi = self.capi();
 
         spawn_detached_local(async move {
@@ -392,7 +508,6 @@ change_log_level - change the log level for a tracing layer
     }
 
     pub fn detach(&mut self) {
-        trace!("CommandProcessor::detach");
         let mut capi = self.capi();
 
         spawn_detached_local(async move {
@@ -55,6 +55,7 @@ struct UIState {
     network_down_up: Dirty<(f32, f32)>,
     connection_state: Dirty<ConnectionState>,
     peers_state: Dirty<Vec<PeerTableData>>,
+    node_id: Dirty<String>,
 }
 
 impl UIState {
@@ -65,6 +66,7 @@ impl UIState {
             network_down_up: Dirty::new((0.0, 0.0)),
             connection_state: Dirty::new(ConnectionState::Disconnected),
             peers_state: Dirty::new(Vec::new()),
+            node_id: Dirty::new("".to_owned()),
         }
     }
 }
@@ -213,7 +215,13 @@ impl UI {
             UI::setup_quit_handler(s);
         });
     }
+    fn clear_handler(siv: &mut Cursive) {
+        cursive_flexi_logger_view::clear_log();
+        UI::update_cb(siv);
+    }
+    fn node_events_panel(s: &mut Cursive) -> ViewRef<Panel<ScrollView<FlexiLoggerView>>> {
+        s.find_name("node-events-panel").unwrap()
+    }
     fn command_line(s: &mut Cursive) -> ViewRef<EditView> {
         s.find_name("command-line").unwrap()
     }
@@ -306,11 +314,18 @@ impl UI {
     fn run_command(s: &mut Cursive, text: &str) -> Result<(), String> {
         // disable ui
         Self::enable_command_ui(s, false);
 
         // run command
+        s.set_global_callback(cursive::event::Event::Key(Key::Esc), |s| {
+            let cmdproc = Self::command_processor(s);
+            cmdproc.cancel_command();
+        });
+
         let cmdproc = Self::command_processor(s);
         cmdproc.run_command(
             text,
             Box::new(|s| {
+                s.set_global_callback(cursive::event::Event::Key(Key::Esc), UI::quit_handler);
                 Self::enable_command_ui(s, true);
             }),
         )
@@ -565,6 +580,12 @@ impl UI {
         }
     }
 
+    fn refresh_main_titlebar(s: &mut Cursive) {
+        let mut main_window = UI::node_events_panel(s);
+        let inner = Self::inner_mut(s);
+        main_window.set_title(format!("Node: {}", inner.ui_state.node_id.get()));
+    }
+
     fn refresh_statusbar(s: &mut Cursive) {
         let mut statusbar = UI::status_bar(s);
 
@@ -627,6 +648,7 @@ impl UI {
         let mut refresh_button_attach = false;
         let mut refresh_connection_dialog = false;
         let mut refresh_peers = false;
+        let mut refresh_main_titlebar = false;
         if inner.ui_state.attachment_state.take_dirty() {
             refresh_statusbar = true;
             refresh_button_attach = true;
@@ -647,6 +669,9 @@ impl UI {
         if inner.ui_state.peers_state.take_dirty() {
            refresh_peers = true;
         }
+        if inner.ui_state.node_id.take_dirty() {
+            refresh_main_titlebar = true;
+        }
 
         drop(inner);
 
@@ -662,6 +687,9 @@ impl UI {
         if refresh_peers {
             Self::refresh_peers(s);
         }
+        if refresh_main_titlebar {
+            Self::refresh_main_titlebar(s);
+        }
     }
 
     ////////////////////////////////////////////////////////////////////////////
@@ -709,13 +737,11 @@ impl UI {
 
         // Create layouts
 
-        let node_events_view = Panel::new(
-            FlexiLoggerView::new_scrollable()
-                .with_name("node-events")
-                .full_screen(),
-        )
-        .title_position(HAlign::Left)
-        .title("Node Events");
+        let node_events_view = Panel::new(FlexiLoggerView::new_scrollable())
+            .title_position(HAlign::Left)
+            .title("Node Events")
+            .with_name("node-events-panel")
+            .full_screen();
 
         let peers_table_view = PeersTableView::new()
             .column(PeerTableColumn::NodeId, "Node Id", |c| c.width(43))
@@ -794,6 +820,7 @@ impl UI {
 
         UI::setup_colors(&mut siv, &mut inner, settings);
         UI::setup_quit_handler(&mut siv);
+        siv.set_global_callback(cursive::event::Event::CtrlChar('k'), UI::clear_handler);
 
         drop(inner);
         drop(siv);
@@ -832,6 +859,16 @@ impl UI {
         inner.ui_state.peers_state.set(peers);
         let _ = inner.cb_sink.send(Box::new(UI::update_cb));
     }
+    pub fn set_config(&mut self, config: VeilidConfigInner) {
+        let mut inner = self.inner.borrow_mut();
+        inner.ui_state.node_id.set(
+            config
+                .network
+                .node_id
+                .map(|x| x.encode())
+                .unwrap_or("<unknown>".to_owned()),
+        );
+    }
     pub fn set_connection_state(&mut self, state: ConnectionState) {
         let mut inner = self.inner.borrow_mut();
         inner.ui_state.connection_state.set(state);
@ -58,7 +58,11 @@ digest = "0.9.0"
|
|||||||
rtnetlink = { version = "^0", default-features = false, optional = true }
|
rtnetlink = { version = "^0", default-features = false, optional = true }
|
||||||
async-std-resolver = { version = "^0", optional = true }
|
async-std-resolver = { version = "^0", optional = true }
|
||||||
trust-dns-resolver = { version = "^0", optional = true }
|
trust-dns-resolver = { version = "^0", optional = true }
|
||||||
|
keyvaluedb = { path = "../external/keyvaluedb/keyvaluedb" }
|
||||||
|
serde_bytes = { version = "^0" }
|
||||||
|
#rkyv = { version = "^0", default_features = false, features = ["std", "alloc", "strict", "size_64", "validation"] }
|
||||||
|
rkyv = { git = "https://github.com/crioux/rkyv.git", branch = "issue_326", default_features = false, features = ["std", "alloc", "strict", "size_64", "validation"] }
|
||||||
|
bytecheck = "^0"
|
||||||
|
|
||||||
# Dependencies for native builds only
|
# Dependencies for native builds only
|
||||||
# Linux, Windows, Mac, iOS, Android
|
# Linux, Windows, Mac, iOS, Android
|
||||||
@ -72,7 +76,6 @@ async-tungstenite = { version = "^0", features = ["async-tls"] }
|
|||||||
maplit = "^1"
|
maplit = "^1"
|
||||||
config = { version = "^0", features = ["yaml"] }
|
config = { version = "^0", features = ["yaml"] }
|
||||||
keyring-manager = { path = "../external/keyring-manager" }
|
keyring-manager = { path = "../external/keyring-manager" }
|
||||||
lru = "^0"
|
|
||||||
async-tls = "^0.11"
|
async-tls = "^0.11"
|
||||||
igd = { path = "../external/rust-igd" }
|
igd = { path = "../external/rust-igd" }
|
||||||
webpki = "^0"
|
webpki = "^0"
|
||||||
@ -83,7 +86,6 @@ futures-util = { version = "^0", default-features = false, features = ["async-aw
|
|||||||
keyvaluedb-sqlite = { path = "../external/keyvaluedb/keyvaluedb-sqlite" }
|
keyvaluedb-sqlite = { path = "../external/keyvaluedb/keyvaluedb-sqlite" }
|
||||||
data-encoding = { version = "^2" }
|
data-encoding = { version = "^2" }
|
||||||
serde = { version = "^1", features = ["derive" ] }
|
serde = { version = "^1", features = ["derive" ] }
|
||||||
serde_cbor = { version = "^0" }
|
|
||||||
serde_json = { version = "^1" }
|
serde_json = { version = "^1" }
|
||||||
socket2 = "^0"
|
socket2 = "^0"
|
||||||
bugsalot = "^0"
|
bugsalot = "^0"
|
||||||
@ -96,13 +98,10 @@ nix = "^0"
|
|||||||
wasm-bindgen = "^0"
|
wasm-bindgen = "^0"
|
||||||
js-sys = "^0"
|
js-sys = "^0"
|
||||||
wasm-bindgen-futures = "^0"
|
wasm-bindgen-futures = "^0"
|
||||||
hashbrown = "^0"
|
|
||||||
lru = {version = "^0", features = ["hashbrown"] }
|
|
||||||
no-std-net = { path = "../external/no-std-net", features = ["serde"] }
|
no-std-net = { path = "../external/no-std-net", features = ["serde"] }
|
||||||
keyvaluedb-web = { path = "../external/keyvaluedb/keyvaluedb-web" }
|
keyvaluedb-web = { path = "../external/keyvaluedb/keyvaluedb-web" }
|
||||||
data-encoding = { version = "^2", default_features = false, features = ["alloc"] }
|
data-encoding = { version = "^2", default_features = false, features = ["alloc"] }
|
||||||
serde = { version = "^1", default-features = false, features = ["derive", "alloc"] }
|
serde = { version = "^1", default-features = false, features = ["derive", "alloc"] }
|
||||||
serde_cbor = { version = "^0", default-features = false, features = ["alloc"] }
|
|
||||||
serde_json = { version = "^1", default-features = false, features = ["alloc"] }
|
serde_json = { version = "^1", default-features = false, features = ["alloc"] }
|
||||||
getrandom = { version = "^0", features = ["js"] }
|
getrandom = { version = "^0", features = ["js"] }
|
||||||
ws_stream_wasm = "^0"
|
ws_stream_wasm = "^0"
|
||||||
|
@@ -3,14 +3,14 @@
 # IDs And Hashes
 ##############################
 
-struct Curve25519PublicKey {
+struct Key256 @0xdde44e3286f6a90d {
     u0 @0 :UInt64;
     u1 @1 :UInt64;
     u2 @2 :UInt64;
     u3 @3 :UInt64;
 }
 
-struct Ed25519Signature {
+struct Signature512 @0x806749043a129c12 {
     u0 @0 :UInt64;
     u1 @1 :UInt64;
     u2 @2 :UInt64;
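The renamed Key256 struct drops the algorithm-specific names and carries a generic 256-bit value as four UInt64 fields (u0..u3). A small standalone sketch of what that packing can look like on the Rust side; the little-endian word order is an assumption for illustration, not taken from Veilid's actual codec:

```rust
// Illustration only: pack a 256-bit key into four u64 words, mirroring the
// shape of the Key256 capnp struct. Word order/endianness is assumed here.
fn key_to_words(key: &[u8; 32]) -> [u64; 4] {
    let mut words = [0u64; 4];
    for (i, chunk) in key.chunks_exact(8).enumerate() {
        words[i] = u64::from_le_bytes(chunk.try_into().unwrap());
    }
    words
}

fn words_to_key(words: &[u64; 4]) -> [u8; 32] {
    let mut key = [0u8; 32];
    for (i, w) in words.iter().enumerate() {
        key[i * 8..(i + 1) * 8].copy_from_slice(&w.to_le_bytes());
    }
    key
}

fn main() {
    let key = [7u8; 32];
    // Round-trips back to the same 32 bytes.
    assert_eq!(words_to_key(&key_to_words(&key)), key);
}
```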
@@ -21,79 +21,72 @@ struct Ed25519Signature {
     u7 @7 :UInt64;
 }
 
-struct XChaCha20Poly1305Nonce {
+struct Nonce24 @0xb6260db25d8d7dfc {
     u0 @0 :UInt64;
     u1 @1 :UInt64;
     u2 @2 :UInt64;
 }
 
-struct BLAKE3Hash {
-    u0 @0 :UInt64;
-    u1 @1 :UInt64;
-    u2 @2 :UInt64;
-    u3 @3 :UInt64;
-}
-
-using NodeID = Curve25519PublicKey;
-using RoutePublicKey = Curve25519PublicKey;
-using ValueID = Curve25519PublicKey;
-using Nonce = XChaCha20Poly1305Nonce;
-using Signature = Ed25519Signature;
-using BlockID = BLAKE3Hash;
+using NodeID = Key256;
+using RoutePublicKey = Key256;
+using ValueID = Key256;
+using Nonce = Nonce24;
+using Signature = Signature512;
+using BlockID = Key256;
 using TunnelID = UInt64;
 
 # Node Dial Info
 ################################################################
 
-struct AddressIPV4 {
+struct AddressIPV4 @0xdb8769881266a6a0 {
     addr @0 :UInt32; # Address in big endian format
 }
 
-struct AddressIPV6 {
+struct AddressIPV6 @0xb35d6e6011dc5c20 {
     addr0 @0 :UInt32; # \
     addr1 @1 :UInt32; #  \ Address in big
     addr2 @2 :UInt32; #  / endian format
     addr3 @3 :UInt32; # /
 }
 
-struct Address {
+struct Address @0x812706e9e57d108b {
     union {
         ipv4 @0 :AddressIPV4;
         ipv6 @1 :AddressIPV6;
     }
 }
 
-struct SocketAddress {
+struct SocketAddress @0x82df4272f4dd3a62 {
     address @0 :Address;
     port @1 :UInt16;
 }
 
-enum ProtocolKind {
+enum ProtocolKind @0xde0bf5787c067d5a {
     udp @0;
     ws @1;
     wss @2;
     tcp @3;
 }
 
-struct DialInfoUDP {
+struct DialInfoUDP @0xbb38a8b8b7024a7c {
     socketAddress @0 :SocketAddress;
 }
 
-struct DialInfoTCP {
+struct DialInfoTCP @0x9e0a9371b9a9f7fc {
     socketAddress @0 :SocketAddress;
 }
 
-struct DialInfoWS {
+struct DialInfoWS @0xd7795f7a92ab15b0 {
     socketAddress @0 :SocketAddress;
     request @1 :Text;
 }
 
-struct DialInfoWSS {
+struct DialInfoWSS @0xe639faa41b7d7b04 {
     socketAddress @0 :SocketAddress;
     request @1 :Text;
 }
 
-struct DialInfo {
+struct DialInfo @0xe1cd1c39fc2defdf {
     union {
         udp @0 :DialInfoUDP;
         tcp @1 :DialInfoTCP;
@@ -102,20 +95,15 @@ struct DialInfo {
     }
 }
 
-struct NodeDialInfo {
-    nodeId @0 :NodeID;     # node id
-    dialInfo @1 :DialInfo; # how to get to the node
-}
-
 # Signals
 ##############################
 
-struct SignalInfoHolePunch {
+struct SignalInfoHolePunch @0xeeb9ab6861890c9a {
     receipt @0 :Data;      # receipt to return with hole punch
     peerInfo @1 :PeerInfo; # peer info of the signal sender for hole punch attempt
 }
 
-struct SignalInfoReverseConnect {
+struct SignalInfoReverseConnect @0xd9ebd3bd0d46e013 {
     receipt @0 :Data;      # receipt to return with reverse connect
     peerInfo @1 :PeerInfo; # peer info of the signal sender for reverse connect attempt
 }
@@ -123,29 +111,38 @@ struct SignalInfoReverseConnect {
 # Private Routes
 ##############################
 
-struct RouteHopData {
+struct RouteHopData @0x8ce231f9d1b7adf2 {
     nonce @0 :Nonce; # nonce for encrypted blob
     blob @1 :Data;   # encrypted blob with ENC(nonce,DH(PK,SK))
-                     # can be one of:
-                     # if more hops remain in this route: RouteHop (0 byte appended as key)
-                     # if end of safety route and starting private route: PrivateRoute (1 byte appended as key)
+                     # if this is a safety route RouteHopData, there is a single byte tag appended to the end of the encrypted blob
+                     # it can be one of:
+                     # if more hops remain in this route: RouteHop (0 byte appended as tag)
+                     # if end of safety route and starting private route: PrivateRoute (1 byte appended as tag)
+                     # if this is a private route RouteHopData, only can decode to RouteHop, no tag is appended
 }
 
-struct RouteHop {
-    dialInfo @0 :NodeDialInfo; # dial info for this hop
-    nextHop @1 :RouteHopData;  # Optional: next hop in encrypted blob
-                               # Null means no next hop, at destination (only used in private route, safety routes must enclose a stub private route)
+struct RouteHop @0xf8f672d75cce0c3b {
+    node :union {
+        nodeId @0 :NodeID;     # node id only for established routes
+        peerInfo @1 :PeerInfo; # full peer info for this hop to establish the route
+    }
+    nextHop @2 :RouteHopData;  # optional: If this the end of a private route, this field will not exist
+                               # if this is a safety route routehop, this field is not optional and must exist
 }
 
-struct PrivateRoute {
+struct PrivateRoute @0x8a83fccb0851e776 {
     publicKey @0 :RoutePublicKey; # private route public key (unique per private route)
-    hopCount @1 :UInt8;           # Count of hops left in the private route
-    firstHop @2 :RouteHop;        # Optional: first hop in the private route
-}
+    hopCount @1 :UInt8;           # Count of hops left in the private route (for timeout calculation purposes only)
+    hops :union {
+        firstHop @2 :RouteHop;    # first hop of a private route is unencrypted (hopcount > 0)
+        data @3 :RouteHopData;    # private route has more hops (hopcount > 0 && hopcount < total_hopcount)
+        empty @4 :Void;           # private route has ended (hopcount = 0)
+    }
+}
 
-struct SafetyRoute {
+struct SafetyRoute @0xf554734d07cb5d59 {
     publicKey @0 :RoutePublicKey; # safety route public key (unique per safety route)
-    hopCount @1 :UInt8;           # Count of hops left in the safety route
+    hopCount @1 :UInt8;           # Count of hops left in the safety route (for timeout calculation purposes only)
     hops :union {
         data @2 :RouteHopData;    # safety route has more hops
         private @3 :PrivateRoute; # safety route has ended and private route follows
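The new RouteHopData comments describe a one-byte tag appended to a safety route's encrypted blob: 0 means another RouteHop follows, 1 means a PrivateRoute starts at this hop, and private route blobs carry no tag at all. A standalone illustration of that tagging convention over plain byte vectors, assuming nothing about Veilid's actual encoder or encryption:

```rust
// Illustration of the tag-byte convention from the RouteHopData comments:
// append one byte to the (already encrypted) blob saying what it decodes to.
#[derive(Debug, PartialEq)]
enum BlobKind {
    RouteHop,      // tag 0: more hops remain in this route
    PrivateRoute,  // tag 1: safety route ends, private route begins
}

fn tag_blob(mut blob: Vec<u8>, kind: BlobKind) -> Vec<u8> {
    blob.push(match kind {
        BlobKind::RouteHop => 0,
        BlobKind::PrivateRoute => 1,
    });
    blob
}

fn untag_blob(mut blob: Vec<u8>) -> Option<(Vec<u8>, BlobKind)> {
    let kind = match blob.pop()? {
        0 => BlobKind::RouteHop,
        1 => BlobKind::PrivateRoute,
        _ => return None, // unknown tag
    };
    Some((blob, kind))
}

fn main() {
    let tagged = tag_blob(vec![0xde, 0xad], BlobKind::PrivateRoute);
    assert_eq!(
        untag_blob(tagged),
        Some((vec![0xde, 0xad], BlobKind::PrivateRoute))
    );
}
```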
@@ -157,7 +154,7 @@ struct SafetyRoute {
 
 using ValueSeqNum = UInt32; # sequence numbers for values
 
-struct ValueKey {
+struct ValueKey @0xe64b0992c21a0736 {
     publicKey @0 :ValueID; # the location of the value
     subkey @1 :Text;       # the name of the subkey (or empty if the whole key)
 }
@@ -167,21 +164,22 @@ struct ValueKey {
 #     seq @1 :ValueSeqNum; # the sequence number of the value subkey
 # }
 
-struct ValueData {
-    data @0 :Data;       # value or subvalue contents in CBOR format
+struct ValueData @0xb4b7416f169f2a3d {
+    data @0 :Data;       # value or subvalue contents
     seq @1 :ValueSeqNum; # sequence number of value
 }
 
 # Operations
 ##############################
 
-enum NetworkClass {
-    inboundCapable @0; # I = Inbound capable without relay, may require signal
-    outboundOnly @1;   # O = Outbound only, inbound relay required except with reverse connect signal
-    webApp @2;         # W = PWA, outbound relay is required in most cases
+enum NetworkClass @0x8cebfc2a6230717f {
+    invalid @0;        # X = Invalid network class, network is not yet set up
+    inboundCapable @1; # I = Inbound capable without relay, may require signal
+    outboundOnly @2;   # O = Outbound only, inbound relay required except with reverse connect signal
+    webApp @3;         # W = PWA, outbound relay is required in most cases
 }
 
-enum DialInfoClass {
+enum DialInfoClass @0x880005edfdd38b1e {
     direct @0;         # D = Directly reachable with public IP and no firewall, with statically configured port
     mapped @1;         # M = Directly reachable with via portmap behind any NAT or firewalled with dynamically negotiated port
     fullConeNAT @2;    # F = Directly reachable device without portmap behind full-cone NAT
@@ -190,12 +188,12 @@ enum DialInfoClass {
     portRestrictedNAT @5; # P = Device without portmap behind address-and-port restricted NAT
 }
 
-struct DialInfoDetail {
+struct DialInfoDetail @0x96423aa1d67b74d8 {
     dialInfo @0 :DialInfo;
     class @1 :DialInfoClass;
 }
 
-struct PublicInternetNodeStatus {
+struct PublicInternetNodeStatus @0x9c9d7f1f12eb088f {
     willRoute @0 :Bool;
     willTunnel @1 :Bool;
     willSignal @2 :Bool;
|
|||||||
willValidateDialInfo @4 :Bool;
|
willValidateDialInfo @4 :Bool;
|
||||||
}
|
}
|
||||||
|
|
||||||
struct LocalNetworkNodeStatus {
|
struct LocalNetworkNodeStatus @0x957f5bfed2d0b5a5 {
|
||||||
willRelay @0 :Bool;
|
willRelay @0 :Bool;
|
||||||
willValidateDialInfo @1 :Bool;
|
willValidateDialInfo @1 :Bool;
|
||||||
}
|
}
|
||||||
|
|
||||||
struct NodeStatus {
|
struct NodeStatus @0xd36b9e7a3bf3330d {
|
||||||
union {
|
union {
|
||||||
publicInternet @0 :PublicInternetNodeStatus;
|
publicInternet @0 :PublicInternetNodeStatus;
|
||||||
localNetwork @1 :LocalNetworkNodeStatus;
|
localNetwork @1 :LocalNetworkNodeStatus;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
struct ProtocolTypeSet {
|
struct ProtocolTypeSet @0x82f12f55a1b73326 {
|
||||||
udp @0 :Bool;
|
udp @0 :Bool;
|
||||||
tcp @1 :Bool;
|
tcp @1 :Bool;
|
||||||
ws @2 :Bool;
|
ws @2 :Bool;
|
||||||
wss @3 :Bool;
|
wss @3 :Bool;
|
||||||
}
|
}
|
||||||
|
|
||||||
struct AddressTypeSet {
|
struct AddressTypeSet @0x9f52d5430d349e6b {
|
||||||
ipv4 @0 :Bool;
|
ipv4 @0 :Bool;
|
||||||
ipv6 @1 :Bool;
|
ipv6 @1 :Bool;
|
||||||
}
|
}
|
||||||
|
|
||||||
struct NodeInfo {
|
struct SenderInfo @0x8a4464fab4b1d101 {
|
||||||
|
socketAddress @0 :SocketAddress; # socket address that for the sending peer
|
||||||
|
}
|
||||||
|
|
||||||
|
struct NodeInfo @0xe125d847e3f9f419 {
|
||||||
networkClass @0 :NetworkClass; # network class of this node
|
networkClass @0 :NetworkClass; # network class of this node
|
||||||
outboundProtocols @1 :ProtocolTypeSet; # protocols that can go outbound
|
outboundProtocols @1 :ProtocolTypeSet; # protocols that can go outbound
|
||||||
addressTypes @2 :AddressTypeSet; # address types supported
|
addressTypes @2 :AddressTypeSet; # address types supported
|
||||||
minVersion @3 :UInt8; # minimum protocol version for rpc
|
minVersion @3 :UInt8; # minimum protocol version for rpc
|
||||||
maxVersion @4 :UInt8; # maximum protocol version for rpc
|
maxVersion @4 :UInt8; # maximum protocol version for rpc
|
||||||
dialInfoDetailList @5 :List(DialInfoDetail); # inbound dial info details for this node
|
dialInfoDetailList @5 :List(DialInfoDetail); # inbound dial info details for this node
|
||||||
relayPeerInfo @6 :PeerInfo; # (optional) relay peer info for this node
|
|
||||||
}
|
}
|
||||||
|
|
||||||
struct SignedNodeInfo {
|
struct SignedDirectNodeInfo @0xe0e7ea3e893a3dd7 {
|
||||||
nodeInfo @0 :NodeInfo; # node info
|
nodeInfo @0 :NodeInfo; # node info
|
||||||
signature @1 :Signature; # signature
|
timestamp @1 :UInt64; # when signed node info was generated
|
||||||
timestamp @2 :UInt64; # when signed node info was generated
|
signature @2 :Signature; # signature
|
||||||
}
|
}
|
||||||
|
|
||||||
struct SenderInfo {
|
struct SignedRelayedNodeInfo @0xb39e8428ccd87cbb {
|
||||||
socketAddress @0 :SocketAddress; # socket address was available for peer
|
nodeInfo @0 :NodeInfo; # node info
|
||||||
|
relayId @1 :NodeID; # node id for relay
|
||||||
|
relayInfo @2 :SignedDirectNodeInfo; # signed node info for relay
|
||||||
|
timestamp @3 :UInt64; # when signed node info was generated
|
||||||
|
signature @4 :Signature; # signature
|
||||||
}
|
}
|
||||||
|
|
||||||
struct PeerInfo {
|
struct SignedNodeInfo @0xd2478ce5f593406a {
|
||||||
|
union {
|
||||||
|
direct @0 :SignedDirectNodeInfo; # node info for nodes reachable without a relay
|
||||||
|
relayed @1 :SignedRelayedNodeInfo; # node info for nodes requiring a relay
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
struct PeerInfo @0xfe2d722d5d3c4bcb {
|
||||||
nodeId @0 :NodeID; # node id for 'closer peer'
|
nodeId @0 :NodeID; # node id for 'closer peer'
|
||||||
signedNodeInfo @1 :SignedNodeInfo; # signed node info for 'closer peer'
|
signedNodeInfo @1 :SignedNodeInfo; # signed node info for 'closer peer'
|
||||||
}
|
}
|
||||||
|
|
||||||
struct RoutedOperation {
|
struct RoutedOperation @0xcbcb8535b839e9dd {
|
||||||
signatures @0 :List(Signature); # signatures from nodes that have handled the private route
|
version @0 :UInt8; # crypto version in use for the data
|
||||||
nonce @1 :Nonce; # nonce Xmsg
|
signatures @1 :List(Signature); # signatures from nodes that have handled the private route
|
||||||
data @2 :Data; # Operation encrypted with ENC(Xmsg,DH(PKapr,SKbsr))
|
nonce @2 :Nonce; # nonce Xmsg
|
||||||
|
data @3 :Data; # operation encrypted with ENC(Xmsg,DH(PKapr,SKbsr))
|
||||||
}
|
}
|
||||||
|
|
||||||
struct OperationStatusQ {
|
struct OperationStatusQ @0x865d80cea70d884a {
|
||||||
nodeStatus @0 :NodeStatus; # node status update about the statusq sender
|
nodeStatus @0 :NodeStatus; # Optional: node status update about the statusq sender
|
||||||
}
|
}
|
||||||
|
|
||||||
struct OperationStatusA {
|
struct OperationStatusA @0xb306f407fa812a55 {
|
||||||
nodeStatus @0 :NodeStatus; # returned node status
|
nodeStatus @0 :NodeStatus; # Optional: returned node status
|
||||||
senderInfo @1 :SenderInfo; # info about StatusQ sender from the perspective of the replier
|
senderInfo @1 :SenderInfo; # Optional: info about StatusQ sender from the perspective of the replier
|
||||||
}
|
}
|
||||||
|
|
||||||
struct OperationValidateDialInfo {
|
struct OperationValidateDialInfo @0xbc716ad7d5d060c8 {
|
||||||
dialInfo @0 :DialInfo; # dial info to use for the receipt
|
dialInfo @0 :DialInfo; # dial info to use for the receipt
|
||||||
receipt @1 :Data; # receipt to return to dial info to prove it is reachable
|
receipt @1 :Data; # receipt to return to dial info to prove it is reachable
|
||||||
redirect @2 :Bool; # request a different node do the validate
|
redirect @2 :Bool; # request a different node do the validate
|
||||||
}
|
}
|
||||||
|
|
||||||
struct OperationReturnReceipt {
|
struct OperationReturnReceipt @0xeb0fb5b5a9160eeb {
|
||||||
receipt @0 :Data; # receipt being returned to its origin
|
receipt @0 :Data; # receipt being returned to its origin
|
||||||
}
|
}
|
||||||
|
|
||||||
struct OperationFindNodeQ {
|
struct OperationFindNodeQ @0xfdef788fe9623bcd {
|
||||||
nodeId @0 :NodeID; # node id to locate
|
nodeId @0 :NodeID; # node id to locate
|
||||||
}
|
}
|
||||||
|
|
||||||
struct OperationFindNodeA {
|
struct OperationFindNodeA @0xa84cf2fb40c77089 {
|
||||||
peers @0 :List(PeerInfo); # returned 'closer peer' information
|
peers @0 :List(PeerInfo); # returned 'closer peer' information
|
||||||
}
|
}
|
||||||
|
|
||||||
struct OperationRoute {
|
struct OperationRoute @0x96741859ce6ac7dd {
|
||||||
safetyRoute @0 :SafetyRoute; # Where this should go
|
safetyRoute @0 :SafetyRoute; # Where this should go
|
||||||
operation @1 :RoutedOperation; # The operation to be routed
|
operation @1 :RoutedOperation; # The operation to be routed
|
||||||
}
|
}
|
||||||
|
|
||||||
struct OperationNodeInfoUpdate {
|
struct OperationNodeInfoUpdate @0xc9647b32a48b66ce {
|
||||||
signedNodeInfo @0 :SignedNodeInfo; # Our signed node info
|
signedNodeInfo @0 :SignedNodeInfo; # Our signed node info
|
||||||
}
|
}
|
||||||
|
|
||||||
struct OperationGetValueQ {
|
|
||||||
|
struct OperationAppCallQ @0xade67b9f09784507 {
|
||||||
|
message @0 :Data; # Opaque request to application
|
||||||
|
}
|
||||||
|
|
||||||
|
struct OperationAppCallA @0xf7c797ac85f214b8 {
|
||||||
|
message @0 :Data; # Opaque response from application
|
||||||
|
}
|
||||||
|
|
||||||
|
struct OperationAppMessage @0x9baf542d81b411f5 {
|
||||||
|
message @0 :Data; # Opaque message to application
|
||||||
|
}
|
||||||
|
|
||||||
|
struct OperationGetValueQ @0xf88a5b6da5eda5d0 {
|
||||||
key @0 :ValueKey; # key for value to get
|
key @0 :ValueKey; # key for value to get
|
||||||
}
|
}
|
||||||
|
|
||||||
struct OperationGetValueA {
|
struct OperationGetValueA @0xd896bb46f2e0249f {
|
||||||
union {
|
union {
|
||||||
data @0 :ValueData; # the value if successful
|
data @0 :ValueData; # the value if successful
|
||||||
peers @1 :List(PeerInfo); # returned 'closer peer' information if not successful
|
peers @1 :List(PeerInfo); # returned 'closer peer' information if not successful
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
struct OperationSetValueQ {
|
struct OperationSetValueQ @0xbac06191ff8bdbc5 {
|
||||||
key @0 :ValueKey; # key for value to update
|
key @0 :ValueKey; # key for value to update
|
||||||
value @1 :ValueData; # value or subvalue contents in CBOR format (older or equal seq number gets dropped)
|
value @1 :ValueData; # value or subvalue contents (older or equal seq number gets dropped)
|
||||||
}
|
}
|
||||||
|
|
||||||
struct OperationSetValueA {
|
struct OperationSetValueA @0x9378d0732dc95be2 {
|
||||||
union {
|
union {
|
||||||
data @0 :ValueData; # the new value if successful, may be a different value than what was set if the seq number was lower or equal
|
data @0 :ValueData; # the new value if successful, may be a different value than what was set if the seq number was lower or equal
|
||||||
peers @1 :List(PeerInfo); # returned 'closer peer' information if not successful
|
peers @1 :List(PeerInfo); # returned 'closer peer' information if not successful
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
struct OperationWatchValueQ {
|
struct OperationWatchValueQ @0xf9a5a6c547b9b228 {
|
||||||
key @0 :ValueKey; # key for value to watch
|
key @0 :ValueKey; # key for value to watch
|
||||||
}
|
}
|
||||||
|
|
||||||
struct OperationWatchValueA {
|
struct OperationWatchValueA @0xa726cab7064ba893 {
|
||||||
expiration @0 :UInt64; # timestamp when this watch will expire in usec since epoch (0 if watch failed)
|
expiration @0 :UInt64; # timestamp when this watch will expire in usec since epoch (0 if watch failed)
|
||||||
peers @1 :List(PeerInfo); # returned list of other nodes to ask that could propagate watches
|
peers @1 :List(PeerInfo); # returned list of other nodes to ask that could propagate watches
|
||||||
}
|
}
|
||||||
|
|
||||||
struct OperationValueChanged {
|
struct OperationValueChanged @0xd1c59ebdd8cc1bf6 {
|
||||||
key @0 :ValueKey; # key for value that changed
|
key @0 :ValueKey; # key for value that changed
|
||||||
value @1 :ValueData; # value or subvalue contents in CBOR format with sequence number
|
value @1 :ValueData; # value or subvalue contents with sequence number
|
||||||
}
|
}
|
||||||
|
|
||||||
struct OperationSupplyBlockQ {
|
struct OperationSupplyBlockQ @0xadbf4c542d749971 {
|
||||||
blockId @0 :BlockID; # hash of the block we can supply
|
blockId @0 :BlockID; # hash of the block we can supply
|
||||||
}
|
}
|
||||||
|
|
||||||
struct OperationSupplyBlockA {
|
struct OperationSupplyBlockA @0xf003822e83b5c0d7 {
|
||||||
union {
|
union {
|
||||||
expiration @0 :UInt64; # when the block supplier entry will need to be refreshed
|
expiration @0 :UInt64; # when the block supplier entry will need to be refreshed
|
||||||
peers @1 :List(PeerInfo); # returned 'closer peer' information if not successful
|
peers @1 :List(PeerInfo); # returned 'closer peer' information if not successful
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
struct OperationFindBlockQ {
|
struct OperationFindBlockQ @0xaf4353ff004c7156 {
|
||||||
blockId @0 :BlockID; # hash of the block to locate
|
blockId @0 :BlockID; # hash of the block to locate
|
||||||
}
|
}
|
||||||
|
|
||||||
struct OperationFindBlockA {
|
struct OperationFindBlockA @0xc51455bc4915465d {
|
||||||
data @0 :Data; # Optional: the actual block data if we have that block ourselves
|
data @0 :Data; # Optional: the actual block data if we have that block ourselves
|
||||||
# null if we don't have a block to return
|
# null if we don't have a block to return
|
||||||
suppliers @1 :List(PeerInfo); # returned list of suppliers if we have them
|
suppliers @1 :List(PeerInfo); # returned list of suppliers if we have them
|
||||||
peers @2 :List(PeerInfo); # returned 'closer peer' information
|
peers @2 :List(PeerInfo); # returned 'closer peer' information
|
||||||
}
|
}
|
||||||
|
|
||||||
struct OperationSignal {
|
struct OperationSignal @0xd4f94f2a5d207e49 {
|
||||||
union {
|
union {
|
||||||
holePunch @0 :SignalInfoHolePunch;
|
holePunch @0 :SignalInfoHolePunch;
|
||||||
reverseConnect @1 :SignalInfoReverseConnect;
|
reverseConnect @1 :SignalInfoReverseConnect;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
enum TunnelEndpointMode {
|
enum TunnelEndpointMode @0xef06f4c29beb7458 {
|
||||||
raw @0; # raw tunnel
|
raw @0; # raw tunnel
|
||||||
turn @1; # turn tunnel
|
turn @1; # turn tunnel
|
||||||
}
|
}
|
||||||
|
|
||||||
enum TunnelError {
|
enum TunnelError @0xb82c6bfb1ec38c7c {
|
||||||
badId @0; # Tunnel ID was rejected
|
badId @0; # Tunnel ID was rejected
|
||||||
noEndpoint @1; # Endpoint was unreachable
|
noEndpoint @1; # Endpoint was unreachable
|
||||||
rejectedMode @2; # Endpoint couldn't provide mode
|
rejectedMode @2; # Endpoint couldn't provide mode
|
||||||
noCapacity @3; # Endpoint is full
|
noCapacity @3; # Endpoint is full
|
||||||
}
|
}
|
||||||
|
|
||||||
struct TunnelEndpoint {
|
struct TunnelEndpoint @0xc2602aa983cc337d {
|
||||||
mode @0 :TunnelEndpointMode; # what kind of endpoint this is
|
mode @0 :TunnelEndpointMode; # what kind of endpoint this is
|
||||||
description @1 :Text; # endpoint description (TODO)
|
description @1 :Text; # endpoint description (TODO)
|
||||||
}
|
}
|
||||||
|
|
||||||
struct FullTunnel {
|
struct FullTunnel @0x9821c3dc75373f63 {
|
||||||
id @0 :TunnelID; # tunnel id to use everywhere
|
id @0 :TunnelID; # tunnel id to use everywhere
|
||||||
timeout @1 :UInt64; # duration from last data when this expires if no data is sent or received
|
timeout @1 :UInt64; # duration from last data when this expires if no data is sent or received
|
||||||
local @2 :TunnelEndpoint; # local endpoint
|
local @2 :TunnelEndpoint; # local endpoint
|
||||||
remote @3 :TunnelEndpoint; # remote endpoint
|
remote @3 :TunnelEndpoint; # remote endpoint
|
||||||
}
|
}
|
||||||
|
|
||||||
struct PartialTunnel {
|
struct PartialTunnel @0x827a7ebc02be2fc8 {
|
||||||
id @0 :TunnelID; # tunnel id to use everywhere
|
id @0 :TunnelID; # tunnel id to use everywhere
|
||||||
timeout @1 :UInt64; # timestamp when this expires if not completed
|
timeout @1 :UInt64; # timestamp when this expires if not completed
|
||||||
local @2 :TunnelEndpoint; # local endpoint
|
local @2 :TunnelEndpoint; # local endpoint
|
||||||
}
|
}
|
||||||
|
|
||||||
struct OperationStartTunnelQ {
|
struct OperationStartTunnelQ @0xa9c49afce44187af {
|
||||||
id @0 :TunnelID; # tunnel id to use everywhere
|
id @0 :TunnelID; # tunnel id to use everywhere
|
||||||
localMode @1 :TunnelEndpointMode; # what kind of local endpoint mode is being requested
|
localMode @1 :TunnelEndpointMode; # what kind of local endpoint mode is being requested
|
||||||
depth @2 :UInt8; # the number of nodes in the tunnel
|
depth @2 :UInt8; # the number of nodes in the tunnel
|
||||||
}
|
}
|
||||||
|
|
||||||
struct OperationStartTunnelA {
|
struct OperationStartTunnelA @0x818162e4cc61bf1e {
|
||||||
union {
|
union {
|
||||||
partial @0 :PartialTunnel; # the first half of the tunnel
|
partial @0 :PartialTunnel; # the first half of the tunnel
|
||||||
error @1 :TunnelError; # if we didn't start the tunnel, why not
|
error @1 :TunnelError; # if we didn't start the tunnel, why not
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
struct OperationCompleteTunnelQ {
|
struct OperationCompleteTunnelQ @0xe978594588eb950b {
|
||||||
id @0 :TunnelID; # tunnel id to use everywhere
|
id @0 :TunnelID; # tunnel id to use everywhere
|
||||||
localMode @1 :TunnelEndpointMode; # what kind of local endpoint mode is being requested
|
localMode @1 :TunnelEndpointMode; # what kind of local endpoint mode is being requested
|
||||||
depth @2 :UInt8; # the number of nodes in the tunnel
|
depth @2 :UInt8; # the number of nodes in the tunnel
|
||||||
endpoint @3 :TunnelEndpoint; # the remote endpoint to complete
|
endpoint @3 :TunnelEndpoint; # the remote endpoint to complete
|
||||||
}
|
}
|
||||||
|
|
||||||
struct OperationCompleteTunnelA {
|
struct OperationCompleteTunnelA @0x84090791bb765f2a {
|
||||||
union {
|
union {
|
||||||
tunnel @0 :FullTunnel; # the tunnel description
|
tunnel @0 :FullTunnel; # the tunnel description
|
||||||
error @1 :TunnelError; # if we didn't complete the tunnel, why not
|
error @1 :TunnelError; # if we didn't complete the tunnel, why not
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
struct OperationCancelTunnelQ {
|
struct OperationCancelTunnelQ @0xae2811ae0a003738 {
|
||||||
id @0 :TunnelID; # the tunnel id to cancel
|
id @0 :TunnelID; # the tunnel id to cancel
|
||||||
}
|
}
|
||||||
|
|
||||||
struct OperationCancelTunnelA {
|
struct OperationCancelTunnelA @0xbba23c992eff97bc {
|
||||||
union {
|
union {
|
||||||
tunnel @0 :TunnelID; # the tunnel id that was cancelled
|
tunnel @0 :TunnelID; # the tunnel id that was cancelled
|
||||||
error @1 :TunnelError; # if we couldn't cancel, why not
|
error @1 :TunnelError; # if we couldn't cancel, why not
|
||||||
@ -429,7 +455,7 @@ struct OperationCancelTunnelA {
|
|||||||
}
|
}
|
||||||
|
|
||||||
# Things that want an answer
|
# Things that want an answer
|
||||||
struct Question {
|
struct Question @0xd8510bc33492ef70 {
|
||||||
respondTo :union {
|
respondTo :union {
|
||||||
sender @0 :Void; # sender
|
sender @0 :Void; # sender
|
||||||
privateRoute @1 :PrivateRoute; # embedded private route to be used for reply
|
privateRoute @1 :PrivateRoute; # embedded private route to be used for reply
|
||||||
@ -445,16 +471,17 @@ struct Question {
|
|||||||
watchValueQ @6 :OperationWatchValueQ;
|
watchValueQ @6 :OperationWatchValueQ;
|
||||||
supplyBlockQ @7 :OperationSupplyBlockQ;
|
supplyBlockQ @7 :OperationSupplyBlockQ;
|
||||||
findBlockQ @8 :OperationFindBlockQ;
|
findBlockQ @8 :OperationFindBlockQ;
|
||||||
|
appCallQ @9 :OperationAppCallQ;
|
||||||
|
|
||||||
# Tunnel operations
|
# Tunnel operations
|
||||||
startTunnelQ @9 :OperationStartTunnelQ;
|
startTunnelQ @10 :OperationStartTunnelQ;
|
||||||
completeTunnelQ @10 :OperationCompleteTunnelQ;
|
completeTunnelQ @11 :OperationCompleteTunnelQ;
|
||||||
cancelTunnelQ @11 :OperationCancelTunnelQ;
|
cancelTunnelQ @12 :OperationCancelTunnelQ;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
# Things that don't want an answer
|
# Things that don't want an answer
|
||||||
struct Statement {
|
struct Statement @0x990e20828f404ae1 {
|
||||||
detail :union {
|
detail :union {
|
||||||
# Direct operations
|
# Direct operations
|
||||||
validateDialInfo @0 :OperationValidateDialInfo;
|
validateDialInfo @0 :OperationValidateDialInfo;
|
||||||
@ -465,11 +492,12 @@ struct Statement {
|
|||||||
valueChanged @3 :OperationValueChanged;
|
valueChanged @3 :OperationValueChanged;
|
||||||
signal @4 :OperationSignal;
|
signal @4 :OperationSignal;
|
||||||
returnReceipt @5 :OperationReturnReceipt;
|
returnReceipt @5 :OperationReturnReceipt;
|
||||||
|
appMessage @6 :OperationAppMessage;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
# Things that are answers
|
# Things that are answers
|
||||||
struct Answer {
|
struct Answer @0xacacb8b6988c1058 {
|
||||||
detail :union {
|
detail :union {
|
||||||
# Direct operations
|
# Direct operations
|
||||||
statusA @0 :OperationStatusA;
|
statusA @0 :OperationStatusA;
|
||||||
@ -480,16 +508,17 @@ struct Answer {
|
|||||||
setValueA @3 :OperationSetValueA;
|
setValueA @3 :OperationSetValueA;
|
||||||
watchValueA @4 :OperationWatchValueA;
|
watchValueA @4 :OperationWatchValueA;
|
||||||
supplyBlockA @5 :OperationSupplyBlockA;
|
supplyBlockA @5 :OperationSupplyBlockA;
|
||||||
findBlockA @6 :OperationFindBlockA;
|
findBlockA @6 :OperationFindBlockA;
|
||||||
|
appCallA @7 :OperationAppCallA;
|
||||||
|
|
||||||
# Tunnel operations
|
# Tunnel operations
|
||||||
startTunnelA @7 :OperationStartTunnelA;
|
startTunnelA @8 :OperationStartTunnelA;
|
||||||
completeTunnelA @8 :OperationCompleteTunnelA;
|
completeTunnelA @9 :OperationCompleteTunnelA;
|
||||||
cancelTunnelA @9 :OperationCancelTunnelA;
|
cancelTunnelA @10 :OperationCancelTunnelA;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
struct Operation {
|
struct Operation @0xbf2811c435403c3b {
|
||||||
opId @0 :UInt64; # Random RPC ID. Must be random to foil reply forgery attacks.
|
opId @0 :UInt64; # Random RPC ID. Must be random to foil reply forgery attacks.
|
||||||
senderNodeInfo @1 :SignedNodeInfo; # (optional) SignedNodeInfo for the sender to be cached by the receiver.
|
senderNodeInfo @1 :SignedNodeInfo; # (optional) SignedNodeInfo for the sender to be cached by the receiver.
|
||||||
kind :union {
|
kind :union {
|
||||||
|
@@ -103,7 +103,7 @@ impl<S: Subscriber + for<'a> registry::LookupSpan<'a>> Layer<S> for ApiTracingLa
            None
        };

-        (inner.update_callback)(VeilidUpdate::Log(VeilidStateLog {
+        (inner.update_callback)(VeilidUpdate::Log(VeilidLog {
            log_level,
            message,
            backtrace,
@@ -1,15 +1,16 @@
use crate::callback_state_machine::*;
-use crate::dht::Crypto;
+use crate::crypto::Crypto;
use crate::network_manager::*;
use crate::routing_table::*;
use crate::xx::*;
use crate::*;
use core::convert::TryFrom;
use core::fmt;
+use rkyv::{Archive as RkyvArchive, Deserialize as RkyvDeserialize, Serialize as RkyvSerialize};
use serde::*;

state_machine! {
-    derive(Debug, PartialEq, Eq, Clone, Copy, Serialize, Deserialize)
+    derive(Debug, PartialEq, Eq, Clone, Copy, Serialize, Deserialize, RkyvArchive, RkyvSerialize, RkyvDeserialize,)
    pub Attachment(Detached)
    //---
    Detached(AttachRequested) => Attaching [StartAttachment],
@@ -102,48 +103,77 @@ impl TryFrom<String> for AttachmentState {
}

pub struct AttachmentManagerInner {
-    config: VeilidConfig,
    attachment_machine: CallbackStateMachine<Attachment>,
-    network_manager: NetworkManager,
    maintain_peers: bool,
    attach_timestamp: Option<u64>,
    update_callback: Option<UpdateCallback>,
    attachment_maintainer_jh: Option<MustJoinHandle<()>>,
}

+pub struct AttachmentManagerUnlockedInner {
+    config: VeilidConfig,
+    network_manager: NetworkManager,
+}

#[derive(Clone)]
pub struct AttachmentManager {
    inner: Arc<Mutex<AttachmentManagerInner>>,
+    unlocked_inner: Arc<AttachmentManagerUnlockedInner>,
}

impl AttachmentManager {
-    fn new_inner(
+    fn new_unlocked_inner(
        config: VeilidConfig,
+        protected_store: ProtectedStore,
        table_store: TableStore,
+        block_store: BlockStore,
        crypto: Crypto,
-    ) -> AttachmentManagerInner {
-        AttachmentManagerInner {
+    ) -> AttachmentManagerUnlockedInner {
+        AttachmentManagerUnlockedInner {
            config: config.clone(),
+            network_manager: NetworkManager::new(
+                config,
+                protected_store,
+                table_store,
+                block_store,
+                crypto,
+            ),
+        }
+    }
+    fn new_inner() -> AttachmentManagerInner {
+        AttachmentManagerInner {
            attachment_machine: CallbackStateMachine::new(),
-            network_manager: NetworkManager::new(config, table_store, crypto),
            maintain_peers: false,
            attach_timestamp: None,
            update_callback: None,
            attachment_maintainer_jh: None,
        }
    }
-    pub fn new(config: VeilidConfig, table_store: TableStore, crypto: Crypto) -> Self {
+    pub fn new(
+        config: VeilidConfig,
+        protected_store: ProtectedStore,
+        table_store: TableStore,
+        block_store: BlockStore,
+        crypto: Crypto,
+    ) -> Self {
        Self {
-            inner: Arc::new(Mutex::new(Self::new_inner(config, table_store, crypto))),
+            inner: Arc::new(Mutex::new(Self::new_inner())),
+            unlocked_inner: Arc::new(Self::new_unlocked_inner(
+                config,
+                protected_store,
+                table_store,
+                block_store,
+                crypto,
+            )),
        }
    }

    pub fn config(&self) -> VeilidConfig {
-        self.inner.lock().config.clone()
+        self.unlocked_inner.config.clone()
    }

    pub fn network_manager(&self) -> NetworkManager {
-        self.inner.lock().network_manager.clone()
+        self.unlocked_inner.network_manager.clone()
    }

    pub fn is_attached(&self) -> bool {
@@ -202,9 +232,10 @@ impl AttachmentManager {
            AttachmentManager::translate_attachment_state(&inner.attachment_machine.state());

        // get reliable peer count from routing table
-        let routing_table = inner.network_manager.routing_table();
+        let routing_table = self.network_manager().routing_table();
        let health = routing_table.get_routing_table_health();
-        let routing_table_config = &inner.config.get().network.routing_table;
+        let config = self.config();
+        let routing_table_config = &config.get().network.routing_table;

        let new_peer_state_input =
            AttachmentManager::translate_routing_table_health(health, routing_table_config);
@@ -223,11 +254,8 @@ impl AttachmentManager {
    #[instrument(level = "debug", skip(self))]
    async fn attachment_maintainer(self) {
        debug!("attachment starting");
-        let netman = {
-            let mut inner = self.inner.lock();
-            inner.attach_timestamp = Some(intf::get_timestamp());
-            inner.network_manager.clone()
-        };
+        self.inner.lock().attach_timestamp = Some(intf::get_timestamp());
+        let netman = self.network_manager();

        let mut restart;
        loop {
@@ -286,7 +314,7 @@ impl AttachmentManager {
    #[instrument(level = "debug", skip_all, err)]
    pub async fn init(&self, update_callback: UpdateCallback) -> EyreResult<()> {
        trace!("init");
-        let network_manager = {
+        {
            let mut inner = self.inner.lock();
            inner.update_callback = Some(update_callback.clone());
            let update_callback2 = update_callback.clone();
@@ -297,10 +325,9 @@ impl AttachmentManager {
                }))
                },
            ));
-            inner.network_manager.clone()
        };

-        network_manager.init(update_callback).await?;
+        self.network_manager().init(update_callback).await?;

        Ok(())
    }
@@ -309,30 +336,33 @@ impl AttachmentManager {
    pub async fn terminate(&self) {
        // Ensure we detached
        self.detach().await;
-        let network_manager = {
-            let inner = self.inner.lock();
-            inner.network_manager.clone()
-        };
-        network_manager.terminate().await;
-        let mut inner = self.inner.lock();
-        inner.update_callback = None;
+        self.network_manager().terminate().await;
+        self.inner.lock().update_callback = None;
    }

    #[instrument(level = "trace", skip(self))]
    fn attach(&self) {
        // Create long-running connection maintenance routine
-        let this = self.clone();
-        self.inner.lock().maintain_peers = true;
-        self.inner.lock().attachment_maintainer_jh =
-            Some(intf::spawn(this.attachment_maintainer()));
+        let mut inner = self.inner.lock();
+        if inner.attachment_maintainer_jh.is_some() {
+            return;
+        }
+        inner.maintain_peers = true;
+        inner.attachment_maintainer_jh = Some(intf::spawn(self.clone().attachment_maintainer()));
    }

    #[instrument(level = "trace", skip(self))]
    async fn detach(&self) {
-        let attachment_maintainer_jh = self.inner.lock().attachment_maintainer_jh.take();
+        let attachment_maintainer_jh = {
+            let mut inner = self.inner.lock();
+            let attachment_maintainer_jh = inner.attachment_maintainer_jh.take();
+            if attachment_maintainer_jh.is_some() {
+                // Terminate long-running connection maintenance routine
+                inner.maintain_peers = false;
+            }
+            attachment_maintainer_jh
+        };
        if let Some(jh) = attachment_maintainer_jh {
-            // Terminate long-running connection maintenance routine
-            self.inner.lock().maintain_peers = false;
            jh.await;
        }
    }
@@ -1,6 +1,6 @@
use crate::api_tracing_layer::*;
use crate::attachment_manager::*;
-use crate::dht::Crypto;
+use crate::crypto::Crypto;
use crate::veilid_api::*;
use crate::veilid_config::*;
use crate::xx::*;
@@ -103,7 +103,13 @@ impl ServicesContext {
        // Set up attachment manager
        trace!("init attachment manager");
        let update_callback = self.update_callback.clone();
-        let attachment_manager = AttachmentManager::new(self.config.clone(), table_store, crypto);
+        let attachment_manager = AttachmentManager::new(
+            self.config.clone(),
+            protected_store,
+            table_store,
+            block_store,
+            crypto,
+        );
        if let Err(e) = attachment_manager.init(update_callback).await {
            self.shutdown().await;
            return Err(e);
@@ -171,7 +177,7 @@ impl VeilidCoreContext {
        // Set up config from callback
        trace!("setup config with callback");
        let mut config = VeilidConfig::new();
-        config.setup(config_callback)?;
+        config.setup(config_callback, update_callback.clone())?;

        Self::new_common(update_callback, config).await
    }
@@ -184,7 +190,7 @@ impl VeilidCoreContext {
        // Set up config from callback
        trace!("setup config with json");
        let mut config = VeilidConfig::new();
-        config.setup_from_json(config_json)?;
+        config.setup_from_json(config_json, update_callback.clone())?;
        Self::new_common(update_callback, config).await
    }

@@ -1,7 +1,7 @@
#![allow(dead_code)]
#![allow(clippy::absurd_extreme_comparisons)]
-use super::crypto::*;
-use super::key::*;
+use super::*;
+use crate::routing_table::VersionRange;
use crate::xx::*;
use crate::*;
use core::convert::TryInto;
@@ -38,8 +38,6 @@ use core::convert::TryInto;
pub const MAX_ENVELOPE_SIZE: usize = 65507;
pub const MIN_ENVELOPE_SIZE: usize = 0x6A + 0x40; // Header + Signature
pub const ENVELOPE_MAGIC: &[u8; 4] = b"VLID";
-pub const MIN_VERSION: u8 = 0u8;
-pub const MAX_VERSION: u8 = 0u8;
pub type EnvelopeNonce = [u8; 24];

#[derive(Debug, Clone, PartialEq, Eq, Default)]
@@ -61,15 +59,12 @@ impl Envelope {
        sender_id: DHTKey,
        recipient_id: DHTKey,
    ) -> Self {
-        assert!(sender_id.valid);
-        assert!(recipient_id.valid);
+        assert!(version >= MIN_CRYPTO_VERSION);
+        assert!(version <= MAX_CRYPTO_VERSION);

-        assert!(version >= MIN_VERSION);
-        assert!(version <= MAX_VERSION);
        Self {
            version,
-            min_version: MIN_VERSION,
-            max_version: MAX_VERSION,
+            min_version: MIN_CRYPTO_VERSION,
+            max_version: MAX_CRYPTO_VERSION,
            timestamp,
            nonce,
            sender_id,
@@ -94,9 +89,9 @@ impl Envelope {

        // Check version
        let version = data[0x04];
-        if version > MAX_VERSION || version < MIN_VERSION {
+        if version > MAX_CRYPTO_VERSION || version < MIN_CRYPTO_VERSION {
            return Err(VeilidAPIError::parse_error(
-                "unsupported protocol version",
+                "unsupported cryptography version",
                version,
            ));
        }
@@ -208,15 +203,6 @@ impl Envelope {
        body: &[u8],
        node_id_secret: &DHTKeySecret,
    ) -> Result<Vec<u8>, VeilidAPIError> {
-        // Ensure sender node id is valid
-        if !self.sender_id.valid {
-            return Err(VeilidAPIError::generic("sender id is invalid"));
-        }
-        // Ensure recipient node id is valid
-        if !self.recipient_id.valid {
-            return Err(VeilidAPIError::generic("recipient id is invalid"));
-        }

        // Ensure body isn't too long
        let envelope_size: usize = body.len() + MIN_ENVELOPE_SIZE;
        if envelope_size > MAX_ENVELOPE_SIZE {
@@ -274,8 +260,11 @@ impl Envelope {
        self.version
    }

-    pub fn get_min_max_version(&self) -> (u8, u8) {
-        (self.min_version, self.max_version)
+    pub fn get_min_max_version(&self) -> VersionRange {
+        VersionRange {
+            min: self.min_version,
+            max: self.max_version,
+        }
    }

    pub fn get_timestamp(&self) -> u64 {
@@ -2,29 +2,35 @@ use crate::veilid_rng::*;
use crate::xx::*;
use crate::*;

-use core::cmp::{Eq, Ord, Ordering, PartialEq, PartialOrd};
+use core::cmp::{Eq, Ord, PartialEq, PartialOrd};
use core::convert::{TryFrom, TryInto};
use core::fmt;
-use core::hash::{Hash, Hasher};
+use core::hash::Hash;

use data_encoding::BASE64URL_NOPAD;
use digest::generic_array::typenum::U64;
use digest::{Digest, Output};
use ed25519_dalek::{Keypair, PublicKey, Signature};
use generic_array::GenericArray;
-use serde::{Deserialize, Serialize};
+use rkyv::{Archive as RkyvArchive, Deserialize as RkyvDeserialize, Serialize as RkyvSerialize};

//////////////////////////////////////////////////////////////////////

+/// Length of a DHT key in bytes
#[allow(dead_code)]
pub const DHT_KEY_LENGTH: usize = 32;
+/// Length of a DHT key in bytes after encoding to base64url
#[allow(dead_code)]
pub const DHT_KEY_LENGTH_ENCODED: usize = 43;
+/// Length of a DHT secret in bytes
#[allow(dead_code)]
pub const DHT_KEY_SECRET_LENGTH: usize = 32;
+/// Length of a DHT secret in bytes after encoding to base64url
#[allow(dead_code)]
pub const DHT_KEY_SECRET_LENGTH_ENCODED: usize = 43;
+/// Length of a DHT signature in bytes
#[allow(dead_code)]
+/// Length of a DHT signature in bytes after encoding to base64url
pub const DHT_SIGNATURE_LENGTH: usize = 64;
#[allow(dead_code)]
pub const DHT_SIGNATURE_LENGTH_ENCODED: usize = 86;
@@ -33,33 +39,47 @@ pub const DHT_SIGNATURE_LENGTH_ENCODED: usize = 86;

macro_rules! byte_array_type {
    ($name:ident, $size:expr) => {
-        #[derive(Clone, Copy)]
+        #[derive(
+            Clone,
+            Copy,
+            Hash,
+            Eq,
+            PartialEq,
+            PartialOrd,
+            Ord,
+            RkyvArchive,
+            RkyvSerialize,
+            RkyvDeserialize,
+        )]
+        #[archive_attr(repr(C), derive(CheckBytes, Hash, Eq, PartialEq, PartialOrd, Ord))]
        pub struct $name {
            pub bytes: [u8; $size],
-            pub valid: bool,
        }

-        impl Serialize for $name {
+        impl Default for $name {
+            fn default() -> Self {
+                Self {
+                    bytes: [0u8; $size],
+                }
+            }
+        }

+        impl serde::Serialize for $name {
            fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
            where
                S: serde::Serializer,
            {
-                let s: String;
-                if self.valid {
-                    s = self.encode();
-                } else {
-                    s = "".to_owned();
-                }
-                s.serialize(serializer)
+                let s = self.encode();
+                serde::Serialize::serialize(&s, serializer)
            }
        }

-        impl<'de> Deserialize<'de> for $name {
+        impl<'de> serde::Deserialize<'de> for $name {
            fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
            where
                D: serde::Deserializer<'de>,
            {
-                let s = String::deserialize(deserializer)?;
+                let s = <String as serde::Deserialize>::deserialize(deserializer)?;
                if s == "" {
                    return Ok($name::default());
                }
@@ -69,28 +89,19 @@ macro_rules! byte_array_type {

        impl $name {
            pub fn new(bytes: [u8; $size]) -> Self {
-                Self { bytes, valid: true }
+                Self { bytes }
            }

            pub fn try_from_vec(v: Vec<u8>) -> Result<Self, VeilidAPIError> {
-                let mut this = Self {
-                    bytes: [0u8; $size],
-                    valid: true,
-                };
-
-                if v.len() != $size {
-                    apibail_generic!(format!(
-                        "Expected a Vec of length {} but it was {}",
-                        $size,
-                        v.len()
-                    ));
-                }
-
-                for n in 0..v.len() {
-                    this.bytes[n] = v[n];
-                }
-
-                Ok(this)
+                let vl = v.len();
+                Ok(Self {
+                    bytes: v.try_into().map_err(|_| {
+                        VeilidAPIError::generic(format!(
+                            "Expected a Vec of length {} but it was {}",
+                            $size, vl
+                        ))
+                    })?,
+                })
            }

            pub fn bit(&self, index: usize) -> bool {
@@ -136,14 +147,13 @@ macro_rules! byte_array_type {
            }

            pub fn encode(&self) -> String {
-                assert!(self.valid);
                BASE64URL_NOPAD.encode(&self.bytes)
            }

-            pub fn try_decode(input: &str) -> Result<Self, VeilidAPIError> {
+            pub fn try_decode<S: AsRef<str>>(input: S) -> Result<Self, VeilidAPIError> {
                let mut bytes = [0u8; $size];

-                let res = BASE64URL_NOPAD.decode_len(input.len());
+                let res = BASE64URL_NOPAD.decode_len(input.as_ref().len());
                match res {
                    Ok(v) => {
                        if v != $size {
@@ -155,103 +165,38 @@ macro_rules! byte_array_type {
                    }
                }

-                let res = BASE64URL_NOPAD.decode_mut(input.as_bytes(), &mut bytes);
+                let res = BASE64URL_NOPAD.decode_mut(input.as_ref().as_bytes(), &mut bytes);
                match res {
                    Ok(_) => Ok(Self::new(bytes)),
                    Err(_) => apibail_generic!("Failed to decode"),
                }
            }
        }
-        impl PartialOrd for $name {
-            fn partial_cmp(&self, other: &$name) -> Option<Ordering> {
-                Some(self.cmp(other))
-            }
-        }
-        impl Ord for $name {
-            fn cmp(&self, other: &$name) -> Ordering {
-                if !self.valid && !other.valid {
-                    return Ordering::Equal;
-                }
-                if !self.valid && other.valid {
-                    return Ordering::Less;
-                }
-                if self.valid && !other.valid {
-                    return Ordering::Greater;
-                }
-
-                for n in 0..$size {
-                    if self.bytes[n] < other.bytes[n] {
-                        return Ordering::Less;
-                    }
-                    if self.bytes[n] > other.bytes[n] {
-                        return Ordering::Greater;
-                    }
-                }
-                Ordering::Equal
-            }
-        }
-        impl PartialEq<$name> for $name {
-            fn eq(&self, other: &$name) -> bool {
-                if self.valid != other.valid {
-                    return false;
-                }
-                for n in 0..$size {
-                    if self.bytes[n] != other.bytes[n] {
-                        return false;
-                    }
-                }
-                true
-            }
-        }
-        impl Eq for $name {}
-        impl Hash for $name {
-            fn hash<H: Hasher>(&self, state: &mut H) {
-                self.valid.hash(state);
-                if self.valid {
-                    self.bytes.hash(state);
-                }
-            }
-        }
-        impl Default for $name {
-            fn default() -> Self {
-                let mut this = $name::new([0u8; $size]);
-                this.valid = false;
-                this
-            }
-        }
        impl fmt::Display for $name {
            fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
-                write!(f, "{}", String::from(self))
+                //write!(f, "{}", String::from(self))
+                write!(f, "{}", self.encode())
            }
        }

        impl fmt::Debug for $name {
            fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
                write!(f, concat!(stringify!($name), "("))?;
-                write!(
-                    f,
-                    "{}",
-                    if self.valid {
-                        self.encode()
-                    } else {
-                        "".to_owned()
-                    }
-                )?;
+                write!(f, "{}", self.encode())?;
                write!(f, ")")
            }
        }

        impl From<&$name> for String {
            fn from(value: &$name) -> Self {
-                if !value.valid {
-                    return "".to_owned();
-                }
-                let mut s = String::new();
-                for n in 0..($size / 8) {
-                    let b: [u8; 8] = value.bytes[n * 8..(n + 1) * 8].try_into().unwrap();
-                    s.push_str(hex::encode(b).as_str());
-                }
-                s
+                // let mut s = String::new();
+                // for n in 0..($size / 8) {
+                // let b: [u8; 8] = value.bytes[n * 8..(n + 1) * 8].try_into().unwrap();
+                // s.push_str(hex::encode(b).as_str());
+                // }
+                // s
+                value.encode()
            }
        }

@@ -265,20 +210,18 @@ macro_rules! byte_array_type {
        impl TryFrom<&str> for $name {
            type Error = VeilidAPIError;
            fn try_from(value: &str) -> Result<Self, Self::Error> {
-                let mut out = $name::default();
-                if value == "" {
-                    return Ok(out);
-                }
-                if value.len() != ($size * 2) {
-                    apibail_generic!(concat!(stringify!($name), " is incorrect length"));
-                }
-                match hex::decode_to_slice(value, &mut out.bytes) {
-                    Ok(_) => {
-                        out.valid = true;
-                        Ok(out)
-                    }
-                    Err(err) => Err(VeilidAPIError::generic(err)),
-                }
+                // let mut out = $name::default();
+                // if value == "" {
+                // return Ok(out);
+                // }
+                // if value.len() != ($size * 2) {
+                // apibail_generic!(concat!(stringify!($name), " is incorrect length"));
+                // }
+                // match hex::decode_to_slice(value, &mut out.bytes) {
+                // Ok(_) => Ok(out),
+                // Err(err) => Err(VeilidAPIError::generic(err)),
+                // }
                Self::try_decode(value)
            }
        }
    };
@@ -374,9 +317,6 @@ pub fn sign(
    dht_key_secret: &DHTKeySecret,
    data: &[u8],
) -> Result<DHTSignature, VeilidAPIError> {
-    assert!(dht_key.valid);
-    assert!(dht_key_secret.valid);

    let mut kpb: [u8; DHT_KEY_SECRET_LENGTH + DHT_KEY_LENGTH] =
        [0u8; DHT_KEY_SECRET_LENGTH + DHT_KEY_LENGTH];

@@ -401,8 +341,6 @@ pub fn verify(
    data: &[u8],
    signature: &DHTSignature,
) -> Result<(), VeilidAPIError> {
-    assert!(dht_key.valid);
-    assert!(signature.valid);
    let pk = PublicKey::from_bytes(&dht_key.bytes)
        .map_err(|e| VeilidAPIError::parse_error("Public key is invalid", e))?;
    let sig = Signature::from_bytes(&signature.bytes)
@@ -421,7 +359,6 @@ pub fn generate_hash(data: &[u8]) -> DHTKey {
}

pub fn validate_hash(data: &[u8], dht_key: &DHTKey) -> bool {
-    assert!(dht_key.valid);
    let bytes = *blake3::hash(data).as_bytes();

    bytes == dht_key.bytes
@@ -439,8 +376,6 @@ pub fn validate_key(dht_key: &DHTKey, dht_key_secret: &DHTKeySecret) -> bool {
}

pub fn distance(key1: &DHTKey, key2: &DHTKey) -> DHTKeyDistance {
-    assert!(key1.valid);
-    assert!(key2.valid);
    let mut bytes = [0u8; DHT_KEY_LENGTH];

    for (n, byte) in bytes.iter_mut().enumerate() {
@@ -1,4 +1,18 @@
-use super::key::*;
+mod envelope;
+mod key;
+mod receipt;
+mod value;
+
+pub mod tests;
+
+pub use envelope::*;
+pub use key::*;
+pub use receipt::*;
+pub use value::*;
+
+pub const MIN_CRYPTO_VERSION: u8 = 0u8;
+pub const MAX_CRYPTO_VERSION: u8 = 0u8;
+
use crate::xx::*;
use crate::*;
use chacha20::cipher::{KeyIvInit, StreamCipher};
@@ -98,19 +112,19 @@ impl Crypto {
        let (table_store, node_id) = {
            let mut inner = self.inner.lock();
            let c = self.config.get();
-            inner.node_id = c.network.node_id;
-            inner.node_id_secret = c.network.node_id_secret;
+            inner.node_id = c.network.node_id.unwrap();
+            inner.node_id_secret = c.network.node_id_secret.unwrap();
            (inner.table_store.clone(), c.network.node_id)
        };

        // load caches if they are valid for this node id
        let mut db = table_store.open("crypto_caches", 1).await?;
-        let caches_valid = match db.load(0, b"node_id").await? {
-            Some(v) => v.as_slice() == node_id.bytes,
+        let caches_valid = match db.load(0, b"node_id")? {
+            Some(v) => v.as_slice() == node_id.unwrap().bytes,
            None => false,
        };
        if caches_valid {
-            if let Some(b) = db.load(0, b"dh_cache").await? {
+            if let Some(b) = db.load(0, b"dh_cache")? {
                let mut inner = self.inner.lock();
                bytes_to_cache(&b, &mut inner.dh_cache);
            }
@@ -118,7 +132,7 @@ impl Crypto {
            drop(db);
            table_store.delete("crypto_caches").await?;
            db = table_store.open("crypto_caches", 1).await?;
-            db.store(0, b"node_id", &node_id.bytes).await?;
+            db.store(0, b"node_id", &node_id.unwrap().bytes)?;
        }

        // Schedule flushing
@@ -145,7 +159,7 @@ impl Crypto {
        };

        let db = table_store.open("crypto_caches", 1).await?;
-        db.store(0, b"dh_cache", &cache_bytes).await?;
+        db.store(0, b"dh_cache", &cache_bytes)?;
        Ok(())
    }

@@ -206,8 +220,6 @@ impl Crypto {
    // These are safe to use regardless of initialization status

    pub fn compute_dh(key: &DHTKey, secret: &DHTKeySecret) -> Result<SharedSecret, VeilidAPIError> {
-        assert!(key.valid);
-        assert!(secret.valid);
        let pk_ed = ed::PublicKey::from_bytes(&key.bytes).map_err(VeilidAPIError::internal)?;
        let pk_xd = Self::ed25519_to_x25519_pk(&pk_ed)?;
        let sk_ed = ed::SecretKey::from_bytes(&secret.bytes).map_err(VeilidAPIError::internal)?;
@@ -1,7 +1,6 @@
#![allow(dead_code)]
#![allow(clippy::absurd_extreme_comparisons)]
-use super::envelope::{MAX_VERSION, MIN_VERSION};
-use super::key::*;
+use super::*;
use crate::xx::*;
use crate::*;
use core::convert::TryInto;
@@ -59,7 +58,6 @@ impl Receipt {
        sender_id: DHTKey,
        extra_data: D,
    ) -> Result<Self, VeilidAPIError> {
-        assert!(sender_id.valid);
        if extra_data.as_ref().len() > MAX_EXTRA_DATA_SIZE {
            return Err(VeilidAPIError::parse_error(
                "extra data too large for receipt",
@@ -90,9 +88,9 @@ impl Receipt {

        // Check version
        let version = data[0x04];
-        if version > MAX_VERSION || version < MIN_VERSION {
+        if version > MAX_CRYPTO_VERSION || version < MIN_CRYPTO_VERSION {
            return Err(VeilidAPIError::parse_error(
-                "unsupported protocol version",
+                "unsupported cryptography version",
                version,
            ));
        }
@@ -152,11 +150,6 @@ impl Receipt {
    }

    pub fn to_signed_data(&self, secret: &DHTKeySecret) -> Result<Vec<u8>, VeilidAPIError> {
-        // Ensure sender node id is valid
-        if !self.sender_id.valid {
-            return Err(VeilidAPIError::internal("sender id is invalid"));
-        }

        // Ensure extra data isn't too long
        let receipt_size: usize = self.extra_data.len() + MIN_RECEIPT_SIZE;
        if receipt_size > MAX_RECEIPT_SIZE {
@ -88,9 +88,7 @@ pub async fn test_key_conversions() {
// Test default key
let (dht_key, dht_key_secret) = (key::DHTKey::default(), key::DHTKeySecret::default());
assert_eq!(dht_key.bytes, EMPTY_KEY);
-assert!(!dht_key.valid);
assert_eq!(dht_key_secret.bytes, EMPTY_KEY_SECRET);
-assert!(!dht_key_secret.valid);
let dht_key_string = String::from(&dht_key);
trace!("dht_key_string: {:?}", dht_key_string);
let dht_key_string2 = String::from(&dht_key);
@ -140,11 +138,11 @@ pub async fn test_key_conversions() {

// Assert string roundtrip
assert_eq!(String::from(&dht_key2_back), dht_key2_string);
-assert!(key::DHTKey::try_from("") == Ok(key::DHTKey::default()));
-assert!(key::DHTKeySecret::try_from("") == Ok(key::DHTKeySecret::default()));
// These conversions should fail
assert!(key::DHTKey::try_from("whatever").is_err());
assert!(key::DHTKeySecret::try_from("whatever").is_err());
+assert!(key::DHTKey::try_from("").is_err());
+assert!(key::DHTKeySecret::try_from("").is_err());
assert!(key::DHTKey::try_from(" ").is_err());
assert!(key::DHTKeySecret::try_from(" ").is_err());
assert!(key::DHTKey::try_from(
@ -1,13 +0,0 @@
-mod crypto;
-mod envelope;
-mod key;
-mod receipt;
-mod value;
-
-pub mod tests;
-
-pub use crypto::*;
-pub use envelope::*;
-pub use key::*;
-pub use receipt::*;
-pub use value::*;
@ -2,6 +2,7 @@ use crate::xx::*;
use crate::*;
use data_encoding::BASE64URL_NOPAD;
use keyring_manager::*;
+use rkyv::{Archive as RkyvArchive, Deserialize as RkyvDeserialize, Serialize as RkyvSerialize};
use std::path::Path;

pub struct ProtectedStoreInner {
@ -31,15 +32,18 @@ impl ProtectedStore {
#[instrument(level = "trace", skip(self), err)]
pub async fn delete_all(&self) -> EyreResult<()> {
// Delete all known keys
-if self.remove_user_secret_string("node_id").await? {
+if self.remove_user_secret("node_id").await? {
debug!("deleted protected_store key 'node_id'");
}
-if self.remove_user_secret_string("node_id_secret").await? {
+if self.remove_user_secret("node_id_secret").await? {
debug!("deleted protected_store key 'node_id_secret'");
}
-if self.remove_user_secret_string("_test_key").await? {
+if self.remove_user_secret("_test_key").await? {
debug!("deleted protected_store key '_test_key'");
}
+if self.remove_user_secret("RouteSpecStore").await? {
+debug!("deleted protected_store key 'RouteSpecStore'");
+}
Ok(())
}

@ -139,19 +143,60 @@ impl ProtectedStore {
}
}

-#[instrument(level = "trace", skip(self), ret, err)]
-pub async fn remove_user_secret_string(&self, key: &str) -> EyreResult<bool> {
-let inner = self.inner.lock();
-match inner
-.keyring_manager
-.as_ref()
-.ok_or_else(|| eyre!("Protected store not initialized"))?
-.with_keyring(&self.service_name(), key, |kr| kr.delete_value())
-{
-Ok(_) => Ok(true),
-Err(KeyringError::NoPasswordFound) => Ok(false),
-Err(e) => Err(eyre!("Failed to remove user secret: {}", e)),
-}
+#[instrument(level = "trace", skip(self, value))]
+pub async fn save_user_secret_rkyv<T>(&self, key: &str, value: &T) -> EyreResult<bool>
+where
+T: RkyvSerialize<rkyv::ser::serializers::AllocSerializer<1024>>,
+{
+let v = to_rkyv(value)?;
+self.save_user_secret(&key, &v).await
+}
+
+#[instrument(level = "trace", skip(self, value))]
+pub async fn save_user_secret_json<T>(&self, key: &str, value: &T) -> EyreResult<bool>
+where
+T: serde::Serialize,
+{
+let v = serde_json::to_vec(value)?;
+self.save_user_secret(&key, &v).await
+}
+
+#[instrument(level = "trace", skip(self))]
+pub async fn load_user_secret_rkyv<T>(&self, key: &str) -> EyreResult<Option<T>>
+where
+T: RkyvArchive,
+<T as RkyvArchive>::Archived:
+for<'t> bytecheck::CheckBytes<rkyv::validation::validators::DefaultValidator<'t>>,
+<T as RkyvArchive>::Archived:
+RkyvDeserialize<T, rkyv::de::deserializers::SharedDeserializeMap>,
+{
+let out = self.load_user_secret(key).await?;
+let b = match out {
+Some(v) => v,
+None => {
+return Ok(None);
+}
+};
+
+let obj = from_rkyv(b)?;
+Ok(Some(obj))
+}
+
+#[instrument(level = "trace", skip(self))]
+pub async fn load_user_secret_json<T>(&self, key: &str) -> EyreResult<Option<T>>
+where
+T: for<'de> serde::de::Deserialize<'de>,
+{
+let out = self.load_user_secret(key).await?;
+let b = match out {
+Some(v) => v,
+None => {
+return Ok(None);
+}
+};
+
+let obj = serde_json::from_slice(&b)?;
+Ok(Some(obj))
}

#[instrument(level = "trace", skip(self, value), ret, err)]
@ -195,6 +240,16 @@ impl ProtectedStore {

#[instrument(level = "trace", skip(self), ret, err)]
pub async fn remove_user_secret(&self, key: &str) -> EyreResult<bool> {
-self.remove_user_secret_string(key).await
+let inner = self.inner.lock();
+match inner
+.keyring_manager
+.as_ref()
+.ok_or_else(|| eyre!("Protected store not initialized"))?
+.with_keyring(&self.service_name(), key, |kr| kr.delete_value())
+{
+Ok(_) => Ok(true),
+Err(KeyringError::NoPasswordFound) => Ok(false),
+Err(e) => Err(eyre!("Failed to remove user secret: {}", e)),
+}
}
}
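The new save_user_secret_json and load_user_secret_json helpers above layer serde on top of the existing byte-oriented secret storage (the rkyv variants do the same through to_rkyv/from_rkyv). A minimal usage sketch, assuming a ProtectedStore from this crate and a hypothetical AccountData type that is not part of this change:

use serde::{Deserialize, Serialize};

// Hypothetical application type; any serde-serializable struct would work.
#[derive(Serialize, Deserialize, Debug, PartialEq)]
struct AccountData {
    nickname: String,
    login_count: u64,
}

// Sketch: store and reload a typed secret through the JSON helpers.
async fn roundtrip_secret(store: &ProtectedStore) -> EyreResult<()> {
    let data = AccountData { nickname: "alice".into(), login_count: 3 };
    // Serializes with serde_json and writes through save_user_secret()
    store.save_user_secret_json("account_data", &data).await?;
    // Returns Ok(None) if the key has never been stored
    let loaded: Option<AccountData> = store.load_user_secret_json("account_data").await?;
    assert_eq!(loaded, Some(data));
    Ok(())
}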
@ -8,6 +8,8 @@ struct TableStoreInner {
opened: BTreeMap<String, Weak<Mutex<TableDBInner>>>,
}

+/// Veilid Table Storage
+/// Database for storing key value pairs persistently across runs
#[derive(Clone)]
pub struct TableStore {
config: VeilidConfig,
@ -20,31 +22,38 @@ impl TableStore {
opened: BTreeMap::new(),
}
}
-pub fn new(config: VeilidConfig) -> Self {
+pub(crate) fn new(config: VeilidConfig) -> Self {
Self {
config,
inner: Arc::new(Mutex::new(Self::new_inner())),
}
}

-pub async fn delete_all(&self) -> EyreResult<()> {
-// Delete all known keys
-self.delete("crypto_caches").await?;
+/// Delete all known tables
+pub async fn delete_all(&self) {
+if let Err(e) = self.delete("crypto_caches").await {
+error!("failed to delete 'crypto_caches': {}", e);
+}
+if let Err(e) = self.delete("RouteSpecStore").await {
+error!("failed to delete 'RouteSpecStore': {}", e);
+}
+if let Err(e) = self.delete("routing_table").await {
+error!("failed to delete 'routing_table': {}", e);
+}
+}
+
+pub(crate) async fn init(&self) -> EyreResult<()> {
Ok(())
}

-pub async fn init(&self) -> EyreResult<()> {
-Ok(())
-}
-
-pub async fn terminate(&self) {
+pub(crate) async fn terminate(&self) {
assert!(
self.inner.lock().opened.is_empty(),
"all open databases should have been closed"
);
}

-pub fn on_table_db_drop(&self, table: String) {
+pub(crate) fn on_table_db_drop(&self, table: String) {
let mut inner = self.inner.lock();
if inner.opened.remove(&table).is_none() {
unreachable!("should have removed an item");
@ -82,6 +91,8 @@ impl TableStore {
})
}

+/// Get or create a TableDB database table. If the column count is greater than an
+/// existing TableDB's column count, the database will be upgraded to add the missing columns
pub async fn open(&self, name: &str, column_count: u32) -> EyreResult<TableDB> {
let table_name = self.get_table_name(name)?;

@ -121,6 +132,7 @@ impl TableStore {
Ok(table_db)
}

+/// Delete a TableDB table by name
pub async fn delete(&self, name: &str) -> EyreResult<bool> {
let table_name = self.get_table_name(name)?;

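For context, TableStore::open above hands back a TableDB that is shared when the same table is opened twice, and delete_all now logs failures instead of propagating them. A minimal call-pattern sketch, assuming an already-initialized TableStore; the table name and column count are illustrative only:

// Sketch: open (or create) a one-column table and store/load a raw value.
async fn table_store_example(table_store: &TableStore) -> EyreResult<()> {
    let db = table_store.open("example_table", 1).await?;
    db.store(0, b"some_key", b"some_value")?;
    let value = db.load(0, b"some_key")?;
    assert_eq!(value.as_deref(), Some(&b"some_value"[..]));
    Ok(())
}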
@ -1,12 +1,14 @@
use crate::xx::*;
use crate::*;
-use serde::{Deserialize, Serialize};
+use rkyv::{Archive as RkyvArchive, Deserialize as RkyvDeserialize, Serialize as RkyvSerialize};

cfg_if! {
if #[cfg(target_arch = "wasm32")] {
use keyvaluedb_web::*;
+use keyvaluedb::*;
} else {
use keyvaluedb_sqlite::*;
+use keyvaluedb::*;
}
}

@ -28,7 +30,7 @@ pub struct TableDB {
}

impl TableDB {
-pub fn new(table: String, table_store: TableStore, database: Database) -> Self {
+pub(super) fn new(table: String, table_store: TableStore, database: Database) -> Self {
Self {
inner: Arc::new(Mutex::new(TableDBInner {
table,
@ -38,22 +40,24 @@ impl TableDB {
}
}

-pub fn try_new_from_weak_inner(weak_inner: Weak<Mutex<TableDBInner>>) -> Option<Self> {
+pub(super) fn try_new_from_weak_inner(weak_inner: Weak<Mutex<TableDBInner>>) -> Option<Self> {
weak_inner.upgrade().map(|table_db_inner| Self {
inner: table_db_inner,
})
}

-pub fn weak_inner(&self) -> Weak<Mutex<TableDBInner>> {
+pub(super) fn weak_inner(&self) -> Weak<Mutex<TableDBInner>> {
Arc::downgrade(&self.inner)
}

-pub async fn get_column_count(&self) -> EyreResult<u32> {
+/// Get the total number of columns in the TableDB
+pub fn get_column_count(&self) -> EyreResult<u32> {
let db = &self.inner.lock().database;
db.num_columns().wrap_err("failed to get column count: {}")
}

-pub async fn get_keys(&self, col: u32) -> EyreResult<Vec<Box<[u8]>>> {
+/// Get the list of keys in a column of the TableDB
+pub fn get_keys(&self, col: u32) -> EyreResult<Vec<Box<[u8]>>> {
let db = &self.inner.lock().database;
let mut out: Vec<Box<[u8]>> = Vec::new();
db.iter(col, None, &mut |kv| {
@ -64,18 +68,29 @@ impl TableDB {
Ok(out)
}

-pub async fn store(&self, col: u32, key: &[u8], value: &[u8]) -> EyreResult<()> {
+/// Start a TableDB write transaction. The transaction object must be committed or rolled back before dropping.
+pub fn transact<'a>(&'a self) -> TableDBTransaction<'a> {
+let dbt = {
+let db = &self.inner.lock().database;
+db.transaction()
+};
+TableDBTransaction::new(self, dbt)
+}
+
+/// Store a key with a value in a column in the TableDB. Performs a single transaction immediately.
+pub fn store(&self, col: u32, key: &[u8], value: &[u8]) -> EyreResult<()> {
let db = &self.inner.lock().database;
let mut dbt = db.transaction();
dbt.put(col, key, value);
db.write(dbt).wrap_err("failed to store key")
}

-pub async fn store_cbor<T>(&self, col: u32, key: &[u8], value: &T) -> EyreResult<()>
+/// Store a key in rkyv format with a value in a column in the TableDB. Performs a single transaction immediately.
+pub fn store_rkyv<T>(&self, col: u32, key: &[u8], value: &T) -> EyreResult<()>
where
-T: Serialize,
+T: RkyvSerialize<rkyv::ser::serializers::AllocSerializer<1024>>,
{
-let v = serde_cbor::to_vec(value).wrap_err("couldn't store as CBOR")?;
+let v = to_rkyv(value)?;

let db = &self.inner.lock().database;
let mut dbt = db.transaction();
@ -83,14 +98,33 @@ impl TableDB {
db.write(dbt).wrap_err("failed to store key")
}

-pub async fn load(&self, col: u32, key: &[u8]) -> EyreResult<Option<Vec<u8>>> {
+/// Store a key in json format with a value in a column in the TableDB. Performs a single transaction immediately.
+pub fn store_json<T>(&self, col: u32, key: &[u8], value: &T) -> EyreResult<()>
+where
+T: serde::Serialize,
+{
+let v = serde_json::to_vec(value)?;
+
+let db = &self.inner.lock().database;
+let mut dbt = db.transaction();
+dbt.put(col, key, v.as_slice());
+db.write(dbt).wrap_err("failed to store key")
+}
+
+/// Read a key from a column in the TableDB immediately.
+pub fn load(&self, col: u32, key: &[u8]) -> EyreResult<Option<Vec<u8>>> {
let db = &self.inner.lock().database;
db.get(col, key).wrap_err("failed to get key")
}

-pub async fn load_cbor<T>(&self, col: u32, key: &[u8]) -> EyreResult<Option<T>>
+/// Read an rkyv key from a column in the TableDB immediately
+pub fn load_rkyv<T>(&self, col: u32, key: &[u8]) -> EyreResult<Option<T>>
where
-T: for<'de> Deserialize<'de>,
+T: RkyvArchive,
+<T as RkyvArchive>::Archived:
+for<'t> bytecheck::CheckBytes<rkyv::validation::validators::DefaultValidator<'t>>,
+<T as RkyvArchive>::Archived:
+RkyvDeserialize<T, rkyv::de::deserializers::SharedDeserializeMap>,
{
let db = &self.inner.lock().database;
let out = db.get(col, key).wrap_err("failed to get key")?;
@ -100,11 +134,29 @@ impl TableDB {
return Ok(None);
}
};
-let obj = serde_cbor::from_slice::<T>(&b).wrap_err("failed to deserialize")?;
+let obj = from_rkyv(b)?;
Ok(Some(obj))
}

-pub async fn delete(&self, col: u32, key: &[u8]) -> EyreResult<bool> {
+/// Read an serde-json key from a column in the TableDB immediately
+pub fn load_json<T>(&self, col: u32, key: &[u8]) -> EyreResult<Option<T>>
+where
+T: for<'de> serde::Deserialize<'de>,
+{
+let db = &self.inner.lock().database;
+let out = db.get(col, key).wrap_err("failed to get key")?;
+let b = match out {
+Some(v) => v,
+None => {
+return Ok(None);
+}
+};
+let obj = serde_json::from_slice(&b)?;
+Ok(Some(obj))
+}
+
+/// Delete key with from a column in the TableDB
+pub fn delete(&self, col: u32, key: &[u8]) -> EyreResult<bool> {
let db = &self.inner.lock().database;
let found = db.get(col, key).wrap_err("failed to get key")?;
match found {
@ -118,3 +170,76 @@ impl TableDB {
}
}
}
+
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+
+/// A TableDB transaction
+/// Atomically commits a group of writes or deletes to the TableDB
+pub struct TableDBTransaction<'a> {
+db: &'a TableDB,
+dbt: Option<DBTransaction>,
+_phantom: core::marker::PhantomData<&'a ()>,
+}
+
+impl<'a> TableDBTransaction<'a> {
+fn new(db: &'a TableDB, dbt: DBTransaction) -> Self {
+Self {
+db,
+dbt: Some(dbt),
+_phantom: Default::default(),
+}
+}
+
+/// Commit the transaction. Performs all actions atomically.
+pub fn commit(mut self) -> EyreResult<()> {
+self.db
+.inner
+.lock()
+.database
+.write(self.dbt.take().unwrap())
+.wrap_err("commit failed")
+}
+
+/// Rollback the transaction. Does nothing to the TableDB.
+pub fn rollback(mut self) {
+self.dbt = None;
+}
+
+/// Store a key with a value in a column in the TableDB
+pub fn store(&mut self, col: u32, key: &[u8], value: &[u8]) {
+self.dbt.as_mut().unwrap().put(col, key, value);
+}
+
+/// Store a key in rkyv format with a value in a column in the TableDB
+pub fn store_rkyv<T>(&mut self, col: u32, key: &[u8], value: &T) -> EyreResult<()>
+where
+T: RkyvSerialize<rkyv::ser::serializers::AllocSerializer<1024>>,
+{
+let v = to_rkyv(value)?;
+self.dbt.as_mut().unwrap().put(col, key, v.as_slice());
+Ok(())
+}
+
+/// Store a key in rkyv format with a value in a column in the TableDB
+pub fn store_json<T>(&mut self, col: u32, key: &[u8], value: &T) -> EyreResult<()>
+where
+T: serde::Serialize,
+{
+let v = serde_json::to_vec(value)?;
+self.dbt.as_mut().unwrap().put(col, key, v.as_slice());
+Ok(())
+}
+
+/// Delete key with from a column in the TableDB
+pub fn delete(&mut self, col: u32, key: &[u8]) {
+self.dbt.as_mut().unwrap().delete(col, key);
+}
+}
+
+impl<'a> Drop for TableDBTransaction<'a> {
+fn drop(&mut self) {
+if self.dbt.is_some() {
+warn!("Dropped transaction without commit or rollback");
+}
+}
+}
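TableDBTransaction above batches puts and deletes and applies them with one keyvaluedb write when commit() is called; dropping it without commit() or rollback() only logs a warning. A usage sketch, assuming a TableDB opened as in the TableStore example earlier:

// Sketch: group several writes so they land atomically or not at all.
fn write_batch(db: &TableDB) -> EyreResult<()> {
    let mut txn = db.transact();
    txn.store(0, b"key_a", b"value_a");
    txn.store(0, b"key_b", b"value_b");
    txn.delete(0, b"stale_key");
    // All three operations are written in a single database transaction.
    txn.commit()
}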
@ -3,52 +3,45 @@ use crate::xx::*;
use crate::*;
use data_encoding::BASE64URL_NOPAD;
use js_sys::*;
+use send_wrapper::*;
+use serde::{Deserialize, Serialize};
use wasm_bindgen_futures::*;
use web_sys::*;

-#[wasm_bindgen]
-extern "C" {
-#[wasm_bindgen(catch, js_name = setPassword, js_namespace = ["global", "wasmhost", "keytar"])]
-fn keytar_setPassword(service: &str, account: &str, password: &str)
--> Result<Promise, JsValue>;
-#[wasm_bindgen(catch, js_name = getPassword, js_namespace = ["global", "wasmhost", "keytar"])]
-fn keytar_getPassword(service: &str, account: &str) -> Result<Promise, JsValue>;
-#[wasm_bindgen(catch, js_name = deletePassword, js_namespace = ["global", "wasmhost", "keytar"])]
-fn keytar_deletePassword(service: &str, account: &str) -> Result<Promise, JsValue>;
-}
-
#[derive(Clone)]
pub struct ProtectedStore {
config: VeilidConfig,
}

impl ProtectedStore {

pub fn new(config: VeilidConfig) -> Self {
-Self {
-config,
-}
+Self { config }
}

+#[instrument(level = "trace", skip(self), err)]
pub async fn delete_all(&self) -> EyreResult<()> {
// Delete all known keys
-if self.remove_user_secret_string("node_id").await? {
+if self.remove_user_secret("node_id").await? {
debug!("deleted protected_store key 'node_id'");
}
-if self.remove_user_secret_string("node_id_secret").await? {
+if self.remove_user_secret("node_id_secret").await? {
debug!("deleted protected_store key 'node_id_secret'");
}
-if self.remove_user_secret_string("_test_key").await? {
+if self.remove_user_secret("_test_key").await? {
debug!("deleted protected_store key '_test_key'");
}
+if self.remove_user_secret("RouteSpecStore").await? {
+debug!("deleted protected_store key 'RouteSpecStore'");
+}
Ok(())
}

+#[instrument(level = "debug", skip(self), err)]
pub async fn init(&self) -> EyreResult<()> {
Ok(())
}

+#[instrument(level = "debug", skip(self))]
pub async fn terminate(&self) {}

fn keyring_name(&self) -> String {
@ -69,32 +62,9 @@ impl ProtectedStore {
}
}

+//#[instrument(level = "trace", skip(self, value), ret, err)]
pub async fn save_user_secret_string(&self, key: &str, value: &str) -> EyreResult<bool> {
-if is_nodejs() {
-let prev = match JsFuture::from(
-keytar_getPassword(self.keyring_name().as_str(), key)
-.map_err(map_jsvalue_error)
-.wrap_err("exception thrown")?,
-)
-.await
-{
-Ok(v) => v.is_truthy(),
-Err(_) => false,
-};
-
-match JsFuture::from(
-keytar_setPassword(self.keyring_name().as_str(), key, value)
-.map_err(map_jsvalue_error)
-.wrap_err("exception thrown")?,
-)
-.await
-{
-Ok(_) => {}
-Err(_) => bail!("Failed to set password"),
-}
-
-Ok(prev)
-} else if is_browser() {
+if is_browser() {
let win = match window() {
Some(w) => w,
None => {
@ -134,25 +104,9 @@ impl ProtectedStore {
}
}

+#[instrument(level = "trace", skip(self), err)]
pub async fn load_user_secret_string(&self, key: &str) -> EyreResult<Option<String>> {
-if is_nodejs() {
-let prev = match JsFuture::from(
-keytar_getPassword(self.keyring_name().as_str(), key)
-.map_err(map_jsvalue_error)
-.wrap_err("exception thrown")?,
-)
-.await
-{
-Ok(p) => p,
-Err(_) => JsValue::UNDEFINED,
-};
-
-if prev.is_undefined() || prev.is_null() {
-return Ok(None);
-}
-
-Ok(prev.as_string())
-} else if is_browser() {
+if is_browser() {
let win = match window() {
Some(w) => w,
None => {
@ -181,19 +135,78 @@ impl ProtectedStore {
}
}

-pub async fn remove_user_secret_string(&self, key: &str) -> EyreResult<bool> {
-if is_nodejs() {
-match JsFuture::from(
-keytar_deletePassword(self.keyring_name().as_str(), key)
-.map_err(map_jsvalue_error)
-.wrap_err("exception thrown")?,
-)
-.await
-{
-Ok(v) => Ok(v.is_truthy()),
-Err(_) => bail!("Failed to delete"),
-}
-} else if is_browser() {
+#[instrument(level = "trace", skip(self, value))]
+pub async fn save_user_secret_frozen<T>(&self, key: &str, value: &T) -> EyreResult<bool>
+where
+T: RkyvSerialize<rkyv::ser::serializers::AllocSerializer<1024>>,
+{
+let v = to_frozen(value)?;
+self.save_user_secret(&key, &v).await
+}
+
+#[instrument(level = "trace", skip(self))]
+pub async fn load_user_secret_frozen<T>(&self, key: &str) -> EyreResult<Option<T>>
+where
+T: RkyvArchive,
+<T as RkyvArchive>::Archived:
+for<'t> bytecheck::CheckBytes<rkyv::validation::validators::DefaultValidator<'t>>,
+<T as RkyvArchive>::Archived:
+rkyv::Deserialize<T, rkyv::de::deserializers::SharedDeserializeMap>,
+{
+let out = self.load_user_secret(key).await?;
+let b = match out {
+Some(v) => v,
+None => {
+return Ok(None);
+}
+};
+
+let obj = from_frozen(&b)?;
+Ok(Some(obj))
+}
+
+#[instrument(level = "trace", skip(self, value), ret, err)]
+pub async fn save_user_secret(&self, key: &str, value: &[u8]) -> EyreResult<bool> {
+let mut s = BASE64URL_NOPAD.encode(value);
+s.push('!');
+
+self.save_user_secret_string(key, s.as_str()).await
+}
+
+#[instrument(level = "trace", skip(self), err)]
+pub async fn load_user_secret(&self, key: &str) -> EyreResult<Option<Vec<u8>>> {
+let mut s = match self.load_user_secret_string(key).await? {
+Some(s) => s,
+None => {
+return Ok(None);
+}
+};
+
+if s.pop() != Some('!') {
+bail!("User secret is not a buffer");
+}
+
+let mut bytes = Vec::<u8>::new();
+let res = BASE64URL_NOPAD.decode_len(s.len());
+match res {
+Ok(l) => {
+bytes.resize(l, 0u8);
+}
+Err(_) => {
+bail!("Failed to decode");
+}
+}
+
+let res = BASE64URL_NOPAD.decode_mut(s.as_bytes(), &mut bytes);
+match res {
+Ok(_) => Ok(Some(bytes)),
+Err(_) => bail!("Failed to decode"),
+}
+}
+
+#[instrument(level = "trace", skip(self), ret, err)]
+pub async fn remove_user_secret(&self, key: &str) -> EyreResult<bool> {
+if is_browser() {
let win = match window() {
Some(w) => w,
None => {
@ -231,45 +244,4 @@ impl ProtectedStore {
unimplemented!();
}
}
+}
-pub async fn save_user_secret(&self, key: &str, value: &[u8]) -> EyreResult<bool> {
-let mut s = BASE64URL_NOPAD.encode(value);
-s.push('!');
-
-self.save_user_secret_string(key, s.as_str()).await
-}
-
-pub async fn load_user_secret(&self, key: &str) -> EyreResult<Option<Vec<u8>>> {
-let mut s = match self.load_user_secret_string(key).await? {
-Some(s) => s,
-None => {
-return Ok(None);
-}
-};
-
-if s.pop() != Some('!') {
-bail!("User secret is not a buffer");
-}
-
-let mut bytes = Vec::<u8>::new();
-let res = BASE64URL_NOPAD.decode_len(s.len());
-match res {
-Ok(l) => {
-bytes.resize(l, 0u8);
-}
-Err(_) => {
-bail!("Failed to decode");
-}
-}
-
-let res = BASE64URL_NOPAD.decode_mut(s.as_bytes(), &mut bytes);
-match res {
-Ok(_) => Ok(Some(bytes)),
-Err(_) => bail!("Failed to decode"),
-}
-}
-
-pub async fn remove_user_secret(&self, key: &str) -> EyreResult<bool> {
-self.remove_user_secret_string(key).await
-}
-}
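The wasm ProtectedStore keeps binary secrets inside string-only browser storage by base64url-encoding them and appending a '!' marker, so a plain string and an encoded buffer can be told apart on load. A standalone sketch of that encoding scheme using the data_encoding crate; the helper names here are illustrative, not part of the diff:

use data_encoding::BASE64URL_NOPAD;

// Encode bytes into the "buffer" string form used by the protected store.
fn encode_secret(value: &[u8]) -> String {
    let mut s = BASE64URL_NOPAD.encode(value);
    s.push('!');
    s
}

// Decode the string form back into bytes, rejecting strings without the marker.
fn decode_secret(mut s: String) -> Option<Vec<u8>> {
    if s.pop() != Some('!') {
        return None; // not an encoded buffer
    }
    BASE64URL_NOPAD.decode(s.as_bytes()).ok()
}

fn main() {
    let encoded = encode_secret(b"secret bytes");
    assert_eq!(decode_secret(encoded), Some(b"secret bytes".to_vec()));
}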
@ -19,10 +19,8 @@ extern "C" {
pub fn get_timestamp() -> u64 {
if utils::is_browser() {
return (Date::now() * 1000.0f64) as u64;
-} else if utils::is_nodejs() {
-return (Date::now() * 1000.0f64) as u64;
} else {
-panic!("WASM requires browser or nodejs environment");
+panic!("WASM requires browser environment");
}
}

@ -85,18 +83,22 @@ pub fn spawn<Out>(future: impl Future<Output = Out> + Send + 'static) -> MustJoi
where
Out: Send + 'static,
{
-MustJoinHandle::new(Bindgen
-.spawn_handle(future)
-.expect("wasm-bindgen-futures spawn should never error out"))
+MustJoinHandle::new(
+Bindgen
+.spawn_handle(future)
+.expect("wasm-bindgen-futures spawn should never error out"),
+)
}

pub fn spawn_local<Out>(future: impl Future<Output = Out> + 'static) -> MustJoinHandle<Out>
where
Out: 'static,
{
-MustJoinHandle::new(Bindgen
-.spawn_handle_local(future)
-.expect("wasm-bindgen-futures spawn_local should never error out"))
+MustJoinHandle::new(
+Bindgen
+.spawn_handle_local(future)
+.expect("wasm-bindgen-futures spawn_local should never error out"),
+)
}

// pub fn spawn_with_local_set<Out>(
@ -114,10 +116,10 @@ where
{
Bindgen
.spawn_handle_local(future)
-.expect("wasm-bindgen-futures spawn_local should never error out").detach()
+.expect("wasm-bindgen-futures spawn_local should never error out")
+.detach()
}

pub fn interval<F, FUT>(freq_ms: u32, callback: F) -> SendPinBoxFuture<()>
where
F: Fn() -> FUT + Send + Sync + 'static,
@ -160,12 +162,12 @@ pub async fn get_outbound_relay_peer() -> Option<crate::veilid_api::PeerInfo> {

// pub async fn get_pwa_web_server_config() -> {
// if utils::is_browser() {

// let win = window().unwrap();
// let doc = win.document().unwrap();
// let html_document = document.dyn_into::<web_sys::HtmlDocument>().unwrap();
// let cookie = html_document.cookie().unwrap();

// // let wait_millis = if millis > u32::MAX {
// // i32::MAX
// // } else {
@ -177,22 +179,14 @@ pub async fn get_outbound_relay_peer() -> Option<crate::veilid_api::PeerInfo> {
// // .unwrap();
// // });

-// // JsFuture::from(promise).await.unwrap();
-// } else if utils::is_nodejs() {
-// // let promise = Promise::new(&mut |yes, _| {
-// // nodejs_global_set_timeout_with_callback_and_timeout_and_arguments_0(&yes, millis)
-// // .unwrap();
-// // });
-
// // JsFuture::from(promise).await.unwrap();
// } else {
-// panic!("WASM requires browser or nodejs environment");
+// panic!("WASM requires browser environment");
// }
// }

pub async fn txt_lookup<S: AsRef<str>>(_host: S) -> EyreResult<Vec<String>> {
bail!("wasm does not support txt lookup")
}

pub async fn ptr_lookup(_ip_addr: IpAddr) -> EyreResult<String> {
@ -22,7 +22,7 @@ impl TableStore {
opened: BTreeMap::new(),
}
}
-pub fn new(config: VeilidConfig) -> Self {
+pub(crate) fn new(config: VeilidConfig) -> Self {
Self {
config,
inner: Arc::new(Mutex::new(Self::new_inner())),
@ -30,12 +30,25 @@ impl TableStore {
}
}

-pub async fn init(&self) -> EyreResult<()> {
+/// Delete all known tables
+pub async fn delete_all(&self) {
+if let Err(e) = self.delete("crypto_caches").await {
+error!("failed to delete 'crypto_caches': {}", e);
+}
+if let Err(e) = self.delete("RouteSpecStore").await {
+error!("failed to delete 'RouteSpecStore': {}", e);
+}
+if let Err(e) = self.delete("routing_table").await {
+error!("failed to delete 'routing_table': {}", e);
+}
+}
+
+pub(crate) async fn init(&self) -> EyreResult<()> {
let _async_guard = self.async_lock.lock().await;
Ok(())
}

-pub async fn terminate(&self) {
+pub(crate) async fn terminate(&self) {
let _async_guard = self.async_lock.lock().await;
assert!(
self.inner.lock().opened.len() == 0,
@ -43,7 +56,7 @@ impl TableStore {
);
}

-pub fn on_table_db_drop(&self, table: String) {
+pub(crate) fn on_table_db_drop(&self, table: String) {
let mut inner = self.inner.lock();
match inner.opened.remove(&table) {
Some(_) => (),
@ -69,12 +82,14 @@ impl TableStore {
})
}

+/// Get or create a TableDB database table. If the column count is greater than an
+/// existing TableDB's column count, the database will be upgraded to add the missing columns
pub async fn open(&self, name: &str, column_count: u32) -> EyreResult<TableDB> {
let _async_guard = self.async_lock.lock().await;
let table_name = self.get_table_name(name)?;

{
let mut inner = self.inner.lock();
if let Some(table_db_weak_inner) = inner.opened.get(&table_name) {
match TableDB::try_new_from_weak_inner(table_db_weak_inner.clone()) {
Some(tdb) => {
@ -89,7 +104,10 @@ impl TableStore {
let db = Database::open(table_name.clone(), column_count)
.await
.wrap_err("failed to open tabledb")?;
-info!("opened table store '{}' with table name '{:?}' with {} columns", name, table_name, column_count);
+info!(
+"opened table store '{}' with table name '{:?}' with {} columns",
+name, table_name, column_count
+);

let table_db = TableDB::new(table_name.clone(), self.clone(), db);

@ -101,11 +119,12 @@ impl TableStore {
Ok(table_db)
}

+/// Delete a TableDB table by name
pub async fn delete(&self, name: &str) -> EyreResult<bool> {
let _async_guard = self.async_lock.lock().await;
trace!("TableStore::delete {}", name);
let table_name = self.get_table_name(name)?;

{
let inner = self.inner.lock();
if inner.opened.contains_key(&table_name) {
@ -117,9 +136,7 @@ impl TableStore {
}
}

-if utils::is_nodejs() {
-unimplemented!();
-} else if utils::is_browser() {
+if utils::is_browser() {
let out = match Database::delete(table_name.clone()).await {
Ok(_) => true,
Err(_) => false,
@ -15,21 +15,6 @@ extern "C" {
pub fn alert(s: &str);
}

-pub fn is_nodejs() -> bool {
-static CACHE: AtomicI8 = AtomicI8::new(-1);
-let cache = CACHE.load(Ordering::Relaxed);
-if cache != -1 {
-return cache != 0;
-}
-
-let res = js_sys::eval("process.release.name === 'node'")
-.map(|res| res.is_truthy())
-.unwrap_or_default();
-
-CACHE.store(res as i8, Ordering::Relaxed);
-res
-}
-
pub fn is_browser() -> bool {
static CACHE: AtomicI8 = AtomicI8::new(-1);
let cache = CACHE.load(Ordering::Relaxed);
@ -60,24 +45,6 @@ pub fn is_browser() -> bool {
// res
// }

-// pub fn node_require(module: &str) -> JsValue {
-// if !is_nodejs() {
-// return JsValue::UNDEFINED;
-// }
-
-// let mut home = env!("CARGO_MANIFEST_DIR");
-// if home.len() == 0 {
-// home = ".";
-// }
-
-// match js_sys::eval(format!("require(\"{}/{}\")", home, module).as_str()) {
-// Ok(v) => v,
-// Err(e) => {
-// panic!("node_require failed: {:?}", e);
-// }
-// }
-// }
-
#[derive(ThisError, Debug, Clone, Eq, PartialEq)]
#[error("JsValue error")]
pub struct JsValueError(String);
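is_browser() above memoizes its environment probe in a static AtomicI8 (-1 unknown, 0 false, 1 true) so the js_sys::eval check runs only once per process. A standalone sketch of that caching pattern with an arbitrary stand-in probe:

use std::sync::atomic::{AtomicI8, Ordering};

// Stand-in for the real environment probe (js_sys::eval in the wasm code).
fn expensive_probe() -> bool {
    true
}

pub fn is_browser_like() -> bool {
    // -1 = not yet computed, 0 = false, 1 = true
    static CACHE: AtomicI8 = AtomicI8::new(-1);
    let cached = CACHE.load(Ordering::Relaxed);
    if cached != -1 {
        return cached != 0;
    }
    let res = expensive_probe();
    CACHE.store(res as i8, Ordering::Relaxed);
    res
}

fn main() {
    assert!(is_browser_like());
    assert!(is_browser_like()); // second call is served from the cache
}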
@ -22,7 +22,7 @@ mod api_tracing_layer;
mod attachment_manager;
mod callback_state_machine;
mod core_context;
-mod dht;
+mod crypto;
mod intf;
mod network_manager;
mod receipt_manager;
@ -64,12 +64,14 @@ pub fn veilid_version() -> (u32, u32, u32) {
#[cfg(target_os = "android")]
pub use intf::utils::android::{veilid_core_setup_android, veilid_core_setup_android_no_log};

-pub static DEFAULT_LOG_IGNORE_LIST: [&str; 19] = [
+pub static DEFAULT_LOG_IGNORE_LIST: [&str; 21] = [
"mio",
"h2",
"hyper",
"tower",
"tonic",
+"tokio",
+"runtime",
"tokio_util",
"want",
"serial_test",
@ -4,7 +4,7 @@ use super::*;
pub struct ConnectionHandle {
id: u64,
descriptor: ConnectionDescriptor,
-channel: flume::Sender<Vec<u8>>,
+channel: flume::Sender<(Option<Id>, Vec<u8>)>,
}

#[derive(Debug)]
@ -17,7 +17,7 @@ impl ConnectionHandle {
pub(super) fn new(
id: u64,
descriptor: ConnectionDescriptor,
-channel: flume::Sender<Vec<u8>>,
+channel: flume::Sender<(Option<Id>, Vec<u8>)>,
) -> Self {
Self {
id,
@ -34,16 +34,22 @@ impl ConnectionHandle {
self.descriptor.clone()
}

+#[instrument(level="trace", skip(self, message), fields(message.len = message.len()))]
pub fn send(&self, message: Vec<u8>) -> ConnectionHandleSendResult {
-match self.channel.send(message) {
+match self.channel.send((Span::current().id(), message)) {
Ok(()) => ConnectionHandleSendResult::Sent,
-Err(e) => ConnectionHandleSendResult::NotSent(e.0),
+Err(e) => ConnectionHandleSendResult::NotSent(e.0 .1),
}
}
+#[instrument(level="trace", skip(self, message), fields(message.len = message.len()))]
pub async fn send_async(&self, message: Vec<u8>) -> ConnectionHandleSendResult {
-match self.channel.send_async(message).await {
+match self
+.channel
+.send_async((Span::current().id(), message))
+.await
+{
Ok(()) => ConnectionHandleSendResult::Sent,
-Err(e) => ConnectionHandleSendResult::NotSent(e.0),
+Err(e) => ConnectionHandleSendResult::NotSent(e.0 .1),
}
}
}
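ConnectionHandle::send now ships the caller's tracing span id alongside the message so the connection's processing loop can be correlated with the span that queued the send. A standalone sketch of that pattern using the flume and tracing crates; the names here are illustrative and not the connection manager's own types:

use tracing::span::Id;
use tracing::{span, Level, Span};

fn main() {
    let (tx, rx) = flume::unbounded::<(Option<Id>, Vec<u8>)>();

    // Sender side: capture the current span id next to the payload.
    let send_span = span!(Level::TRACE, "send_message");
    let _enter = send_span.enter();
    tx.send((Span::current().id(), b"hello".to_vec())).unwrap();

    // Receiver side: link the processing span back to the sender's span.
    let (span_id, message) = rx.recv().unwrap();
    let recv_span = span!(Level::TRACE, "process_message");
    recv_span.follows_from(span_id);
    let _enter2 = recv_span.enter();
    assert_eq!(message, b"hello");
}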
@ -26,6 +26,7 @@ struct ConnectionManagerArc {
connection_initial_timeout_ms: u32,
connection_inactivity_timeout_ms: u32,
connection_table: ConnectionTable,
+address_lock_table: AsyncTagLockTable<SocketAddr>,
inner: Mutex<Option<ConnectionManagerInner>>,
}
impl core::fmt::Debug for ConnectionManagerArc {
@ -69,6 +70,7 @@ impl ConnectionManager {
connection_initial_timeout_ms,
connection_inactivity_timeout_ms,
connection_table: ConnectionTable::new(config),
+address_lock_table: AsyncTagLockTable::new(),
inner: Mutex::new(None),
}
}
@ -140,6 +142,7 @@ impl ConnectionManager {
// Internal routine to register new connection atomically.
// Registers connection in the connection table for later access
// and spawns a message processing loop for the connection
+#[instrument(level = "trace", skip(self, inner), ret, err)]
fn on_new_protocol_network_connection(
&self,
inner: &mut ConnectionManagerInner,
@ -195,6 +198,7 @@ impl ConnectionManager {
}

// Returns a network connection if one already is established
+//#[instrument(level = "trace", skip(self), ret)]
pub fn get_connection(&self, descriptor: ConnectionDescriptor) -> Option<ConnectionHandle> {
self.arc
.connection_table
@ -228,21 +232,29 @@ impl ConnectionManager {
});
// Wait for the killed connections to end their recv loops
let did_kill = !killed.is_empty();
-for k in killed {
+for mut k in killed {
+k.close();
k.await;
}
did_kill
}

-// Called when we want to create a new connection or get the current one that already exists
-// This will kill off any connections that are in conflict with the new connection to be made
-// in order to make room for the new connection in the system's connection table
-// This routine needs to be atomic, or connections may exist in the table that are not established
+/// Called when we want to create a new connection or get the current one that already exists
+/// This will kill off any connections that are in conflict with the new connection to be made
+/// in order to make room for the new connection in the system's connection table
+/// This routine needs to be atomic, or connections may exist in the table that are not established
+#[instrument(level = "trace", skip(self), ret, err)]
pub async fn get_or_create_connection(
&self,
local_addr: Option<SocketAddr>,
dial_info: DialInfo,
) -> EyreResult<NetworkResult<ConnectionHandle>> {
+// Async lock on the remote address for atomicity per remote
+let peer_address = dial_info.to_peer_address();
+let remote_addr = peer_address.to_socket_addr();
+
+let _lock_guard = self.arc.address_lock_table.lock_tag(remote_addr).await;
+
log_net!(
"== get_or_create_connection local_addr={:?} dial_info={:?}",
local_addr.green(),
@ -253,21 +265,12 @@ impl ConnectionManager {
let did_kill = self.kill_off_colliding_connections(&dial_info).await;
let mut retry_count = if did_kill { 2 } else { 0 };

-// Make a connection descriptor for this dialinfo
-let peer_address = dial_info.to_peer_address();
-let descriptor = match local_addr {
-Some(la) => {
-ConnectionDescriptor::new(peer_address, SocketAddress::from_socket_addr(la))
-}
-None => ConnectionDescriptor::new_no_local(peer_address),
-};
-
// If any connection to this remote exists that has the same protocol, return it
// Any connection will do, we don't have to match the local address
if let Some(conn) = self
.arc
.connection_table
-.get_last_connection_by_remote(descriptor.remote())
+.get_last_connection_by_remote(peer_address)
{
log_net!(
"== Returning existing connection local_addr={:?} peer_address={:?}",
@ -288,6 +291,23 @@ impl ConnectionManager {
.await;
match result_net_res {
Ok(net_res) => {
+// If the connection 'already exists', then try one last time to return a connection from the table, in case
+// an 'accept' happened at literally the same time as our connect
+if net_res.is_already_exists() {
+if let Some(conn) = self
+.arc
+.connection_table
+.get_last_connection_by_remote(peer_address)
+{
+log_net!(
+"== Returning existing connection in race local_addr={:?} peer_address={:?}",
+local_addr.green(),
+peer_address.green()
+);
+
+return Ok(NetworkResult::Value(conn));
+}
+}
if net_res.is_value() || retry_count == 0 {
break net_res;
}
@ -351,7 +371,7 @@ impl ConnectionManager {

// Called by low-level network when any connection-oriented protocol connection appears
// either from incoming connections.
-#[cfg_attr(target_os = "wasm32", allow(dead_code))]
+#[cfg_attr(target_arch = "wasm32", allow(dead_code))]
pub(super) async fn on_accepted_protocol_network_connection(
&self,
protocol_connection: ProtocolNetworkConnection,
@ -378,6 +398,7 @@ impl ConnectionManager {

// Callback from network connection receive loop when it exits
// cleans up the entry in the connection table
+#[instrument(level = "trace", skip(self))]
pub(super) async fn report_connection_finished(&self, connection_id: u64) {
// Get channel sender
let sender = {
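get_or_create_connection above now serializes all work for a given remote SocketAddr through the new AsyncTagLockTable before touching the connection table, which is what makes the connect/accept race check meaningful. AsyncTagLockTable itself is a veilid-core type not shown in this diff; the following is only a toy sketch of the per-key locking idea using tokio primitives:

use std::collections::HashMap;
use std::net::SocketAddr;
use std::sync::{Arc, Mutex};
use tokio::sync::OwnedMutexGuard;

// Toy per-key lock table: one async mutex per remote address (never pruned here).
#[derive(Default, Clone)]
struct TagLockTable {
    locks: Arc<Mutex<HashMap<SocketAddr, Arc<tokio::sync::Mutex<()>>>>>,
}

impl TagLockTable {
    async fn lock_tag(&self, addr: SocketAddr) -> OwnedMutexGuard<()> {
        // Get or create the mutex for this address, then await it outside the map lock.
        let entry = self
            .locks
            .lock()
            .unwrap()
            .entry(addr)
            .or_insert_with(|| Arc::new(tokio::sync::Mutex::new(())))
            .clone();
        entry.lock_owned().await
    }
}

#[tokio::main]
async fn main() {
    let table = TagLockTable::default();
    let addr: SocketAddr = "127.0.0.1:5150".parse().unwrap();
    // Only one task at a time can hold the guard for this remote address.
    let _guard = table.lock_tag(addr).await;
    // ... connect or accept work for `addr` would happen here ...
}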
@ -72,6 +72,7 @@ impl ConnectionTable {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[instrument(level = "trace", skip(self))]
|
||||||
pub async fn join(&self) {
|
pub async fn join(&self) {
|
||||||
let mut unord = {
|
let mut unord = {
|
||||||
let mut inner = self.inner.lock();
|
let mut inner = self.inner.lock();
|
||||||
@ -90,6 +91,7 @@ impl ConnectionTable {
|
|||||||
while unord.next().await.is_some() {}
|
while unord.next().await.is_some() {}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[instrument(level = "trace", skip(self), ret, err)]
|
||||||
pub fn add_connection(
|
pub fn add_connection(
|
||||||
&self,
|
&self,
|
||||||
network_connection: NetworkConnection,
|
network_connection: NetworkConnection,
|
||||||
@ -142,7 +144,7 @@ impl ConnectionTable {
|
|||||||
let mut out_conn = None;
|
let mut out_conn = None;
|
||||||
if inner.conn_by_id[protocol_index].len() > inner.max_connections[protocol_index] {
|
if inner.conn_by_id[protocol_index].len() > inner.max_connections[protocol_index] {
|
||||||
if let Some((lruk, lru_conn)) = inner.conn_by_id[protocol_index].remove_lru() {
|
if let Some((lruk, lru_conn)) = inner.conn_by_id[protocol_index].remove_lru() {
|
||||||
debug!("connection lru out: {:?}", lru_conn);
|
log_net!(debug "connection lru out: {:?}", lru_conn);
|
||||||
out_conn = Some(lru_conn);
|
out_conn = Some(lru_conn);
|
||||||
Self::remove_connection_records(&mut *inner, lruk);
|
Self::remove_connection_records(&mut *inner, lruk);
|
||||||
}
|
}
|
||||||
@ -156,6 +158,8 @@ impl ConnectionTable {
Ok(out_conn)
}

+//#[instrument(level = "trace", skip(self), ret)]
+#[allow(dead_code)]
pub fn get_connection_by_id(&self, id: NetworkConnectionId) -> Option<ConnectionHandle> {
let mut inner = self.inner.lock();
let protocol_index = *inner.protocol_index_by_id.get(&id)?;

@ -163,6 +167,7 @@ impl ConnectionTable {
Some(out.get_handle())
}

+//#[instrument(level = "trace", skip(self), ret)]
pub fn get_connection_by_descriptor(
&self,
descriptor: ConnectionDescriptor,

@ -175,6 +180,7 @@ impl ConnectionTable {
Some(out.get_handle())
}

+//#[instrument(level = "trace", skip(self), ret)]
pub fn get_last_connection_by_remote(&self, remote: PeerAddress) -> Option<ConnectionHandle> {
let mut inner = self.inner.lock();

@ -184,7 +190,9 @@ impl ConnectionTable {
Some(out.get_handle())
}

-pub fn _get_connection_ids_by_remote(&self, remote: PeerAddress) -> Vec<NetworkConnectionId> {
+//#[instrument(level = "trace", skip(self), ret)]
+#[allow(dead_code)]
+pub fn get_connection_ids_by_remote(&self, remote: PeerAddress) -> Vec<NetworkConnectionId> {
let inner = self.inner.lock();
inner
.ids_by_remote

@ -219,6 +227,7 @@ impl ConnectionTable {
inner.conn_by_id.iter().fold(0, |acc, c| acc + c.len())
}

+#[instrument(level = "trace", skip(inner), ret)]
fn remove_connection_records(
inner: &mut ConnectionTableInner,
id: NetworkConnectionId,

@ -251,6 +260,7 @@ impl ConnectionTable {
conn
}

+#[instrument(level = "trace", skip(self), ret)]
pub fn remove_connection_by_id(&self, id: NetworkConnectionId) -> Option<NetworkConnection> {
let mut inner = self.inner.lock();
File diff suppressed because it is too large
Load Diff
@ -40,11 +40,9 @@ struct NetworkInner {
/// such as dhcp release or change of address or interfaces being added or removed
network_needs_restart: bool,
/// the calculated protocol configuration for inbound/outbound protocols
-protocol_config: Option<ProtocolConfig>,
+protocol_config: ProtocolConfig,
/// set of statically configured protocols with public dialinfo
static_public_dialinfo: ProtocolTypeSet,
-/// network class per routing domain
-network_class: [Option<NetworkClass>; RoutingDomain::count()],
/// join handles for all the low level network background tasks
join_handles: Vec<MustJoinHandle<()>>,
/// stop source for shutting down the low level network background tasks

@ -65,8 +63,6 @@ struct NetworkInner {
enable_ipv6_local: bool,
/// set if we need to calculate our public dial info again
needs_public_dial_info_check: bool,
-/// set during the actual execution of the public dial info check to ensure we don't do it more than once
-doing_public_dial_info_check: bool,
/// the punishment closure to enax
public_dial_info_check_punishment: Option<Box<dyn FnOnce() + Send + 'static>>,
/// udp socket record for bound-first sockets, which are used to guarantee a port is available before

@ -118,11 +114,9 @@ impl Network {
network_started: false,
network_needs_restart: false,
needs_public_dial_info_check: false,
-doing_public_dial_info_check: false,
public_dial_info_check_punishment: None,
-protocol_config: None,
+protocol_config: Default::default(),
static_public_dialinfo: ProtocolTypeSet::empty(),
-network_class: [None, None],
join_handles: Vec::new(),
stop_source: None,
udp_port: 0u16,
@ -462,11 +456,13 @@ impl Network {

// receive single response
let mut out = vec![0u8; MAX_MESSAGE_SIZE];
-let (recv_len, recv_addr) =
-network_result_try!(timeout(timeout_ms, h.recv_message(&mut out))
-.await
-.into_network_result())
-.wrap_err("recv_message failure")?;
+let (recv_len, recv_addr) = network_result_try!(timeout(
+timeout_ms,
+h.recv_message(&mut out).instrument(Span::current())
+)
+.await
+.into_network_result())
+.wrap_err("recv_message failure")?;

let recv_socket_addr = recv_addr.remote_address().to_socket_addr();
self.network_manager()
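The change above wraps the response receive in a timeout and folds the elapsed-timeout case into the network result instead of treating it as a hard error. A hedged sketch of that shape, using tokio's timeout and a placeholder NetRes enum standing in for NetworkResult:

use std::time::Duration;
use tokio::time::timeout;

/// Hypothetical stand-in for veilid's NetworkResult: either a value or a timeout.
enum NetRes<T> {
    Value(T),
    Timeout,
}

async fn recv_message() -> std::io::Result<Vec<u8>> {
    // Placeholder for a real receive; pretend a peer answered.
    Ok(vec![1, 2, 3])
}

/// Wrap a receive in a timeout and fold "timed out" into the result type instead of
/// propagating it as an error, mirroring the timeout(...).into_network_result() shape.
async fn recv_with_timeout(timeout_ms: u64) -> std::io::Result<NetRes<Vec<u8>>> {
    match timeout(Duration::from_millis(timeout_ms), recv_message()).await {
        Ok(Ok(msg)) => Ok(NetRes::Value(msg)), // got an answer in time
        Ok(Err(e)) => Err(e),                  // real I/O error
        Err(_elapsed) => Ok(NetRes::Timeout),  // timeout is a normal network outcome
    }
}

#[tokio::main]
async fn main() -> std::io::Result<()> {
    match recv_with_timeout(5000).await? {
        NetRes::Value(msg) => println!("received {} bytes", msg.len()),
        NetRes::Timeout => println!("no response before the deadline"),
    }
    Ok(())
}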
@ -618,7 +614,7 @@ impl Network {

/////////////////////////////////////////////////////////////////

-pub fn get_protocol_config(&self) -> Option<ProtocolConfig> {
+pub fn get_protocol_config(&self) -> ProtocolConfig {
self.inner.lock().protocol_config
}

@ -732,7 +728,8 @@ impl Network {
family_local,
}
};
-inner.protocol_config = Some(protocol_config);
+inner.protocol_config = protocol_config;

protocol_config
};
@ -769,27 +766,37 @@ impl Network {
// that we have ports available to us
self.free_bound_first_ports();

-// If we have static public dialinfo, upgrade our network class
+// set up the routing table's network config
+// if we have static public dialinfo, upgrade our network class
+
+editor_public_internet.setup_network(
+protocol_config.inbound,
+protocol_config.outbound,
+protocol_config.family_global,
+);
+editor_local_network.setup_network(
+protocol_config.inbound,
+protocol_config.outbound,
+protocol_config.family_local,
+);
let detect_address_changes = {
let c = self.config.get();
c.network.detect_address_changes
};

if !detect_address_changes {
-let mut inner = self.inner.lock();
+let inner = self.inner.lock();
if !inner.static_public_dialinfo.is_empty() {
-inner.network_class[RoutingDomain::PublicInternet as usize] =
-Some(NetworkClass::InboundCapable);
+editor_public_internet.set_network_class(Some(NetworkClass::InboundCapable));
}
}

-info!("network started");
-self.inner.lock().network_started = true;

// commit routing table edits
editor_public_internet.commit().await;
editor_local_network.commit().await;

+info!("network started");
+self.inner.lock().network_started = true;

Ok(())
}
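The startup path above now funnels public-internet and local-network state through routing-table editors that are committed at the end, instead of poking fields on NetworkInner directly. The sketch below shows the batched-edit idea with hypothetical DomainState and DomainEditor types; the real editors also commit asynchronously.

#[derive(Debug, Default, Clone)]
struct DomainState {
    network_class: Option<&'static str>,
    inbound: Vec<&'static str>,
}

/// Hypothetical editor that batches edits to a routing domain and applies them on commit,
/// so readers never observe a half-updated state.
struct DomainEditor<'a> {
    target: &'a mut DomainState,
    pending: DomainState,
}

impl<'a> DomainEditor<'a> {
    fn new(target: &'a mut DomainState) -> Self {
        let pending = target.clone();
        Self { target, pending }
    }
    fn setup_network(&mut self, inbound: Vec<&'static str>) -> &mut Self {
        self.pending.inbound = inbound;
        self
    }
    fn set_network_class(&mut self, class: Option<&'static str>) -> &mut Self {
        self.pending.network_class = class;
        self
    }
    fn commit(self) {
        let DomainEditor { target, pending } = self;
        *target = pending; // single point where the visible state changes
    }
}

fn main() {
    let mut public_internet = DomainState::default();
    let mut editor = DomainEditor::new(&mut public_internet);
    editor
        .setup_network(vec!["UDP", "TCP", "WS"])
        .set_network_class(Some("InboundCapable"));
    editor.commit();
    println!("{:?}", public_internet);
}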
@ -861,21 +868,11 @@ impl Network {
inner.public_dial_info_check_punishment = punishment;
}

-fn needs_public_dial_info_check(&self) -> bool {
+pub fn needs_public_dial_info_check(&self) -> bool {
let inner = self.inner.lock();
inner.needs_public_dial_info_check
}

-pub fn doing_public_dial_info_check(&self) -> bool {
-let inner = self.inner.lock();
-inner.doing_public_dial_info_check
-}
-
-pub fn get_network_class(&self, routing_domain: RoutingDomain) -> Option<NetworkClass> {
-let inner = self.inner.lock();
-inner.network_class[routing_domain as usize]
-}

//////////////////////////////////////////

#[instrument(level = "trace", skip(self), err)]

@ -937,6 +934,7 @@ impl Network {
// If we need to figure out our network class, tick the task for it
if detect_address_changes {
let public_internet_network_class = self
+.routing_table()
.get_network_class(RoutingDomain::PublicInternet)
.unwrap_or(NetworkClass::Invalid);
let needs_public_dial_info_check = self.needs_public_dial_info_check();
@ -3,6 +3,10 @@ use futures_util::stream::FuturesUnordered;
use futures_util::FutureExt;
use stop_token::future::FutureExt as StopTokenFutureExt;

+const PORT_MAP_VALIDATE_TRY_COUNT: usize = 3;
+const PORT_MAP_VALIDATE_DELAY_MS: u32 = 500;
+const PORT_MAP_TRY_COUNT: usize = 3;
+
struct DetectedPublicDialInfo {
dial_info: DialInfo,
class: DialInfoClass,

@ -79,7 +83,7 @@ impl DiscoveryContext {
async fn request_public_address(&self, node_ref: NodeRef) -> Option<SocketAddress> {
let rpc = self.routing_table.rpc_processor();

-let res = network_result_value_or_log!(debug match rpc.rpc_call_status(node_ref.clone()).await {
+let res = network_result_value_or_log!(debug match rpc.rpc_call_status(Destination::direct(node_ref.clone())).await {
Ok(v) => v,
Err(e) => {
log_net!(error

@ -98,7 +102,7 @@ impl DiscoveryContext {
node_ref,
res.answer
);
-res.answer.socket_address
+res.answer.map(|si| si.socket_address)
}

// find fast peers with a particular address type, and ask them to tell us what our external address is
@ -125,22 +129,24 @@ impl DiscoveryContext {
RoutingDomain::PublicInternet,
dial_info_filter.clone(),
);
-let disallow_relays_filter = move |e: &BucketEntryInner| {
-if let Some(n) = e.node_info(RoutingDomain::PublicInternet) {
-n.relay_peer_info.is_none()
-} else {
-false
-}
-};
-let filter = RoutingTable::combine_entry_filters(
-inbound_dial_info_entry_filter,
-disallow_relays_filter,
-);
+let disallow_relays_filter = Box::new(
+move |rti: &RoutingTableInner, _k: DHTKey, v: Option<Arc<BucketEntry>>| {
+let v = v.unwrap();
+v.with(rti, |_rti, e| {
+if let Some(n) = e.signed_node_info(RoutingDomain::PublicInternet) {
+n.relay_id().is_none()
+} else {
+false
+}
+})
+},
+) as RoutingTableEntryFilter;
+let filters = VecDeque::from([inbound_dial_info_entry_filter, disallow_relays_filter]);

// Find public nodes matching this filter
let peers = self
.routing_table
-.find_fast_public_nodes_filtered(node_count, filter);
+.find_fast_public_nodes_filtered(node_count, filters);
if peers.is_empty() {
log_net!(
"no external address detection peers of type {:?}:{:?}",
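The relay-disallowing filter above becomes a boxed closure and is combined with the dial-info filter in a VecDeque that find_fast_public_nodes_filtered walks. A simplified sketch of that filter-queue pattern with a hypothetical Peer type and PeerFilter alias:

use std::collections::VecDeque;

#[derive(Debug, Clone)]
struct Peer {
    id: u64,
    has_relay: bool,
    latency_ms: u32,
}

/// Hypothetical boxed entry filter, analogous in shape to RoutingTableEntryFilter.
type PeerFilter = Box<dyn Fn(&Peer) -> bool>;

/// Keep only peers that pass every filter in the queue.
fn find_peers_filtered(peers: &[Peer], filters: &VecDeque<PeerFilter>) -> Vec<Peer> {
    peers
        .iter()
        .filter(|p| filters.iter().all(|f| f(p)))
        .cloned()
        .collect()
}

fn main() {
    let peers = vec![
        Peer { id: 1, has_relay: false, latency_ms: 20 },
        Peer { id: 2, has_relay: true, latency_ms: 10 },
        Peer { id: 3, has_relay: false, latency_ms: 500 },
    ];

    // Disallow relayed peers: a relayed node can't tell us our true external address.
    let disallow_relays: PeerFilter = Box::new(|p| !p.has_relay);
    // Only keep reasonably fast peers.
    let fast_only: PeerFilter = Box::new(|p| p.latency_ms < 100);

    let filters = VecDeque::from([disallow_relays, fast_only]);
    let found = find_peers_filtered(&peers, &filters);
    assert_eq!(found.len(), 1);
    assert_eq!(found[0].id, 1);
}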
@ -218,6 +224,84 @@ impl DiscoveryContext {
out
}

+#[instrument(level = "trace", skip(self), ret)]
+async fn try_upnp_port_mapping(&self) -> Option<DialInfo> {
+let (pt, llpt, at, external_address_1, node_1, local_port) = {
+let inner = self.inner.lock();
+let pt = inner.protocol_type.unwrap();
+let llpt = pt.low_level_protocol_type();
+let at = inner.address_type.unwrap();
+let external_address_1 = inner.external_1_address.unwrap();
+let node_1 = inner.node_1.as_ref().unwrap().clone();
+let local_port = self.net.get_local_port(pt);
+(pt, llpt, at, external_address_1, node_1, local_port)
+};
+
+let mut tries = 0;
+loop {
+tries += 1;
+
+// Attempt a port mapping. If this doesn't succeed, it's not going to
+let Some(mapped_external_address) = self
+.net
+.unlocked_inner
+.igd_manager
+.map_any_port(llpt, at, local_port, Some(external_address_1.to_ip_addr()))
+.await else
+{
+return None;
+};
+
+// Make dial info from the port mapping
+let external_mapped_dial_info =
+self.make_dial_info(SocketAddress::from_socket_addr(mapped_external_address), pt);
+
+// Attempt to validate the port mapping
+let mut validate_tries = 0;
+loop {
+validate_tries += 1;
+
+// Ensure people can reach us. If we're firewalled off, this is useless
+if self
+.validate_dial_info(node_1.clone(), external_mapped_dial_info.clone(), false)
+.await
+{
+return Some(external_mapped_dial_info);
+}
+
+if validate_tries == PORT_MAP_VALIDATE_TRY_COUNT {
+log_net!(debug "UPNP port mapping succeeded but port {}/{} is still unreachable.\nretrying\n",
+local_port, match llpt {
+LowLevelProtocolType::UDP => "udp",
+LowLevelProtocolType::TCP => "tcp",
+});
+intf::sleep(PORT_MAP_VALIDATE_DELAY_MS).await
+} else {
+break;
+}
+}
+
+// Release the mapping if we're still unreachable
+let _ = self
+.net
+.unlocked_inner
+.igd_manager
+.unmap_port(llpt, at, external_address_1.port())
+.await;
+
+if tries == PORT_MAP_TRY_COUNT {
+warn!("UPNP port mapping succeeded but port {}/{} is still unreachable.\nYou may need to add a local firewall allowed port on this machine.\n",
+local_port, match llpt {
+LowLevelProtocolType::UDP => "udp",
+LowLevelProtocolType::TCP => "tcp",
+}
+);
+break;
+}
+}
+None
+}

#[instrument(level = "trace", skip(self), ret)]
async fn try_port_mapping(&self) -> Option<DialInfo> {
let (enable_upnp, _enable_natpmp) = {
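The added try_upnp_port_mapping above wraps IGD port mapping in two nested retry loops: validate the mapping a few times with a delay, release it if it never becomes reachable, and retry the whole mapping a bounded number of times. A synchronous sketch of that control flow with stand-in map/validate/unmap functions:

use std::thread::sleep;
use std::time::Duration;

const PORT_MAP_TRY_COUNT: usize = 3;
const PORT_MAP_VALIDATE_TRY_COUNT: usize = 3;
const PORT_MAP_VALIDATE_DELAY_MS: u64 = 500;

/// Hypothetical stand-ins for the IGD calls; the real ones talk to the gateway.
fn map_any_port(attempt: usize) -> Option<u16> {
    Some(5150 + attempt as u16)
}
fn validate_reachable(_port: u16, validate_attempt: usize) -> bool {
    // Pretend the mapping only becomes reachable on the last validation attempt.
    validate_attempt == PORT_MAP_VALIDATE_TRY_COUNT
}
fn unmap_port(_port: u16) {}

/// Outer loop: acquire a mapping, give it a few chances to validate, otherwise
/// release it and try again, giving up after PORT_MAP_TRY_COUNT attempts.
fn try_port_mapping() -> Option<u16> {
    for attempt in 1..=PORT_MAP_TRY_COUNT {
        let port = map_any_port(attempt)?;
        for validate_attempt in 1..=PORT_MAP_VALIDATE_TRY_COUNT {
            if validate_reachable(port, validate_attempt) {
                return Some(port);
            }
            sleep(Duration::from_millis(PORT_MAP_VALIDATE_DELAY_MS));
        }
        // Still unreachable: release the mapping before retrying so stale
        // entries don't pile up on the gateway.
        unmap_port(port);
    }
    None
}

fn main() {
    match try_port_mapping() {
        Some(port) => println!("mapped external port {}", port),
        None => println!("no usable port mapping"),
    }
}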
@ -226,44 +310,7 @@ impl DiscoveryContext {
};

if enable_upnp {
-let (pt, llpt, at, external_address_1, node_1, local_port) = {
-let inner = self.inner.lock();
-let pt = inner.protocol_type.unwrap();
-let llpt = pt.low_level_protocol_type();
-let at = inner.address_type.unwrap();
-let external_address_1 = inner.external_1_address.unwrap();
-let node_1 = inner.node_1.as_ref().unwrap().clone();
-let local_port = self.net.get_local_port(pt);
-(pt, llpt, at, external_address_1, node_1, local_port)
-};
-
-if let Some(mapped_external_address) = self
-.net
-.unlocked_inner
-.igd_manager
-.map_any_port(llpt, at, local_port, Some(external_address_1.to_ip_addr()))
-.await
-{
-// make dial info from the port mapping
-let external_mapped_dial_info = self
-.make_dial_info(SocketAddress::from_socket_addr(mapped_external_address), pt);
-
-// ensure people can reach us. if we're firewalled off, this is useless
-if self
-.validate_dial_info(node_1.clone(), external_mapped_dial_info.clone(), false)
-.await
-{
-return Some(external_mapped_dial_info);
-} else {
-// release the mapping if we're still unreachable
-let _ = self
-.net
-.unlocked_inner
-.igd_manager
-.unmap_port(llpt, at, external_address_1.port())
-.await;
-}
-}
+return self.try_upnp_port_mapping().await;
}

None
@ -375,17 +422,6 @@ impl DiscoveryContext {
// If we know we are behind NAT check what kind
#[instrument(level = "trace", skip(self), ret, err)]
pub async fn protocol_process_nat(&self) -> EyreResult<bool> {
-let (node_1, external_1_dial_info, external_1_address, protocol_type, address_type) = {
-let inner = self.inner.lock();
-(
-inner.node_1.as_ref().unwrap().clone(),
-inner.external_1_dial_info.as_ref().unwrap().clone(),
-inner.external_1_address.unwrap(),
-inner.protocol_type.unwrap(),
-inner.address_type.unwrap(),
-)
-};

// Attempt a port mapping via all available and enabled mechanisms
// Try this before the direct mapping in the event that we are restarting
// and may not have recorded a mapping created the last time

@ -397,8 +433,30 @@ impl DiscoveryContext {
// No more retries
return Ok(true);
}

+// XXX: is this necessary?
+// Redo our external_1 dial info detection because a failed port mapping attempt
+// may cause it to become invalid
+// Get our external address from some fast node, call it node 1
+// if !self.protocol_get_external_address_1().await {
+// // If we couldn't get an external address, then we should just try the whole network class detection again later
+// return Ok(false);
+// }
+
+// Get the external dial info for our use here
+let (node_1, external_1_dial_info, external_1_address, protocol_type, address_type) = {
+let inner = self.inner.lock();
+(
+inner.node_1.as_ref().unwrap().clone(),
+inner.external_1_dial_info.as_ref().unwrap().clone(),
+inner.external_1_address.unwrap(),
+inner.protocol_type.unwrap(),
+inner.address_type.unwrap(),
+)
+};

// Do a validate_dial_info on the external address from a redirected node
-else if self
+if self
.validate_dial_info(node_1.clone(), external_1_dial_info.clone(), true)
.await
{
@ -592,12 +650,14 @@ impl Network {
_l: u64,
_t: u64,
) -> EyreResult<()> {
+let routing_table = self.routing_table();
+
// Figure out if we can optimize TCP/WS checking since they are often on the same port
let (protocol_config, existing_network_class, tcp_same_port) = {
let inner = self.inner.lock();
-let protocol_config = inner.protocol_config.unwrap_or_default();
+let protocol_config = inner.protocol_config;
let existing_network_class =
-inner.network_class[RoutingDomain::PublicInternet as usize];
+routing_table.get_network_class(RoutingDomain::PublicInternet);
let tcp_same_port = if protocol_config.inbound.contains(ProtocolType::TCP)
&& protocol_config.inbound.contains(ProtocolType::WS)
{

@ -607,7 +667,6 @@ impl Network {
};
(protocol_config, existing_network_class, tcp_same_port)
};
-let routing_table = self.routing_table();

// Process all protocol and address combinations
let mut futures = FuturesUnordered::new();
@ -628,6 +687,7 @@ impl Network {
}
Some(vec![udpv4_context])
}
+.instrument(trace_span!("do_public_dial_info_check UDPv4"))
.boxed(),
);
}

@ -647,6 +707,7 @@ impl Network {
}
Some(vec![udpv6_context])
}
+.instrument(trace_span!("do_public_dial_info_check UDPv6"))
.boxed(),
);
}

@ -669,6 +730,7 @@ impl Network {
}
Some(vec![tcpv4_context])
}
+.instrument(trace_span!("do_public_dial_info_check TCPv4"))
.boxed(),
);
}

@ -688,6 +750,7 @@ impl Network {
}
Some(vec![wsv4_context])
}
+.instrument(trace_span!("do_public_dial_info_check WSv4"))
.boxed(),
);
}

@ -710,6 +773,7 @@ impl Network {
}
Some(vec![tcpv6_context])
}
+.instrument(trace_span!("do_public_dial_info_check TCPv6"))
.boxed(),
);
}

@ -729,6 +793,7 @@ impl Network {
}
Some(vec![wsv6_context])
}
+.instrument(trace_span!("do_public_dial_info_check WSv6"))
.boxed(),
);
}
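Each public dial info probe above is now wrapped in .instrument(trace_span!(...)) before being boxed and pushed onto the FuturesUnordered set, so trace output can be attributed per protocol and address family. A runnable sketch of that pattern (assuming the tokio, futures-util, and tracing crates; no subscriber is installed, so the spans are simply not recorded):

use futures_util::future::FutureExt;
use futures_util::stream::{FuturesUnordered, StreamExt};
use tracing::{trace_span, Instrument};

async fn check_dial_info(kind: &'static str) -> &'static str {
    // Placeholder for one UDPv4/TCPv4/... public dial info probe.
    kind
}

#[tokio::main]
async fn main() {
    let mut futures = FuturesUnordered::new();
    for kind in ["UDPv4", "TCPv4", "WSv4"] {
        futures.push(
            check_dial_info(kind)
                // Attach a named span so everything the probe does is attributed to it.
                .instrument(trace_span!("do_public_dial_info_check", kind))
                .boxed(),
        );
    }
    while let Some(done) = futures.next().await {
        println!("finished probe: {}", done);
    }
}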
@ -825,17 +890,16 @@ impl Network {

// Is the network class different?
if existing_network_class != new_network_class {
-self.inner.lock().network_class[RoutingDomain::PublicInternet as usize] =
-new_network_class;
+editor.set_network_class(new_network_class);
changed = true;
log_net!(debug "PublicInternet network class changed to {:?}", new_network_class);
}
} else if existing_network_class.is_some() {
// Network class could not be determined
editor.clear_dial_info_details();
-self.inner.lock().network_class[RoutingDomain::PublicInternet as usize] = None;
+editor.set_network_class(None);
changed = true;
-log_net!(debug "network class cleared");
+log_net!(debug "PublicInternet network class cleared");
}

// Punish nodes that told us our public address had changed when it didn't

@ -857,15 +921,11 @@ impl Network {
l: u64,
t: u64,
) -> EyreResult<()> {
-// Note that we are doing the public dial info check
-// We don't have to check this for concurrency, since this routine is run in a TickTask/SingleFuture
-self.inner.lock().doing_public_dial_info_check = true;
-
// Do the public dial info check
let out = self.do_public_dial_info_check(stop_token, l, t).await;

// Done with public dial info check
-self.inner.lock().doing_public_dial_info_check = false;
+self.inner.lock().needs_public_dial_info_check = false;

out
}
|
@ -42,7 +42,8 @@ impl Network {
|
|||||||
&self,
|
&self,
|
||||||
tls_acceptor: &TlsAcceptor,
|
tls_acceptor: &TlsAcceptor,
|
||||||
stream: AsyncPeekStream,
|
stream: AsyncPeekStream,
|
||||||
addr: SocketAddr,
|
peer_addr: SocketAddr,
|
||||||
|
local_addr: SocketAddr,
|
||||||
protocol_handlers: &[Box<dyn ProtocolAcceptHandler>],
|
protocol_handlers: &[Box<dyn ProtocolAcceptHandler>],
|
||||||
tls_connection_initial_timeout_ms: u32,
|
tls_connection_initial_timeout_ms: u32,
|
||||||
) -> EyreResult<Option<ProtocolNetworkConnection>> {
|
) -> EyreResult<Option<ProtocolNetworkConnection>> {
|
||||||
@ -65,18 +66,20 @@ impl Network {
|
|||||||
.wrap_err("tls initial timeout")?
|
.wrap_err("tls initial timeout")?
|
||||||
.wrap_err("failed to peek tls stream")?;
|
.wrap_err("failed to peek tls stream")?;
|
||||||
|
|
||||||
self.try_handlers(ps, addr, protocol_handlers).await
|
self.try_handlers(ps, peer_addr, local_addr, protocol_handlers)
|
||||||
|
.await
|
||||||
}
|
}
|
||||||
|
|
||||||
async fn try_handlers(
|
async fn try_handlers(
|
||||||
&self,
|
&self,
|
||||||
stream: AsyncPeekStream,
|
stream: AsyncPeekStream,
|
||||||
addr: SocketAddr,
|
peer_addr: SocketAddr,
|
||||||
|
local_addr: SocketAddr,
|
||||||
protocol_accept_handlers: &[Box<dyn ProtocolAcceptHandler>],
|
protocol_accept_handlers: &[Box<dyn ProtocolAcceptHandler>],
|
||||||
) -> EyreResult<Option<ProtocolNetworkConnection>> {
|
) -> EyreResult<Option<ProtocolNetworkConnection>> {
|
||||||
for ah in protocol_accept_handlers.iter() {
|
for ah in protocol_accept_handlers.iter() {
|
||||||
if let Some(nc) = ah
|
if let Some(nc) = ah
|
||||||
.on_accept(stream.clone(), addr)
|
.on_accept(stream.clone(), peer_addr, local_addr)
|
||||||
.await
|
.await
|
||||||
.wrap_err("io error")?
|
.wrap_err("io error")?
|
||||||
{
|
{
|
||||||
@ -105,21 +108,35 @@ impl Network {
}
};

+// XXX
+// warn!(
+// "DEBUGACCEPT: local={} remote={}",
+// tcp_stream.local_addr().unwrap(),
+// tcp_stream.peer_addr().unwrap(),
+// );
+
let listener_state = listener_state.clone();
let connection_manager = connection_manager.clone();

// Limit the number of connections from the same IP address
// and the number of total connections
-let addr = match tcp_stream.peer_addr() {
+let peer_addr = match tcp_stream.peer_addr() {
Ok(addr) => addr,
Err(e) => {
log_net!(debug "failed to get peer address: {}", e);
return;
}
};
+let local_addr = match tcp_stream.local_addr() {
+Ok(addr) => addr,
+Err(e) => {
+log_net!(debug "failed to get local address: {}", e);
+return;
+}
+};
// XXX limiting here instead for connection table? may be faster and avoids tls negotiation

-log_net!("TCP connection from: {}", addr);
+log_net!("TCP connection from: {}", peer_addr);

// Create a stream we can peek on
#[cfg(feature = "rt-tokio")]

@ -139,7 +156,7 @@ impl Network {
{
// If we fail to get a packet within the connection initial timeout
// then we punt this connection
-log_net!("connection initial timeout from: {:?}", addr);
+log_net!("connection initial timeout from: {:?}", peer_addr);
return;
}

@ -152,29 +169,30 @@ impl Network {
self.try_tls_handlers(
ls.tls_acceptor.as_ref().unwrap(),
ps,
-addr,
+peer_addr,
+local_addr,
&ls.tls_protocol_handlers,
tls_connection_initial_timeout_ms,
)
.await
} else {
-self.try_handlers(ps, addr, &ls.protocol_accept_handlers)
+self.try_handlers(ps, peer_addr, local_addr, &ls.protocol_accept_handlers)
.await
};

let conn = match conn {
Ok(Some(c)) => {
-log_net!("protocol handler found for {:?}: {:?}", addr, c);
+log_net!("protocol handler found for {:?}: {:?}", peer_addr, c);
c
}
Ok(None) => {
// No protocol handlers matched? drop it.
-log_net!(debug "no protocol handler for connection from {:?}", addr);
+log_net!(debug "no protocol handler for connection from {:?}", peer_addr);
return;
}
Err(e) => {
// Failed to negotiate connection? drop it.
-log_net!(debug "failed to negotiate connection from {:?}: {}", addr, e);
+log_net!(debug "failed to negotiate connection from {:?}: {}", peer_addr, e);
return;
}
};
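The accept loop above now records both tcp_stream.peer_addr() and tcp_stream.local_addr() before handing the stream to the protocol handlers, dropping the connection if either lookup fails. The std-only sketch below shows the same two lookups on an accepted std::net::TcpStream.

use std::io;
use std::net::{TcpListener, TcpStream};

/// Grab both ends of an accepted connection up front; either call can fail if the
/// socket is already dead, and in that case the connection is just dropped.
fn describe_connection(stream: &TcpStream) -> io::Result<String> {
    let peer_addr = stream.peer_addr()?;
    let local_addr = stream.local_addr()?;
    Ok(format!("accepted {} on local {}", peer_addr, local_addr))
}

fn main() -> io::Result<()> {
    // Bind to an ephemeral port, connect to ourselves once, and describe the connection.
    let listener = TcpListener::bind("127.0.0.1:0")?;
    let target = listener.local_addr()?;
    let _client = TcpStream::connect(target)?;
    let (stream, _) = listener.accept()?;
    println!("{}", describe_connection(&stream)?);
    Ok(())
}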
@ -311,7 +329,6 @@ impl Network {
.push(new_protocol_accept_handler(
self.network_manager().config(),
true,
-addr,
));
} else {
ls.write()

@ -319,7 +336,6 @@ impl Network {
.push(new_protocol_accept_handler(
self.network_manager().config(),
false,
-addr,
));
}
@ -34,6 +34,7 @@ cfg_if! {
}
}

+#[instrument(level = "trace", ret, err)]
pub fn new_unbound_shared_udp_socket(domain: Domain) -> io::Result<Socket> {
let socket = Socket::new(domain, Type::DGRAM, Some(Protocol::UDP))?;
if domain == Domain::IPV6 {

@ -49,6 +50,7 @@ pub fn new_unbound_shared_udp_socket(domain: Domain) -> io::Result<Socket> {
Ok(socket)
}

+#[instrument(level = "trace", ret, err)]
pub fn new_bound_shared_udp_socket(local_address: SocketAddr) -> io::Result<Socket> {
let domain = Domain::for_address(local_address);
let socket = new_unbound_shared_udp_socket(domain)?;

@ -60,6 +62,7 @@ pub fn new_bound_shared_udp_socket(local_address: SocketAddr) -> io::Result<Sock
Ok(socket)
}

+#[instrument(level = "trace", ret, err)]
pub fn new_bound_first_udp_socket(local_address: SocketAddr) -> io::Result<Socket> {
let domain = Domain::for_address(local_address);
let socket = Socket::new(domain, Type::DGRAM, Some(Protocol::UDP))?;

@ -93,6 +96,7 @@ pub fn new_bound_first_udp_socket(local_address: SocketAddr) -> io::Result<Socke
Ok(socket)
}

+#[instrument(level = "trace", ret, err)]
pub fn new_unbound_shared_tcp_socket(domain: Domain) -> io::Result<Socket> {
let socket = Socket::new(domain, Type::STREAM, Some(Protocol::TCP))?;
if let Err(e) = socket.set_linger(Some(core::time::Duration::from_secs(0))) {

@ -114,6 +118,7 @@ pub fn new_unbound_shared_tcp_socket(domain: Domain) -> io::Result<Socket> {
Ok(socket)
}

+#[instrument(level = "trace", ret, err)]
pub fn new_bound_shared_tcp_socket(local_address: SocketAddr) -> io::Result<Socket> {
let domain = Domain::for_address(local_address);
let socket = new_unbound_shared_tcp_socket(domain)?;

@ -125,6 +130,7 @@ pub fn new_bound_shared_tcp_socket(local_address: SocketAddr) -> io::Result<Sock
Ok(socket)
}

+#[instrument(level = "trace", ret, err)]
pub fn new_bound_first_tcp_socket(local_address: SocketAddr) -> io::Result<Socket> {
let domain = Domain::for_address(local_address);

@ -166,6 +172,8 @@ pub fn new_bound_first_tcp_socket(local_address: SocketAddr) -> io::Result<Socke
}

// Non-blocking connect is tricky when you want to start with a prepared socket
+// Errors should not be logged as they are valid conditions for this function
+#[instrument(level = "trace", ret)]
pub async fn nonblocking_connect(
socket: Socket,
addr: SocketAddr,

@ -185,7 +193,6 @@ pub async fn nonblocking_connect(
Err(err) if err.kind() == std::io::ErrorKind::WouldBlock => Ok(()),
Err(e) => Err(e),
}?;

let async_stream = Async::new(std::net::TcpStream::from(socket))?;

// The stream becomes writable when connected
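The socket helpers above only gain #[instrument] spans in this hunk, but their overall shape is: build a socket2 socket, set its options, then bind it. A hedged sketch of a bound, reusable TCP socket in that style; the exact option set in veilid's helpers differs per platform (for example port reuse on unix).

use std::net::SocketAddr;
use socket2::{Domain, Protocol, Socket, Type};

/// A sketch of the bound-shared-TCP-socket helper: create the socket, set the options
/// that matter for fast rebinding, then bind it. Error handling is left to the caller.
fn new_bound_shared_tcp_socket(local_address: SocketAddr) -> std::io::Result<Socket> {
    let domain = Domain::for_address(local_address);
    let socket = Socket::new(domain, Type::STREAM, Some(Protocol::TCP))?;
    // Close immediately on drop instead of lingering in TIME_WAIT with queued data.
    socket.set_linger(Some(std::time::Duration::from_secs(0)))?;
    // Allow the port to be rebound quickly across restarts.
    socket.set_reuse_address(true)?;
    socket.bind(&local_address.into())?;
    Ok(socket)
}

fn main() -> std::io::Result<()> {
    let addr: SocketAddr = "127.0.0.1:0".parse().unwrap();
    let socket = new_bound_shared_tcp_socket(addr)?;
    println!("bound to {:?}", socket.local_addr()?);
    Ok(())
}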
@ -87,11 +87,11 @@ impl RawTcpNetworkConnection {
Ok(NetworkResult::Value(out))
}

-#[instrument(level = "trace", err, skip(self), fields(network_result))]
+// #[instrument(level = "trace", err, skip(self), fields(network_result))]
pub async fn recv(&self) -> io::Result<NetworkResult<Vec<u8>>> {
let mut stream = self.stream.clone();
let out = Self::recv_internal(&mut stream).await?;
-tracing::Span::current().record("network_result", &tracing::field::display(&out));
+//tracing::Span::current().record("network_result", &tracing::field::display(&out));
Ok(out)
}
}

@ -99,30 +99,20 @@ impl RawTcpNetworkConnection {
///////////////////////////////////////////////////////////
///

-struct RawTcpProtocolHandlerInner {
-local_address: SocketAddr,
-}
-
#[derive(Clone)]
pub struct RawTcpProtocolHandler
where
Self: ProtocolAcceptHandler,
{
connection_initial_timeout_ms: u32,
-inner: Arc<Mutex<RawTcpProtocolHandlerInner>>,
}

impl RawTcpProtocolHandler {
-fn new_inner(local_address: SocketAddr) -> RawTcpProtocolHandlerInner {
-RawTcpProtocolHandlerInner { local_address }
-}
-
-pub fn new(config: VeilidConfig, local_address: SocketAddr) -> Self {
+pub fn new(config: VeilidConfig) -> Self {
let c = config.get();
let connection_initial_timeout_ms = c.network.connection_initial_timeout_ms;
Self {
connection_initial_timeout_ms,
-inner: Arc::new(Mutex::new(Self::new_inner(local_address))),
}
}

@ -131,6 +121,7 @@ impl RawTcpProtocolHandler {
self,
ps: AsyncPeekStream,
socket_addr: SocketAddr,
+local_addr: SocketAddr,
) -> io::Result<Option<ProtocolNetworkConnection>> {
log_net!("TCP: on_accept_async: enter");
let mut peekbuf: [u8; PEEK_DETECT_LEN] = [0u8; PEEK_DETECT_LEN];

@ -147,9 +138,8 @@ impl RawTcpProtocolHandler {
SocketAddress::from_socket_addr(socket_addr),
ProtocolType::TCP,
);
-let local_address = self.inner.lock().local_address;
let conn = ProtocolNetworkConnection::RawTcp(RawTcpNetworkConnection::new(
-ConnectionDescriptor::new(peer_addr, SocketAddress::from_socket_addr(local_address)),
+ConnectionDescriptor::new(peer_addr, SocketAddress::from_socket_addr(local_addr)),
ps,
));

@ -158,7 +148,7 @@ impl RawTcpProtocolHandler {
Ok(Some(conn))
}

-#[instrument(level = "trace", err)]
+#[instrument(level = "trace", ret, err)]
pub async fn connect(
local_address: Option<SocketAddr>,
socket_addr: SocketAddr,

@ -202,7 +192,8 @@ impl ProtocolAcceptHandler for RawTcpProtocolHandler {
&self,
stream: AsyncPeekStream,
peer_addr: SocketAddr,
+local_addr: SocketAddr,
) -> SendPinBoxFuture<io::Result<Option<ProtocolNetworkConnection>>> {
-Box::pin(self.clone().on_accept_async(stream, peer_addr))
+Box::pin(self.clone().on_accept_async(stream, peer_addr, local_addr))
}
}
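After this change the raw TCP handler keeps no cached local address; both ends of the accepted socket are passed into on_accept and used to build the connection descriptor. A simplified, synchronous sketch of that handler shape (the real trait returns a boxed future):

use std::net::SocketAddr;

/// Hypothetical mirror of the accept-handler shape after this change: the handler is
/// stateless about addresses and receives both ends of the accepted socket per call.
trait AcceptHandler {
    fn on_accept(&self, peer_addr: SocketAddr, local_addr: SocketAddr) -> Option<String>;
}

struct RawTcpHandler;

impl AcceptHandler for RawTcpHandler {
    fn on_accept(&self, peer_addr: SocketAddr, local_addr: SocketAddr) -> Option<String> {
        // A real handler would peek the stream here; this one just builds a descriptor.
        Some(format!("raw tcp: remote={} local={}", peer_addr, local_addr))
    }
}

fn main() {
    let handlers: Vec<Box<dyn AcceptHandler>> = vec![Box::new(RawTcpHandler)];
    let peer: SocketAddr = "198.51.100.7:52110".parse().unwrap();
    let local: SocketAddr = "192.0.2.1:5150".parse().unwrap();
    // Try each handler in order and take the first one that claims the connection.
    let descriptor = handlers.iter().find_map(|h| h.on_accept(peer, local));
    println!("{:?}", descriptor);
}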
@ -11,7 +11,7 @@ impl RawUdpProtocolHandler {
Self { socket }
}

-#[instrument(level = "trace", err, skip(self, data), fields(data.len = data.len(), ret.len, ret.from))]
+// #[instrument(level = "trace", err, skip(self, data), fields(data.len = data.len(), ret.len, ret.descriptor))]
pub async fn recv_message(&self, data: &mut [u8]) -> io::Result<(usize, ConnectionDescriptor)> {
let (size, descriptor) = loop {
let (size, remote_addr) = network_result_value_or_log!(debug self.socket.recv_from(data).await.into_network_result()? => continue);

@ -33,12 +33,12 @@ impl RawUdpProtocolHandler {
break (size, descriptor);
};

-tracing::Span::current().record("ret.len", &size);
-tracing::Span::current().record("ret.from", &format!("{:?}", descriptor).as_str());
+// tracing::Span::current().record("ret.len", &size);
+// tracing::Span::current().record("ret.descriptor", &format!("{:?}", descriptor).as_str());
Ok((size, descriptor))
}

-#[instrument(level = "trace", err, skip(self, data), fields(data.len = data.len(), ret.len, ret.from))]
+#[instrument(level = "trace", err, skip(self, data), fields(data.len = data.len(), ret.len, ret.descriptor))]
pub async fn send_message(
&self,
data: Vec<u8>,

@ -67,6 +67,8 @@ impl RawUdpProtocolHandler {
bail_io_error_other!("UDP partial send")
}

+tracing::Span::current().record("ret.len", &len);
+tracing::Span::current().record("ret.descriptor", &format!("{:?}", descriptor).as_str());
Ok(NetworkResult::value(descriptor))
}
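The UDP send path above declares ret.len and ret.descriptor as span fields and fills them in once the send completes. A small sketch of that declare-empty-then-record pattern (assuming a recent tracing 0.1 release and the tracing-subscriber crate for output):

use tracing::{field, instrument, Level};

/// Record the result size on the function's own span once it is known: the field is
/// declared empty up front and filled in just before returning.
#[instrument(level = "trace", skip(data), fields(data.len = data.len(), ret.len = field::Empty))]
fn send_message(data: &[u8]) -> usize {
    let len = data.len(); // placeholder for the actual number of bytes sent
    tracing::Span::current().record("ret.len", len as u64);
    len
}

fn main() {
    // Without a subscriber the spans are dropped; this installs a simple console one.
    tracing_subscriber::fmt().with_max_level(Level::TRACE).init();
    let sent = send_message(&[0u8; 16]);
    println!("sent {} bytes", sent);
}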
@ -93,7 +93,7 @@ where
Ok(out)
}

-#[instrument(level = "trace", err, skip(self), fields(network_result, ret.len))]
+// #[instrument(level = "trace", err, skip(self), fields(network_result, ret.len))]
pub async fn recv(&self) -> io::Result<NetworkResult<Vec<u8>>> {
let out = match self.stream.clone().next().await {
Some(Ok(Message::Binary(v))) => {

@ -120,7 +120,7 @@ where
)),
};

-tracing::Span::current().record("network_result", &tracing::field::display(&out));
+// tracing::Span::current().record("network_result", &tracing::field::display(&out));
Ok(out)
}
}

@ -129,7 +129,6 @@ where
///
struct WebsocketProtocolHandlerArc {
tls: bool,
-local_address: SocketAddr,
request_path: Vec<u8>,
connection_initial_timeout_ms: u32,
}

@ -142,7 +141,7 @@ where
arc: Arc<WebsocketProtocolHandlerArc>,
}
impl WebsocketProtocolHandler {
-pub fn new(config: VeilidConfig, tls: bool, local_address: SocketAddr) -> Self {
+pub fn new(config: VeilidConfig, tls: bool) -> Self {
let c = config.get();
let path = if tls {
format!("GET /{}", c.network.protocol.ws.path.trim_end_matches('/'))

@ -158,7 +157,6 @@ impl WebsocketProtocolHandler {
Self {
arc: Arc::new(WebsocketProtocolHandlerArc {
tls,
-local_address,
request_path: path.as_bytes().to_vec(),
connection_initial_timeout_ms,
}),

@ -170,6 +168,7 @@ impl WebsocketProtocolHandler {
self,
ps: AsyncPeekStream,
socket_addr: SocketAddr,
+local_addr: SocketAddr,
) -> io::Result<Option<ProtocolNetworkConnection>> {
log_net!("WS: on_accept_async: enter");
let request_path_len = self.arc.request_path.len() + 2;

@ -209,10 +208,7 @@ impl WebsocketProtocolHandler {
PeerAddress::new(SocketAddress::from_socket_addr(socket_addr), protocol_type);

let conn = ProtocolNetworkConnection::WsAccepted(WebsocketNetworkConnection::new(
-ConnectionDescriptor::new(
-peer_addr,
-SocketAddress::from_socket_addr(self.arc.local_address),
-),
+ConnectionDescriptor::new(peer_addr, SocketAddress::from_socket_addr(local_addr)),
ws_stream,
));

@ -221,7 +217,7 @@ impl WebsocketProtocolHandler {
Ok(Some(conn))
}

-#[instrument(level = "trace", err)]
+#[instrument(level = "trace", ret, err)]
pub async fn connect(
local_address: Option<SocketAddr>,
dial_info: &DialInfo,

@ -296,7 +292,8 @@ impl ProtocolAcceptHandler for WebsocketProtocolHandler {
&self,
stream: AsyncPeekStream,
peer_addr: SocketAddr,
+local_addr: SocketAddr,
) -> SendPinBoxFuture<io::Result<Option<ProtocolNetworkConnection>>> {
-Box::pin(self.clone().on_accept_async(stream, peer_addr))
+Box::pin(self.clone().on_accept_async(stream, peer_addr, local_addr))
}
}
@ -387,7 +387,7 @@ impl Network {
ip_addrs,
ws_port,
false,
-Box::new(|c, t, a| Box::new(WebsocketProtocolHandler::new(c, t, a))),
+Box::new(|c, t| Box::new(WebsocketProtocolHandler::new(c, t))),
)
.await?;
trace!("WS: listener started on {:#?}", socket_addresses);

@ -496,7 +496,7 @@ impl Network {
ip_addrs,
wss_port,
true,
-Box::new(|c, t, a| Box::new(WebsocketProtocolHandler::new(c, t, a))),
+Box::new(|c, t| Box::new(WebsocketProtocolHandler::new(c, t))),
)
.await?;
trace!("WSS: listener started on {:#?}", socket_addresses);

@ -590,7 +590,7 @@ impl Network {
ip_addrs,
tcp_port,
false,
-Box::new(move |c, _, a| Box::new(RawTcpProtocolHandler::new(c, a))),
+Box::new(move |c, _| Box::new(RawTcpProtocolHandler::new(c))),
)
.await?;
trace!("TCP: listener started on {:#?}", socket_addresses);
|
|||||||
&self,
|
&self,
|
||||||
stream: AsyncPeekStream,
|
stream: AsyncPeekStream,
|
||||||
peer_addr: SocketAddr,
|
peer_addr: SocketAddr,
|
||||||
|
local_addr: SocketAddr,
|
||||||
) -> SendPinBoxFuture<io::Result<Option<ProtocolNetworkConnection>>>;
|
) -> SendPinBoxFuture<io::Result<Option<ProtocolNetworkConnection>>>;
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -38,7 +39,7 @@ cfg_if::cfg_if! {
|
|||||||
}
|
}
|
||||||
|
|
||||||
pub type NewProtocolAcceptHandler =
|
pub type NewProtocolAcceptHandler =
|
||||||
dyn Fn(VeilidConfig, bool, SocketAddr) -> Box<dyn ProtocolAcceptHandler> + Send;
|
dyn Fn(VeilidConfig, bool) -> Box<dyn ProtocolAcceptHandler> + Send;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
///////////////////////////////////////////////////////////
|
///////////////////////////////////////////////////////////
|
||||||
@ -91,7 +92,7 @@ pub struct NetworkConnection {
processor: Option<MustJoinHandle<()>>,
established_time: u64,
stats: Arc<Mutex<NetworkConnectionStats>>,
-sender: flume::Sender<Vec<u8>>,
+sender: flume::Sender<(Option<Id>, Vec<u8>)>,
stop_source: Option<StopSource>,
}

@ -120,9 +121,6 @@ impl NetworkConnection {
protocol_connection: ProtocolNetworkConnection,
connection_id: NetworkConnectionId,
) -> Self {
-// Get timeout
-let network_manager = connection_manager.network_manager();
-
// Get descriptor
let descriptor = protocol_connection.descriptor();

@ -181,6 +179,7 @@ impl NetworkConnection {
}
}

+#[instrument(level="trace", skip(message, stats), fields(message.len = message.len()), ret, err)]
async fn send_internal(
protocol_connection: &ProtocolNetworkConnection,
stats: Arc<Mutex<NetworkConnectionStats>>,

@ -194,6 +193,8 @@ impl NetworkConnection {

Ok(NetworkResult::Value(out))
}

+#[instrument(level="trace", skip(stats), fields(ret.len), err)]
async fn recv_internal(
protocol_connection: &ProtocolNetworkConnection,
stats: Arc<Mutex<NetworkConnectionStats>>,

@ -204,14 +205,18 @@ impl NetworkConnection {
let mut stats = stats.lock();
stats.last_message_recv_time.max_assign(Some(ts));

+tracing::Span::current().record("ret.len", out.len());
+
Ok(NetworkResult::Value(out))
}

+#[allow(dead_code)]
pub fn stats(&self) -> NetworkConnectionStats {
let stats = self.stats.lock();
stats.clone()
}

+#[allow(dead_code)]
pub fn established_time(&self) -> u64 {
self.established_time
}
@ -223,7 +228,7 @@ impl NetworkConnection {
|
|||||||
manager_stop_token: StopToken,
|
manager_stop_token: StopToken,
|
||||||
connection_id: NetworkConnectionId,
|
connection_id: NetworkConnectionId,
|
||||||
descriptor: ConnectionDescriptor,
|
descriptor: ConnectionDescriptor,
|
||||||
receiver: flume::Receiver<Vec<u8>>,
|
receiver: flume::Receiver<(Option<Id>, Vec<u8>)>,
|
||||||
protocol_connection: ProtocolNetworkConnection,
|
protocol_connection: ProtocolNetworkConnection,
|
||||||
stats: Arc<Mutex<NetworkConnectionStats>>,
|
stats: Arc<Mutex<NetworkConnectionStats>>,
|
||||||
) -> SendPinBoxFuture<()> {
|
) -> SendPinBoxFuture<()> {
|
||||||
@ -249,7 +254,7 @@ impl NetworkConnection {
|
|||||||
};
|
};
|
||||||
let timer = MutableFuture::new(new_timer());
|
let timer = MutableFuture::new(new_timer());
|
||||||
|
|
||||||
unord.push(system_boxed(timer.clone()));
|
unord.push(system_boxed(timer.clone().instrument(Span::current())));
|
||||||
|
|
||||||
loop {
|
loop {
|
||||||
// Add another message sender future if necessary
|
// Add another message sender future if necessary
|
||||||
@ -257,13 +262,18 @@ impl NetworkConnection {
|
|||||||
need_sender = false;
|
need_sender = false;
|
||||||
let sender_fut = receiver.recv_async().then(|res| async {
|
let sender_fut = receiver.recv_async().then(|res| async {
|
||||||
match res {
|
match res {
|
||||||
Ok(message) => {
|
Ok((_span_id, message)) => {
|
||||||
|
|
||||||
|
let recv_span = span!(Level::TRACE, "process_connection recv");
|
||||||
|
// xxx: causes crash (Missing otel data span extensions)
|
||||||
|
// recv_span.follows_from(span_id);
|
||||||
|
|
||||||
// send the packet
|
// send the packet
|
||||||
if let Err(e) = Self::send_internal(
|
if let Err(e) = Self::send_internal(
|
||||||
&protocol_connection,
|
&protocol_connection,
|
||||||
stats.clone(),
|
stats.clone(),
|
||||||
message,
|
message,
|
||||||
)
|
).instrument(recv_span)
|
||||||
.await
|
.await
|
||||||
{
|
{
|
||||||
// Sending the packet along can fail, if so, this connection is dead
|
// Sending the packet along can fail, if so, this connection is dead
|
||||||
@ -280,7 +290,7 @@ impl NetworkConnection {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
});
|
});
|
||||||
unord.push(system_boxed(sender_fut));
|
unord.push(system_boxed(sender_fut.instrument(Span::current())));
|
||||||
}
|
}
|
||||||
|
|
||||||
// Add another message receiver future if necessary
|
// Add another message receiver future if necessary
|
||||||
@ -314,7 +324,7 @@ impl NetworkConnection {
|
|||||||
}
|
}
|
||||||
});
|
});
|
||||||
|
|
||||||
unord.push(system_boxed(receiver_fut));
|
unord.push(system_boxed(receiver_fut.instrument(Span::current())));
|
||||||
}
|
}
|
||||||
|
|
||||||
// Process futures
|
// Process futures
|
||||||
@ -358,7 +368,7 @@ impl NetworkConnection {
|
|||||||
connection_manager
|
connection_manager
|
||||||
.report_connection_finished(connection_id)
|
.report_connection_finished(connection_id)
|
||||||
.await;
|
.await;
|
||||||
})
|
}.instrument(trace_span!("process_connection")))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
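The hunks above attach `.instrument(Span::current())` to each future before it is queued, so the work done by the sender, receiver, and timer futures is recorded under the connection-processing span. A minimal standalone sketch of that pattern, assuming the `futures`, `tracing`, and `tracing-subscriber` crates as dependencies (the function and span names here are illustrative, not Veilid's own):

use futures::{executor::block_on, future::FutureExt, stream::FuturesUnordered, StreamExt};
use tracing::{info, info_span, Instrument};

async fn send_one(msg: &str) {
    // Events emitted here are attributed to the span the future was instrumented with.
    info!(message = msg, "sending");
}

fn main() {
    tracing_subscriber::fmt::init();

    let mut unord = FuturesUnordered::new();
    let span = info_span!("process_connection");
    // Attach the span to each boxed future before pushing it, mirroring
    // the `system_boxed(fut.instrument(Span::current()))` calls in the diff.
    unord.push(send_one("hello").instrument(span.clone()).boxed());
    unord.push(send_one("world").instrument(span).boxed());

    // Drive all queued futures to completion.
    block_on(async { while unord.next().await.is_some() {} });
}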
@@ -1,6 +1,6 @@
use super::*;

-use crate::dht::*;
+use crate::crypto::*;
use crate::xx::*;
use futures_util::FutureExt;
use stop_token::future::FutureExt as StopFutureExt;
@@ -39,119 +39,125 @@ impl NetworkManager {
// Get bootstrap nodes from hostnames concurrently
let mut unord = FuturesUnordered::new();
for bsname in bsnames {
-unord.push(async move {
+unord.push(
-// look up boostrap node txt records
+async move {
-let bsnirecords = match intf::txt_lookup(&bsname).await {
+// look up boostrap node txt records
-Err(e) => {
+let bsnirecords = match intf::txt_lookup(&bsname).await {
-warn!("bootstrap node txt lookup failed for {}: {}", bsname, e);
-return None;
-}
-Ok(v) => v,
-};
-// for each record resolve into key/bootstraprecord pairs
-let mut bootstrap_records: Vec<(DHTKey, BootstrapRecord)> = Vec::new();
-for bsnirecord in bsnirecords {
-// Bootstrap TXT Record Format Version 0:
-// txt_version,min_version,max_version,nodeid,hostname,dialinfoshort*
-//
-// Split bootstrap node record by commas. Example:
-// 0,0,0,7lxDEabK_qgjbe38RtBa3IZLrud84P6NhGP-pRTZzdQ,bootstrap-1.dev.veilid.net,T5150,U5150,W5150/ws
-let records: Vec<String> = bsnirecord
-.trim()
-.split(',')
-.map(|x| x.trim().to_owned())
-.collect();
-if records.len() < 6 {
-warn!("invalid number of fields in bootstrap txt record");
-continue;
-}

-// Bootstrap TXT record version
-let txt_version: u8 = match records[0].parse::<u8>() {
-Ok(v) => v,
Err(e) => {
-warn!(
+warn!("bootstrap node txt lookup failed for {}: {}", bsname, e);
+return None;
+}
+Ok(v) => v,
+};
+// for each record resolve into key/bootstraprecord pairs
+let mut bootstrap_records: Vec<(DHTKey, BootstrapRecord)> = Vec::new();
+for bsnirecord in bsnirecords {
+// Bootstrap TXT Record Format Version 0:
+// txt_version,min_version,max_version,nodeid,hostname,dialinfoshort*
+//
+// Split bootstrap node record by commas. Example:
+// 0,0,0,7lxDEabK_qgjbe38RtBa3IZLrud84P6NhGP-pRTZzdQ,bootstrap-1.dev.veilid.net,T5150,U5150,W5150/ws
+let records: Vec<String> = bsnirecord
+.trim()
+.split(',')
+.map(|x| x.trim().to_owned())
+.collect();
+if records.len() < 6 {
+warn!("invalid number of fields in bootstrap txt record");
+continue;
+}

+// Bootstrap TXT record version
+let txt_version: u8 = match records[0].parse::<u8>() {
+Ok(v) => v,
+Err(e) => {
+warn!(
"invalid txt_version specified in bootstrap node txt record: {}",
e
);
+continue;
+}
+};
+if txt_version != BOOTSTRAP_TXT_VERSION {
+warn!("unsupported bootstrap txt record version");
continue;
}
-};
-if txt_version != BOOTSTRAP_TXT_VERSION {
-warn!("unsupported bootstrap txt record version");
-continue;
-}

// Min/Max wire protocol version
let min_version: u8 = match records[1].parse::<u8>() {
Ok(v) => v,
Err(e) => {
warn!(
"invalid min_version specified in bootstrap node txt record: {}",
e
);
continue;
}
};
let max_version: u8 = match records[2].parse::<u8>() {
Ok(v) => v,
Err(e) => {
warn!(
"invalid max_version specified in bootstrap node txt record: {}",
e
);
-continue;
-}
-};
-
-// Node Id
-let node_id_str = &records[3];
-let node_id_key = match DHTKey::try_decode(node_id_str) {
-Ok(v) => v,
-Err(e) => {
-warn!(
-"Invalid node id in bootstrap node record {}: {}",
-node_id_str, e
-);
-continue;
-}
-};
-
-// Hostname
-let hostname_str = &records[4];
-
-// If this is our own node id, then we skip it for bootstrap, in case we are a bootstrap node
-if self.routing_table().node_id() == node_id_key {
-continue;
-}
-
-// Resolve each record and store in node dial infos list
-let mut bootstrap_record = BootstrapRecord {
-min_version,
-max_version,
-dial_info_details: Vec::new(),
-};
-for rec in &records[5..] {
-let rec = rec.trim();
-let dial_infos = match DialInfo::try_vec_from_short(rec, hostname_str) {
-Ok(dis) => dis,
-Err(e) => {
-warn!("Couldn't resolve bootstrap node dial info {}: {}", rec, e);
continue;
}
};

-for di in dial_infos {
+// Node Id
-bootstrap_record.dial_info_details.push(DialInfoDetail {
+let node_id_str = &records[3];
-dial_info: di,
+let node_id_key = match DHTKey::try_decode(node_id_str) {
-class: DialInfoClass::Direct,
+Ok(v) => v,
-});
+Err(e) => {
+warn!(
+"Invalid node id in bootstrap node record {}: {}",
+node_id_str, e
+);
+continue;
+}
+};

+// Hostname
+let hostname_str = &records[4];

+// If this is our own node id, then we skip it for bootstrap, in case we are a bootstrap node
+if self.routing_table().node_id() == node_id_key {
+continue;
}

+// Resolve each record and store in node dial infos list
+let mut bootstrap_record = BootstrapRecord {
+min_version,
+max_version,
+dial_info_details: Vec::new(),
+};
+for rec in &records[5..] {
+let rec = rec.trim();
+let dial_infos = match DialInfo::try_vec_from_short(rec, hostname_str) {
+Ok(dis) => dis,
+Err(e) => {
+warn!(
+"Couldn't resolve bootstrap node dial info {}: {}",
+rec, e
+);
+continue;
+}
+};

+for di in dial_infos {
+bootstrap_record.dial_info_details.push(DialInfoDetail {
+dial_info: di,
+class: DialInfoClass::Direct,
+});
+}
+}
+bootstrap_records.push((node_id_key, bootstrap_record));
}
-bootstrap_records.push((node_id_key, bootstrap_record));
+Some(bootstrap_records)
}
-Some(bootstrap_records)
+.instrument(Span::current()),
-});
+);
}

let mut bsmap = BootstrapRecordMap::new();
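The bootstrap TXT record format shown in the hunk above is `txt_version,min_version,max_version,nodeid,hostname,dialinfoshort*`. To make that layout concrete, here is a minimal standalone sketch of splitting such a record into its fields with plain `std`; the `BootstrapTxt` struct is illustrative and is not Veilid's `BootstrapRecord`:

#[derive(Debug)]
struct BootstrapTxt {
    txt_version: u8,
    min_version: u8,
    max_version: u8,
    node_id: String,
    hostname: String,
    short_dial_infos: Vec<String>,
}

fn parse_bootstrap_txt(record: &str) -> Result<BootstrapTxt, String> {
    // Split on commas and trim each field, as the task routine does.
    let fields: Vec<&str> = record.trim().split(',').map(|x| x.trim()).collect();
    if fields.len() < 6 {
        return Err("invalid number of fields in bootstrap txt record".to_owned());
    }
    Ok(BootstrapTxt {
        txt_version: fields[0].parse().map_err(|e| format!("bad txt_version: {}", e))?,
        min_version: fields[1].parse().map_err(|e| format!("bad min_version: {}", e))?,
        max_version: fields[2].parse().map_err(|e| format!("bad max_version: {}", e))?,
        node_id: fields[3].to_owned(),
        hostname: fields[4].to_owned(),
        // Everything after the fifth field is a short dial info entry.
        short_dial_infos: fields[5..].iter().map(|s| s.to_string()).collect(),
    })
}

fn main() {
    let rec = "0,0,0,7lxDEabK_qgjbe38RtBa3IZLrud84P6NhGP-pRTZzdQ,bootstrap-1.dev.veilid.net,T5150,U5150,W5150/ws";
    println!("{:?}", parse_bootstrap_txt(rec).unwrap());
}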
@@ -172,6 +178,7 @@ impl NetworkManager {
}

// 'direct' bootstrap task routine for systems incapable of resolving TXT records, such as browser WASM
+#[instrument(level = "trace", skip(self), err)]
pub(super) async fn direct_bootstrap_task_routine(
self,
stop_token: StopToken,
@@ -201,7 +208,8 @@ impl NetworkManager {
let routing_table = routing_table.clone();
unord.push(
// lets ask bootstrap to find ourselves now
-async move { routing_table.reverse_find_node(nr, true).await },
+async move { routing_table.reverse_find_node(nr, true).await }
+.instrument(Span::current()),
);
}
}
@@ -216,7 +224,7 @@ impl NetworkManager {
#[instrument(level = "trace", skip(self), err)]
pub(super) async fn bootstrap_task_routine(self, stop_token: StopToken) -> EyreResult<()> {
let (bootstrap, bootstrap_nodes) = {
-let c = self.config.get();
+let c = self.unlocked_inner.config.get();
(
c.network.bootstrap.clone(),
c.network.bootstrap_nodes.clone(),
@@ -248,22 +256,26 @@ impl NetworkManager {
let mut bsmap = BootstrapRecordMap::new();
let mut bootstrap_node_dial_infos = Vec::new();
for b in bootstrap_nodes {
-let ndis = NodeDialInfo::from_str(b.as_str())
+let (id_str, di_str) = b
-.wrap_err("Invalid node dial info in bootstrap entry")?;
+.split_once('@')
-bootstrap_node_dial_infos.push(ndis);
+.ok_or_else(|| eyre!("Invalid node dial info in bootstrap entry"))?;
+let node_id =
+NodeId::from_str(id_str).wrap_err("Invalid node id in bootstrap entry")?;
+let dial_info =
+DialInfo::from_str(di_str).wrap_err("Invalid dial info in bootstrap entry")?;
+bootstrap_node_dial_infos.push((node_id, dial_info));
}
-for ndi in bootstrap_node_dial_infos {
+for (node_id, dial_info) in bootstrap_node_dial_infos {
-let node_id = ndi.node_id.key;
bsmap
-.entry(node_id)
+.entry(node_id.key)
.or_insert_with(|| BootstrapRecord {
-min_version: MIN_VERSION,
+min_version: MIN_CRYPTO_VERSION,
-max_version: MAX_VERSION,
+max_version: MAX_CRYPTO_VERSION,
dial_info_details: Vec::new(),
})
.dial_info_details
.push(DialInfoDetail {
-dial_info: ndi.dial_info,
+dial_info,
class: DialInfoClass::Direct, // Bootstraps are always directly reachable
});
}
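The rewritten `bootstrap_nodes` parsing above splits each configured entry on the first `@` into a node id and a dial info string instead of going through `NodeDialInfo::from_str`. A standalone sketch of that split using `std::str::split_once`; plain strings stand in for the real `NodeId` and `DialInfo` types, and the example entry string is illustrative rather than an exact Veilid dial info format:

fn parse_bootstrap_entry(entry: &str) -> Result<(String, String), String> {
    // `split_once` yields the text before and after the first '@',
    // mirroring the `b.split_once('@')` call in the task routine.
    let (id_str, di_str) = entry
        .split_once('@')
        .ok_or_else(|| "Invalid node dial info in bootstrap entry".to_owned())?;
    Ok((id_str.to_owned(), di_str.to_owned()))
}

fn main() {
    let entry = "7lxDEabK_qgjbe38RtBa3IZLrud84P6NhGP-pRTZzdQ@bootstrap-1.dev.veilid.net:5150";
    match parse_bootstrap_entry(entry) {
        Ok((node_id, dial_info)) => println!("node_id={} dial_info={}", node_id, dial_info),
        Err(e) => eprintln!("{}", e),
    }
}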
@@ -287,36 +299,38 @@ impl NetworkManager {
if let Some(nr) = routing_table.register_node_with_signed_node_info(
RoutingDomain::PublicInternet,
k,
-SignedNodeInfo::with_no_signature(NodeInfo {
+SignedNodeInfo::Direct(SignedDirectNodeInfo::with_no_signature(NodeInfo {
network_class: NetworkClass::InboundCapable, // Bootstraps are always inbound capable
outbound_protocols: ProtocolTypeSet::only(ProtocolType::UDP), // Bootstraps do not participate in relaying and will not make outbound requests, but will have UDP enabled
address_types: AddressTypeSet::all(), // Bootstraps are always IPV4 and IPV6 capable
-min_version: v.min_version, // Minimum protocol version specified in txt record
+min_version: v.min_version, // Minimum crypto version specified in txt record
-max_version: v.max_version, // Maximum protocol version specified in txt record
+max_version: v.max_version, // Maximum crypto version specified in txt record
dial_info_detail_list: v.dial_info_details, // Dial info is as specified in the bootstrap list
-relay_peer_info: None, // Bootstraps never require a relay themselves
+})),
-}),
true,
) {
// Add this our futures to process in parallel
let routing_table = routing_table.clone();
-unord.push(async move {
+unord.push(
-// Need VALID signed peer info, so ask bootstrap to find_node of itself
+async move {
-// which will ensure it has the bootstrap's signed peer info as part of the response
+// Need VALID signed peer info, so ask bootstrap to find_node of itself
-let _ = routing_table.find_target(nr.clone()).await;
+// which will ensure it has the bootstrap's signed peer info as part of the response
+let _ = routing_table.find_target(nr.clone()).await;

// Ensure we got the signed peer info
if !nr.signed_node_info_has_valid_signature(RoutingDomain::PublicInternet) {
log_net!(warn
"bootstrap at {:?} did not return valid signed node info",
nr
);
// If this node info is invalid, it will time out after being unpingable
} else {
// otherwise this bootstrap is valid, lets ask it to find ourselves now
routing_table.reverse_find_node(nr, true).await
+}
}
-});
+.instrument(Span::current()),
+);
}
}

@@ -332,7 +346,7 @@ impl NetworkManager {
&self,
cur_ts: u64,
unord: &mut FuturesUnordered<
-SendPinBoxFuture<Result<NetworkResult<Answer<SenderInfo>>, RPCError>>,
+SendPinBoxFuture<Result<NetworkResult<Answer<Option<SenderInfo>>>, RPCError>>,
>,
) -> EyreResult<()> {
let rpc = self.rpc_processor();
@@ -342,7 +356,7 @@ impl NetworkManager {
let node_refs = routing_table.get_nodes_needing_ping(RoutingDomain::PublicInternet, cur_ts);

// Look up any NAT mappings we may need to try to preserve with keepalives
-let mut mapped_port_info = routing_table.get_mapped_port_info();
+let mut mapped_port_info = routing_table.get_low_level_port_info();

// Get the PublicInternet relay if we are using one
let opt_relay_nr = routing_table.relay_node(RoutingDomain::PublicInternet);
@@ -382,7 +396,11 @@ impl NetworkManager {
let nr_filtered =
nr.filtered_clone(NodeRefFilter::new().with_dial_info_filter(dif));
log_net!("--> Keepalive ping to {:?}", nr_filtered);
-unord.push(async move { rpc.rpc_call_status(nr_filtered).await }.boxed());
+unord.push(
+async move { rpc.rpc_call_status(Destination::direct(nr_filtered)).await }
+.instrument(Span::current())
+.boxed(),
+);
did_pings = true;
}
}
@@ -392,7 +410,11 @@ impl NetworkManager {
// any mapped ports to preserve
if !did_pings {
let rpc = rpc.clone();
-unord.push(async move { rpc.rpc_call_status(nr).await }.boxed());
+unord.push(
+async move { rpc.rpc_call_status(Destination::direct(nr)).await }
+.instrument(Span::current())
+.boxed(),
+);
}
}

@@ -406,7 +428,7 @@ impl NetworkManager {
&self,
cur_ts: u64,
unord: &mut FuturesUnordered<
-SendPinBoxFuture<Result<NetworkResult<Answer<SenderInfo>>, RPCError>>,
+SendPinBoxFuture<Result<NetworkResult<Answer<Option<SenderInfo>>>, RPCError>>,
>,
) -> EyreResult<()> {
let rpc = self.rpc_processor();
@@ -420,7 +442,11 @@ impl NetworkManager {
let rpc = rpc.clone();

// Just do a single ping with the best protocol for all the nodes
-unord.push(async move { rpc.rpc_call_status(nr).await }.boxed());
+unord.push(
+async move { rpc.rpc_call_status(Destination::direct(nr)).await }
+.instrument(Span::current())
+.boxed(),
+);
}

Ok(())
@@ -464,7 +490,7 @@ impl NetworkManager {
let routing_table = self.routing_table();
let mut ord = FuturesOrdered::new();
let min_peer_count = {
-let c = self.config.get();
+let c = self.unlocked_inner.config.get();
c.network.dht.min_peer_count as usize
};

@@ -472,14 +498,17 @@ impl NetworkManager {
// even the unreliable ones, and ask them to find nodes close to our node too
let noderefs = routing_table.find_fastest_nodes(
min_peer_count,
-|_k, _v| true,
+VecDeque::new(),
-|k: DHTKey, v: Option<Arc<BucketEntry>>| {
+|_rti, k: DHTKey, v: Option<Arc<BucketEntry>>| {
NodeRef::new(routing_table.clone(), k, v.unwrap().clone(), None)
},
);
for nr in noderefs {
let routing_table = routing_table.clone();
-ord.push_back(async move { routing_table.reverse_find_node(nr, false).await });
+ord.push_back(
+async move { routing_table.reverse_find_node(nr, false).await }
+.instrument(Span::current()),
+);
}

// do peer minimum search in order from fastest to slowest
@@ -498,8 +527,9 @@ impl NetworkManager {
) -> EyreResult<()> {
// Get our node's current node info and network class and do the right thing
let routing_table = self.routing_table();
-let node_info = routing_table.get_own_node_info(RoutingDomain::PublicInternet);
+let own_peer_info = routing_table.get_own_peer_info(RoutingDomain::PublicInternet);
-let network_class = self.get_network_class(RoutingDomain::PublicInternet);
+let own_node_info = own_peer_info.signed_node_info.node_info();
+let network_class = routing_table.get_network_class(RoutingDomain::PublicInternet);

// Get routing domain editor
let mut editor = routing_table.edit_routing_domain(RoutingDomain::PublicInternet);
@@ -515,7 +545,7 @@ impl NetworkManager {
info!("Relay node died, dropping relay {}", relay_node);
editor.clear_relay_node();
false
-} else if !node_info.requires_relay() {
+} else if !own_node_info.requires_relay() {
info!(
"Relay node no longer required, dropping relay {}",
relay_node
@@ -531,8 +561,9 @@ impl NetworkManager {
};

// Do we need a relay?
-if !has_relay && node_info.requires_relay() {
+if !has_relay && own_node_info.requires_relay() {
-// Do we need an outbound relay?
+// Do we want an outbound relay?
+let mut got_outbound_relay = false;
if network_class.outbound_wants_relay() {
// The outbound relay is the host of the PWA
if let Some(outbound_relay_peerinfo) = intf::get_outbound_relay_peer().await {
@@ -545,10 +576,11 @@ impl NetworkManager {
) {
info!("Outbound relay node selected: {}", nr);
editor.set_relay_node(nr);
+got_outbound_relay = true;
}
}
-// Otherwise we must need an inbound relay
+}
-} else {
+if !got_outbound_relay {
// Find a node in our routing table that is an acceptable inbound relay
if let Some(nr) =
routing_table.find_inbound_relay(RoutingDomain::PublicInternet, cur_ts)
@@ -566,6 +598,34 @@ impl NetworkManager {
Ok(())
}

+// Keep private routes assigned and accessible
+#[instrument(level = "trace", skip(self), err)]
+pub(super) async fn private_route_management_task_routine(
+self,
+_stop_token: StopToken,
+_last_ts: u64,
+cur_ts: u64,
+) -> EyreResult<()> {
+// Get our node's current node info and network class and do the right thing
+let routing_table = self.routing_table();
+let own_peer_info = routing_table.get_own_peer_info(RoutingDomain::PublicInternet);
+let network_class = routing_table.get_network_class(RoutingDomain::PublicInternet);
+
+// Get routing domain editor
+let mut editor = routing_table.edit_routing_domain(RoutingDomain::PublicInternet);
+
+// Do we know our network class yet?
+if let Some(network_class) = network_class {
+
+// see if we have any routes that need extending
+}
+
+// Commit the changes
+editor.commit().await;
+
+Ok(())
+}

// Compute transfer statistics for the low level network
#[instrument(level = "trace", skip(self), err)]
pub(super) async fn rolling_transfers_task_routine(
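The relay-management hunk above replaces the old `else` branch with a `got_outbound_relay` flag, so a node that wants an outbound relay but cannot obtain one still falls through to inbound-relay selection. A compact sketch of that control flow; the option-returning helpers below are placeholders, not the real routing-table API:

fn pick_relay(
    wants_outbound: bool,
    outbound_candidate: Option<&str>,
    inbound_candidate: Option<&str>,
) -> Option<String> {
    let mut got_outbound_relay = false;
    let mut relay = None;
    if wants_outbound {
        // Prefer the outbound relay when one is available.
        if let Some(nr) = outbound_candidate {
            relay = Some(nr.to_owned());
            got_outbound_relay = true;
        }
    }
    if !got_outbound_relay {
        // Fall back to an inbound relay; before this change the fallback was
        // skipped whenever outbound relaying was wanted, even if none was found.
        if let Some(nr) = inbound_candidate {
            relay = Some(nr.to_owned());
        }
    }
    relay
}

fn main() {
    assert_eq!(pick_relay(true, None, Some("inbound-node")), Some("inbound-node".to_owned()));
    assert_eq!(pick_relay(true, Some("pwa-host"), Some("inbound-node")), Some("pwa-host".to_owned()));
}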
@@ -52,6 +52,7 @@ pub async fn test_add_get_remove() {
);

let c1 = NetworkConnection::dummy(1, a1);
+let c1b = NetworkConnection::dummy(10, a1);
let c1h = c1.get_handle();
let c2 = NetworkConnection::dummy(2, a2);
let c3 = NetworkConnection::dummy(3, a3);
@@ -65,6 +66,7 @@ pub async fn test_add_get_remove() {
assert_eq!(table.connection_count(), 0);
assert_eq!(table.get_connection_by_descriptor(a1), None);
table.add_connection(c1).unwrap();
+assert!(table.add_connection(c1b).is_err());

assert_eq!(table.connection_count(), 1);
assert!(table.remove_connection_by_id(4).is_none());
@@ -10,25 +10,43 @@ use std::io;
/////////////////////////////////////////////////////////////////

struct NetworkInner {
-network_manager: NetworkManager,
network_started: bool,
network_needs_restart: bool,
-protocol_config: Option<ProtocolConfig>,
+protocol_config: ProtocolConfig,
+}
+
+struct NetworkUnlockedInner {
+// Accessors
+routing_table: RoutingTable,
+network_manager: NetworkManager,
+connection_manager: ConnectionManager,
}

#[derive(Clone)]
pub struct Network {
config: VeilidConfig,
inner: Arc<Mutex<NetworkInner>>,
+unlocked_inner: Arc<NetworkUnlockedInner>,
}

impl Network {
-fn new_inner(network_manager: NetworkManager) -> NetworkInner {
+fn new_inner() -> NetworkInner {
NetworkInner {
-network_manager,
network_started: false,
network_needs_restart: false,
-protocol_config: None, //join_handle: None,
+protocol_config: Default::default(),
+}
+}
+
+fn new_unlocked_inner(
+network_manager: NetworkManager,
+routing_table: RoutingTable,
+connection_manager: ConnectionManager,
+) -> NetworkUnlockedInner {
+NetworkUnlockedInner {
+network_manager,
+routing_table,
+connection_manager,
}
}

@@ -39,15 +57,23 @@ impl Network {
) -> Self {
Self {
config: network_manager.config(),
-inner: Arc::new(Mutex::new(Self::new_inner(network_manager))),
+inner: Arc::new(Mutex::new(Self::new_inner())),
+unlocked_inner: Arc::new(Self::new_unlocked_inner(
+network_manager,
+routing_table,
+connection_manager,
+)),
}
}

fn network_manager(&self) -> NetworkManager {
-self.inner.lock().network_manager.clone()
+self.unlocked_inner.network_manager.clone()
+}
+fn routing_table(&self) -> RoutingTable {
+self.unlocked_inner.routing_table.clone()
}
fn connection_manager(&self) -> ConnectionManager {
-self.inner.lock().network_manager.connection_manager()
+self.unlocked_inner.connection_manager.clone()
}

/////////////////////////////////////////////////////////////////
@@ -225,7 +251,7 @@ impl Network {

pub async fn startup(&self) -> EyreResult<()> {
// get protocol config
-self.inner.lock().protocol_config = Some({
+self.inner.lock().protocol_config = {
let c = self.config.get();
let inbound = ProtocolTypeSet::new();
let mut outbound = ProtocolTypeSet::new();
@@ -247,7 +273,7 @@ impl Network {
family_global,
family_local,
}
-});
+};

self.inner.lock().network_started = true;
Ok(())
@@ -269,20 +295,26 @@ impl Network {
trace!("stopping network");

// Reset state
-let network_manager = self.inner.lock().network_manager.clone();
+let routing_table = self.routing_table();
-let routing_table = network_manager.routing_table();

// Drop all dial info
-routing_table.clear_dial_info_details(RoutingDomain::PublicInternet);
+let mut editor = routing_table.edit_routing_domain(RoutingDomain::PublicInternet);
-routing_table.clear_dial_info_details(RoutingDomain::LocalNetwork);
+editor.disable_node_info_updates();
+editor.clear_dial_info_details();
+editor.commit().await;
+
+let mut editor = routing_table.edit_routing_domain(RoutingDomain::LocalNetwork);
+editor.disable_node_info_updates();
+editor.clear_dial_info_details();
+editor.commit().await;

// Cancels all async background tasks by dropping join handles
-*self.inner.lock() = Self::new_inner(network_manager);
+*self.inner.lock() = Self::new_inner();

trace!("network stopped");
}

-pub fn is_usable_interface_address(&self, addr: IpAddr) -> bool {
+pub fn is_usable_interface_address(&self, _addr: IpAddr) -> bool {
false
}

@@ -291,12 +323,15 @@ impl Network {
}

//////////////////////////////////////////

-pub fn set_needs_public_dial_info_check(&self, _punishment: Option<Box<dyn FnOnce() + Send + 'static>>) {
+pub fn set_needs_public_dial_info_check(
+&self,
+_punishment: Option<Box<dyn FnOnce() + Send + 'static>>,
+) {
//
}

-pub fn doing_public_dial_info_check(&self) -> bool {
+pub fn needs_public_dial_info_check(&self) -> bool {
false
}

@@ -309,7 +344,7 @@ impl Network {
};
}

-pub fn get_protocol_config(&self) -> Option<ProtocolConfig> {
+pub fn get_protocol_config(&self) -> ProtocolConfig {
self.inner.lock().protocol_config.clone()
}

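The WASM `Network` refactor above moves the clone-only handles out of the mutex-guarded `NetworkInner` into a separate `NetworkUnlockedInner`, so fetching them no longer requires taking the lock. A minimal sketch of that split using `std::sync::Mutex` (the diff itself locks without `unwrap`, which suggests a `parking_lot`-style mutex; the field names below are illustrative):

use std::sync::{Arc, Mutex};

// Mutable state that genuinely needs the lock.
struct NetworkInner {
    network_started: bool,
}

// Immutable handles that never change after construction.
struct NetworkUnlockedInner {
    node_name: String,
}

#[derive(Clone)]
struct Network {
    inner: Arc<Mutex<NetworkInner>>,
    unlocked_inner: Arc<NetworkUnlockedInner>,
}

impl Network {
    fn new(node_name: String) -> Self {
        Self {
            inner: Arc::new(Mutex::new(NetworkInner { network_started: false })),
            unlocked_inner: Arc::new(NetworkUnlockedInner { node_name }),
        }
    }
    // Accessor that used to require locking `inner`; now it is lock-free.
    fn node_name(&self) -> String {
        self.unlocked_inner.node_name.clone()
    }
    fn start(&self) {
        self.inner.lock().unwrap().network_started = true;
    }
}

fn main() {
    let net = Network::new("example".to_owned());
    net.start();
    println!("{}", net.node_name());
}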
@@ -75,7 +75,7 @@ impl WebsocketNetworkConnection {
Ok(out)
}

-#[instrument(level = "trace", err, skip(self), fields(network_result, ret.len))]
+// #[instrument(level = "trace", err, skip(self), fields(network_result, ret.len))]
pub async fn recv(&self) -> io::Result<NetworkResult<Vec<u8>>> {
let out = match SendWrapper::new(self.inner.ws_stream.clone().next()).await {
Some(WsMessage::Binary(v)) => {
@@ -95,7 +95,7 @@ impl WebsocketNetworkConnection {
bail_io_error_other!("WS stream closed");
}
};
-tracing::Span::current().record("network_result", &tracing::field::display(&out));
+// tracing::Span::current().record("network_result", &tracing::field::display(&out));
Ok(out)
}
}
@@ -106,13 +106,13 @@ impl WebsocketNetworkConnection {
pub struct WebsocketProtocolHandler {}

impl WebsocketProtocolHandler {
-#[instrument(level = "trace", err)]
+#[instrument(level = "trace", ret, err)]
pub async fn connect(
dial_info: &DialInfo,
timeout_ms: u32,
) -> io::Result<NetworkResult<ProtocolNetworkConnection>> {
// Split dial info up
-let (tls, scheme) = match dial_info {
+let (_tls, scheme) = match dial_info {
DialInfo::WS(_) => (false, "ws"),
DialInfo::WSS(_) => (true, "wss"),
_ => panic!("invalid dialinfo for WS/WSS protocol"),
@@ -1,20 +1,30 @@
use crate::*;
use core::fmt;
-use dht::*;
+use crypto::*;
use futures_util::stream::{FuturesUnordered, StreamExt};
use network_manager::*;
use routing_table::*;
use stop_token::future::FutureExt;
use xx::*;

-#[derive(Clone, Debug, PartialEq, Eq)]
+#[derive(Clone, Debug)]
pub enum ReceiptEvent {
ReturnedOutOfBand,
ReturnedInBand { inbound_noderef: NodeRef },
+ReturnedSafety,
+ReturnedPrivate { private_route: DHTKey },
Expired,
Cancelled,
}

+#[derive(Clone, Debug)]
+pub enum ReceiptReturned {
+OutOfBand,
+InBand { inbound_noderef: NodeRef },
+Safety,
+Private { private_route: DHTKey },
+}

pub trait ReceiptCallback: Send + 'static {
fn call(
&self,
@@ -246,7 +256,7 @@ impl ReceiptManager {
if let Some(callback) =
Self::perform_callback(ReceiptEvent::Expired, &mut expired_record_mut)
{
-callbacks.push(callback)
+callbacks.push(callback.instrument(Span::current()))
}
}

@@ -394,17 +404,18 @@ impl ReceiptManager {
pub async fn handle_receipt(
&self,
receipt: Receipt,
-inbound_noderef: Option<NodeRef>,
+receipt_returned: ReceiptReturned,
) -> NetworkResult<()> {
let receipt_nonce = receipt.get_nonce();
let extra_data = receipt.get_extra_data();

log_rpc!(debug "<<== RECEIPT {} <- {}{}",
receipt_nonce.encode(),
-if let Some(nr) = &inbound_noderef {
+match receipt_returned {
-nr.to_string()
+ReceiptReturned::OutOfBand => "OutOfBand".to_owned(),
-} else {
+ReceiptReturned::InBand { ref inbound_noderef } => format!("InBand({})", inbound_noderef),
-"DIRECT".to_owned()
+ReceiptReturned::Safety => "Safety".to_owned(),
+ReceiptReturned::Private { ref private_route } => format!("Private({})", private_route),
},
if extra_data.is_empty() {
"".to_owned()
@@ -435,10 +446,17 @@ impl ReceiptManager {
record_mut.returns_so_far += 1;

// Get the receipt event to return
-let receipt_event = if let Some(inbound_noderef) = inbound_noderef {
+let receipt_event = match receipt_returned {
-ReceiptEvent::ReturnedInBand { inbound_noderef }
+ReceiptReturned::OutOfBand => ReceiptEvent::ReturnedOutOfBand,
-} else {
+ReceiptReturned::Safety => ReceiptEvent::ReturnedSafety,
-ReceiptEvent::ReturnedOutOfBand
+ReceiptReturned::InBand {
+ref inbound_noderef,
+} => ReceiptEvent::ReturnedInBand {
+inbound_noderef: inbound_noderef.clone(),
+},
+ReceiptReturned::Private { ref private_route } => ReceiptEvent::ReturnedPrivate {
+private_route: private_route.clone(),
+},
};

let callback_future = Self::perform_callback(receipt_event, &mut record_mut);
@@ -1,5 +1,6 @@
use super::*;
use core::sync::atomic::Ordering;
+use rkyv::{Archive as RkyvArchive, Deserialize as RkyvDeserialize, Serialize as RkyvSerialize};

pub struct Bucket {
routing_table: RoutingTable,
@@ -8,6 +9,20 @@ pub struct Bucket {
}
pub(super) type EntriesIter<'a> = alloc::collections::btree_map::Iter<'a, DHTKey, Arc<BucketEntry>>;

+#[derive(Debug, RkyvArchive, RkyvSerialize, RkyvDeserialize)]
+#[archive_attr(repr(C), derive(CheckBytes))]
+struct BucketEntryData {
+key: DHTKey,
+value: Vec<u8>,
+}
+
+#[derive(Debug, RkyvArchive, RkyvSerialize, RkyvDeserialize)]
+#[archive_attr(repr(C), derive(CheckBytes))]
+struct BucketData {
+entries: Vec<BucketEntryData>,
+newest_entry: Option<DHTKey>,
+}
+
fn state_ordering(state: BucketEntryState) -> usize {
match state {
BucketEntryState::Dead => 0,
@@ -25,6 +40,36 @@ impl Bucket {
}
}

+pub(super) fn load_bucket(&mut self, data: Vec<u8>) -> EyreResult<()> {
+let bucket_data: BucketData = from_rkyv(data)?;
+
+for e in bucket_data.entries {
+let entryinner = from_rkyv(e.value).wrap_err("failed to deserialize bucket entry")?;
+self.entries
+.insert(e.key, Arc::new(BucketEntry::new_with_inner(entryinner)));
+}
+
+self.newest_entry = bucket_data.newest_entry;
+
+Ok(())
+}
+pub(super) fn save_bucket(&self) -> EyreResult<Vec<u8>> {
+let mut entries = Vec::new();
+for (k, v) in &self.entries {
+let entry_bytes = v.with_inner(|e| to_rkyv(e))?;
+entries.push(BucketEntryData {
+key: *k,
+value: entry_bytes,
+});
+}
+let bucket_data = BucketData {
+entries,
+newest_entry: self.newest_entry.clone(),
+};
+let out = to_rkyv(&bucket_data)?;
+Ok(out)
+}
+
pub(super) fn add_entry(&mut self, node_id: DHTKey) -> NodeRef {
log_rtab!("Node added: {}", node_id.encode());

@@ -48,13 +93,6 @@ impl Bucket {
// newest_entry is updated by kick_bucket()
}

-pub(super) fn roll_transfers(&self, last_ts: u64, cur_ts: u64) {
-// Called every ROLLING_TRANSFERS_INTERVAL_SECS
-for (_k, v) in &self.entries {
-v.with_mut(|e| e.roll_transfers(last_ts, cur_ts));
-}
-}
-
pub(super) fn entry(&self, key: &DHTKey) -> Option<Arc<BucketEntry>> {
self.entries.get(key).cloned()
}
@@ -87,8 +125,8 @@ impl Bucket {
if a.0 == b.0 {
return core::cmp::Ordering::Equal;
}
-a.1.with(|ea| {
+a.1.with_inner(|ea| {
-b.1.with(|eb| {
+b.1.with_inner(|eb| {
let astate = state_ordering(ea.state(cur_ts));
let bstate = state_ordering(eb.state(cur_ts));
// first kick dead nodes, then unreliable nodes
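The new `save_bucket`/`load_bucket` pair above serializes each bucket entry to bytes with the project's rkyv helpers and wraps them in a `BucketData` record keyed by `DHTKey`. The sketch below shows the same save/load shape with a hand-rolled length-prefixed byte format instead of rkyv, so the roundtrip can be followed without the real `DHTKey` and `BucketEntry` types; all names are stand-ins and `newest_entry` is omitted for brevity:

use std::collections::BTreeMap;

// Stand-ins for DHTKey and the serialized BucketEntryInner bytes.
type Key = [u8; 4];

fn save_bucket(entries: &BTreeMap<Key, Vec<u8>>) -> Vec<u8> {
    let mut out = Vec::new();
    out.extend((entries.len() as u32).to_le_bytes());
    for (k, v) in entries {
        out.extend(k); // fixed-size key
        out.extend((v.len() as u32).to_le_bytes()); // length prefix for the entry bytes
        out.extend(v);
    }
    out
}

fn load_bucket(data: &[u8]) -> Option<BTreeMap<Key, Vec<u8>>> {
    let mut entries = BTreeMap::new();
    let mut pos = 0usize;
    let count = u32::from_le_bytes(data.get(pos..pos + 4)?.try_into().ok()?) as usize;
    pos += 4;
    for _ in 0..count {
        let key: Key = data.get(pos..pos + 4)?.try_into().ok()?;
        pos += 4;
        let len = u32::from_le_bytes(data.get(pos..pos + 4)?.try_into().ok()?) as usize;
        pos += 4;
        let value = data.get(pos..pos + len)?.to_vec();
        pos += len;
        entries.insert(key, value);
    }
    Some(entries)
}

fn main() {
    let mut entries = BTreeMap::new();
    entries.insert(*b"key1", vec![1, 2, 3]);
    entries.insert(*b"key2", vec![9]);
    let bytes = save_bucket(&entries);
    assert_eq!(load_bucket(&bytes), Some(entries));
}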
@@ -1,5 +1,8 @@
use super::*;
use core::sync::atomic::{AtomicU32, Ordering};
+use rkyv::{
+with::Skip, Archive as RkyvArchive, Deserialize as RkyvDeserialize, Serialize as RkyvSerialize,
+};

/// Reliable pings are done with increased spacing between pings

@@ -39,10 +42,11 @@ pub enum BucketEntryState {
}

#[derive(Debug, Clone, Eq, PartialEq, PartialOrd, Ord, Hash)]
-struct LastConnectionKey(ProtocolType, AddressType);
+pub struct LastConnectionKey(ProtocolType, AddressType);

/// Bucket entry information specific to the LocalNetwork RoutingDomain
-#[derive(Debug)]
+#[derive(Debug, RkyvArchive, RkyvSerialize, RkyvDeserialize)]
+#[archive_attr(repr(C), derive(CheckBytes))]
pub struct BucketEntryPublicInternet {
/// The PublicInternet node info
signed_node_info: Option<Box<SignedNodeInfo>>,
@@ -53,7 +57,8 @@ pub struct BucketEntryPublicInternet {
}

/// Bucket entry information specific to the LocalNetwork RoutingDomain
-#[derive(Debug)]
+#[derive(Debug, RkyvArchive, RkyvSerialize, RkyvDeserialize)]
+#[archive_attr(repr(C), derive(CheckBytes))]
pub struct BucketEntryLocalNetwork {
/// The LocalNetwork node info
signed_node_info: Option<Box<SignedNodeInfo>>,
@@ -63,19 +68,51 @@ pub struct BucketEntryLocalNetwork {
node_status: Option<LocalNetworkNodeStatus>,
}

-#[derive(Debug)]
+/// A range of cryptography versions supported by this entry
+#[derive(Copy, Clone, Debug, RkyvArchive, RkyvSerialize, RkyvDeserialize)]
+#[archive_attr(repr(C), derive(CheckBytes))]
+pub struct VersionRange {
+/// The minimum cryptography version supported by this entry
+pub min: u8,
+/// The maximum cryptography version supported by this entry
+pub max: u8,
+}
+
+/// The data associated with each bucket entry
+#[derive(Debug, RkyvArchive, RkyvSerialize, RkyvDeserialize)]
+#[archive_attr(repr(C), derive(CheckBytes))]
pub struct BucketEntryInner {
-min_max_version: Option<(u8, u8)>,
+/// The minimum and maximum range of cryptography versions supported by the node,
+/// inclusive of the requirements of any relay the node may be using
+min_max_version: Option<VersionRange>,
+/// If this node has updated it's SignedNodeInfo since our network
+/// and dial info has last changed, for example when our IP address changes
+/// Used to determine if we should make this entry 'live' again when we receive a signednodeinfo update that
+/// has the same timestamp, because if we change our own IP address or network class it may be possible for nodes that were
+/// unreachable may now be reachable with the same SignedNodeInfo/DialInfo
updated_since_last_network_change: bool,
+/// The last connection descriptors used to contact this node, per protocol type
+#[with(Skip)]
last_connections: BTreeMap<LastConnectionKey, (ConnectionDescriptor, u64)>,
+/// The node info for this entry on the publicinternet routing domain
public_internet: BucketEntryPublicInternet,
+/// The node info for this entry on the localnetwork routing domain
local_network: BucketEntryLocalNetwork,
+/// Statistics gathered for the peer
peer_stats: PeerStats,
+/// The accounting for the latency statistics
+#[with(Skip)]
latency_stats_accounting: LatencyStatsAccounting,
+/// The accounting for the transfer statistics
+#[with(Skip)]
transfer_stats_accounting: TransferStatsAccounting,
+/// Tracking identifier for NodeRef debugging
#[cfg(feature = "tracking")]
+#[with(Skip)]
next_track_id: usize,
+/// Backtraces for NodeRef debugging
#[cfg(feature = "tracking")]
+#[with(Skip)]
node_ref_tracks: HashMap<usize, backtrace::Backtrace>,
}

@@ -132,6 +169,28 @@ impl BucketEntryInner {
}
}

+// Less is more reliable then older
+pub fn cmp_oldest_reliable(cur_ts: u64, e1: &Self, e2: &Self) -> std::cmp::Ordering {
+// Reverse compare so most reliable is at front
+let ret = e2.state(cur_ts).cmp(&e1.state(cur_ts));
+if ret != std::cmp::Ordering::Equal {
+return ret;
+}
+
+// Lower timestamp to the front, recent or no timestamp is at the end
+if let Some(e1_ts) = &e1.peer_stats.rpc_stats.first_consecutive_seen_ts {
+if let Some(e2_ts) = &e2.peer_stats.rpc_stats.first_consecutive_seen_ts {
+e1_ts.cmp(&e2_ts)
+} else {
+std::cmp::Ordering::Less
+}
+} else if e2.peer_stats.rpc_stats.first_consecutive_seen_ts.is_some() {
+std::cmp::Ordering::Greater
+} else {
+std::cmp::Ordering::Equal
+}
+}

pub fn sort_fastest_reliable_fn(cur_ts: u64) -> impl FnMut(&Self, &Self) -> std::cmp::Ordering {
move |e1, e2| Self::cmp_fastest_reliable(cur_ts, e1, e2)
}
@@ -159,28 +218,42 @@ impl BucketEntryInner {

// See if we have an existing signed_node_info to update or not
if let Some(current_sni) = opt_current_sni {
-// If the timestamp hasn't changed or is less, ignore this update
+// Always allow overwriting invalid/unsigned node
-if signed_node_info.timestamp <= current_sni.timestamp {
+if current_sni.has_valid_signature() {
-// If we received a node update with the same timestamp
+// If the timestamp hasn't changed or is less, ignore this update
-// we can make this node live again, but only if our network has recently changed
+if signed_node_info.timestamp() <= current_sni.timestamp() {
-// which may make nodes that were unreachable now reachable with the same dialinfo
+// If we received a node update with the same timestamp
-if !self.updated_since_last_network_change
+// we can make this node live again, but only if our network has recently changed
-&& signed_node_info.timestamp == current_sni.timestamp
+// which may make nodes that were unreachable now reachable with the same dialinfo
-{
+if !self.updated_since_last_network_change
-// No need to update the signednodeinfo though since the timestamp is the same
+&& signed_node_info.timestamp() == current_sni.timestamp()
-// Touch the node and let it try to live again
+{
-self.updated_since_last_network_change = true;
+// No need to update the signednodeinfo though since the timestamp is the same
-self.touch_last_seen(intf::get_timestamp());
+// Touch the node and let it try to live again
+self.updated_since_last_network_change = true;
+self.touch_last_seen(intf::get_timestamp());
+}
+return;
}
-return;
}
}

-// Update the protocol min/max version we have
+// Update the protocol min/max version we have to use, to include relay requirements if needed
-self.min_max_version = Some((
+let mut version_range = VersionRange {
-signed_node_info.node_info.min_version,
+min: signed_node_info.node_info().min_version,
-signed_node_info.node_info.max_version,
+max: signed_node_info.node_info().max_version,
-));
+};
+if let Some(relay_info) = signed_node_info.relay_info() {
+version_range.min.max_assign(relay_info.min_version);
+version_range.max.min_assign(relay_info.max_version);
+}
+if version_range.min <= version_range.max {
+// Can be reached with at least one crypto version
+self.min_max_version = Some(version_range);
+} else {
+// No valid crypto version in range
+self.min_max_version = None;
+}

// Update the signed node info
*opt_current_sni = Some(Box::new(signed_node_info));
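The hunk above narrows an entry's usable crypto version range by its relay's range and clears it when the intersection is empty. A standalone sketch of that intersection; `max_assign` and `min_assign` in the diff come from the project's `xx` helpers, so plain `max` and `min` are used here instead:

#[derive(Copy, Clone, Debug, PartialEq)]
struct VersionRange {
    min: u8,
    max: u8,
}

fn intersect(node: VersionRange, relay: Option<VersionRange>) -> Option<VersionRange> {
    let mut range = node;
    if let Some(relay) = relay {
        // Raise the floor and lower the ceiling to what both node and relay support.
        range.min = range.min.max(relay.min);
        range.max = range.max.min(relay.max);
    }
    // An inverted range means there is no crypto version both sides can use.
    if range.min <= range.max {
        Some(range)
    } else {
        None
    }
}

fn main() {
    let node = VersionRange { min: 0, max: 3 };
    assert_eq!(
        intersect(node, Some(VersionRange { min: 2, max: 5 })),
        Some(VersionRange { min: 2, max: 3 })
    );
    assert_eq!(intersect(node, Some(VersionRange { min: 4, max: 5 })), None);
}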
||||||
@ -207,7 +280,7 @@ impl BucketEntryInner {
|
|||||||
RoutingDomain::LocalNetwork => &self.local_network.signed_node_info,
|
RoutingDomain::LocalNetwork => &self.local_network.signed_node_info,
|
||||||
RoutingDomain::PublicInternet => &self.public_internet.signed_node_info,
|
RoutingDomain::PublicInternet => &self.public_internet.signed_node_info,
|
||||||
};
|
};
|
||||||
opt_current_sni.as_ref().map(|s| &s.node_info)
|
opt_current_sni.as_ref().map(|s| s.node_info())
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn signed_node_info(&self, routing_domain: RoutingDomain) -> Option<&SignedNodeInfo> {
|
pub fn signed_node_info(&self, routing_domain: RoutingDomain) -> Option<&SignedNodeInfo> {
|
||||||
@ -264,37 +337,54 @@ impl BucketEntryInner {
|
|||||||
self.last_connections.clear();
|
self.last_connections.clear();
|
||||||
}
|
}
|
||||||
|
|
||||||
// Gets the best 'last connection' that matches a set of routing domain, protocol types and address types
|
// Gets all the 'last connections' that match a particular filter
|
||||||
pub(super) fn last_connection(
|
pub(super) fn last_connections(
|
||||||
&self,
|
&self,
|
||||||
routing_table_inner: &RoutingTableInner,
|
rti: &RoutingTableInner,
|
||||||
node_ref_filter: Option<NodeRefFilter>,
|
filter: Option<NodeRefFilter>,
|
||||||
) -> Option<(ConnectionDescriptor, u64)> {
|
) -> Vec<(ConnectionDescriptor, u64)> {
|
||||||
// Iterate peer scopes and protocol types and address type in order to ensure we pick the preferred protocols if all else is the same
|
let mut out: Vec<(ConnectionDescriptor, u64)> = self
|
||||||
let nrf = node_ref_filter.unwrap_or_default();
|
.last_connections
|
||||||
for pt in nrf.dial_info_filter.protocol_type_set {
|
.iter()
|
||||||
for at in nrf.dial_info_filter.address_type_set {
|
.filter_map(|(k, v)| {
|
||||||
let key = LastConnectionKey(pt, at);
|
let include = if let Some(filter) = &filter {
|
||||||
if let Some(v) = self.last_connections.get(&key) {
|
let remote_address = v.0.remote_address().address();
|
||||||
// Verify this connection could be in the filtered routing domain
|
if let Some(routing_domain) = rti.routing_domain_for_address(remote_address) {
|
||||||
let address = v.0.remote_address().address();
|
if filter.routing_domain_set.contains(routing_domain)
|
||||||
if let Some(rd) =
|
&& filter.dial_info_filter.protocol_type_set.contains(k.0)
|
||||||
RoutingTable::routing_domain_for_address_inner(routing_table_inner, address)
|
&& filter.dial_info_filter.address_type_set.contains(k.1)
|
||||||
{
|
{
|
||||||
if nrf.routing_domain_set.contains(rd) {
|
// matches filter
|
||||||
return Some(*v);
|
true
|
||||||
|
} else {
|
||||||
|
// does not match filter
|
||||||
|
false
|
||||||
}
|
}
|
||||||
|
} else {
|
||||||
|
// no valid routing domain
|
||||||
|
false
|
||||||
}
|
}
|
||||||
|
} else {
|
||||||
|
// no filter
|
||||||
|
true
|
||||||
|
};
|
||||||
|
if include {
|
||||||
|
Some(v.clone())
|
||||||
|
} else {
|
||||||
|
None
|
||||||
}
|
}
|
||||||
}
|
})
|
||||||
}
|
.collect();
|
||||||
None
|
// Sort with newest timestamps first
|
||||||
|
out.sort_by(|a, b| b.1.cmp(&a.1));
|
||||||
|
out
|
||||||
}
|
}
|
||||||
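A reduced sketch of what the new last_connections() does, assuming toy aliases for ProtocolType, AddressType and ConnectionDescriptor: filter the stored connections by key, then sort the survivors newest first.

use std::collections::BTreeMap;

// Toy stand-ins for the real key/value types used by last_connections above.
type ProtocolType = u8;
type AddressType = u8;
type ConnectionDescriptor = u32;

// Keep every stored connection that passes the caller's filter, then sort the
// survivors with the most recently seen connection first.
fn filtered_newest_first(
    map: &BTreeMap<(ProtocolType, AddressType), (ConnectionDescriptor, u64)>,
    mut keep: impl FnMut(ProtocolType, AddressType) -> bool,
) -> Vec<(ConnectionDescriptor, u64)> {
    let mut out: Vec<(ConnectionDescriptor, u64)> = map
        .iter()
        .filter(|((pt, at), _)| keep(*pt, *at))
        .map(|(_, v)| *v)
        .collect();
    out.sort_by(|a, b| b.1.cmp(&a.1));
    out
}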
pub fn set_min_max_version(&mut self, min_max_version: (u8, u8)) {
|
|
||||||
|
pub fn set_min_max_version(&mut self, min_max_version: VersionRange) {
|
||||||
self.min_max_version = Some(min_max_version);
|
self.min_max_version = Some(min_max_version);
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn min_max_version(&self) -> Option<(u8, u8)> {
|
pub fn min_max_version(&self) -> Option<VersionRange> {
|
||||||
self.min_max_version
|
self.min_max_version
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -409,14 +499,17 @@ impl BucketEntryInner {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
fn needs_constant_ping(&self, cur_ts: u64, interval: u64) -> bool {
|
/// Return the last time we either saw a node, or asked it a question
|
||||||
// If we have not either seen the node, nor asked it a question in the last 'interval'
|
fn latest_contact_time(&self) -> Option<u64> {
|
||||||
// then we should ping it
|
self.peer_stats
|
||||||
let latest_contact_time = self
|
|
||||||
.peer_stats
|
|
||||||
.rpc_stats
|
.rpc_stats
|
||||||
.last_seen_ts
|
.last_seen_ts
|
||||||
.max(self.peer_stats.rpc_stats.last_question);
|
.max(self.peer_stats.rpc_stats.last_question)
|
||||||
|
}
|
||||||
|
|
||||||
|
fn needs_constant_ping(&self, cur_ts: u64, interval: u64) -> bool {
|
||||||
|
// If we have neither seen the node nor asked it a question in the last 'interval' then we should ping it
|
||||||
|
let latest_contact_time = self.latest_contact_time();
|
||||||
|
|
||||||
match latest_contact_time {
|
match latest_contact_time {
|
||||||
None => true,
|
None => true,
|
||||||
@ -438,14 +531,19 @@ impl BucketEntryInner {
|
|||||||
return self.needs_constant_ping(cur_ts, KEEPALIVE_PING_INTERVAL_SECS as u64);
|
return self.needs_constant_ping(cur_ts, KEEPALIVE_PING_INTERVAL_SECS as u64);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// If we don't have node status for this node, then we should ping it to get some node status
|
||||||
|
for routing_domain in RoutingDomainSet::all() {
|
||||||
|
if self.has_node_info(routing_domain.into()) {
|
||||||
|
if self.node_status(routing_domain).is_none() {
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
match state {
|
match state {
|
||||||
BucketEntryState::Reliable => {
|
BucketEntryState::Reliable => {
|
||||||
// If we are in a reliable state, we need a ping on an exponential scale
|
// If we are in a reliable state, we need a ping on an exponential scale
|
||||||
let latest_contact_time = self
|
let latest_contact_time = self.latest_contact_time();
|
||||||
.peer_stats
|
|
||||||
.rpc_stats
|
|
||||||
.last_seen_ts
|
|
||||||
.max(self.peer_stats.rpc_stats.last_question);
|
|
||||||
|
|
||||||
match latest_contact_time {
|
match latest_contact_time {
|
||||||
None => {
|
None => {
|
||||||
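The keepalive logic above reduces to a simple staleness test; a hedged sketch, with needs_ping as a hypothetical helper and timestamps in microseconds as used by intf::get_timestamp():

// needs_ping is a hypothetical helper; timestamps are microseconds as with
// intf::get_timestamp() elsewhere in this diff.
fn needs_ping(latest_contact_us: Option<u64>, now_us: u64, interval_secs: u64) -> bool {
    match latest_contact_us {
        // Never seen and never questioned: ping it.
        None => true,
        // Otherwise ping once the last contact is older than the interval.
        Some(ts) => now_us.saturating_sub(ts) >= interval_secs * 1_000_000,
    }
}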
@ -607,7 +705,37 @@ impl BucketEntry {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
pub(super) fn with<F, R>(&self, f: F) -> R
|
pub(super) fn new_with_inner(inner: BucketEntryInner) -> Self {
|
||||||
|
Self {
|
||||||
|
ref_count: AtomicU32::new(0),
|
||||||
|
inner: RwLock::new(inner),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Note, that this requires -also- holding the RoutingTable read lock, as an
|
||||||
|
// immutable reference to RoutingTableInner must be passed in to get this
|
||||||
|
// This ensures that an operation on the routing table can not change entries
|
||||||
|
// while it is being read from
|
||||||
|
pub fn with<F, R>(&self, rti: &RoutingTableInner, f: F) -> R
|
||||||
|
where
|
||||||
|
F: FnOnce(&RoutingTableInner, &BucketEntryInner) -> R,
|
||||||
|
{
|
||||||
|
let inner = self.inner.read();
|
||||||
|
f(rti, &*inner)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Note, that this requires -also- holding the RoutingTable write lock, as a
|
||||||
|
// mutable reference to RoutingTableInner must be passed in to get this
|
||||||
|
pub fn with_mut<F, R>(&self, rti: &mut RoutingTableInner, f: F) -> R
|
||||||
|
where
|
||||||
|
F: FnOnce(&mut RoutingTableInner, &mut BucketEntryInner) -> R,
|
||||||
|
{
|
||||||
|
let mut inner = self.inner.write();
|
||||||
|
f(rti, &mut *inner)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Internal inner access for RoutingTableInner only
|
||||||
|
pub(super) fn with_inner<F, R>(&self, f: F) -> R
|
||||||
where
|
where
|
||||||
F: FnOnce(&BucketEntryInner) -> R,
|
F: FnOnce(&BucketEntryInner) -> R,
|
||||||
{
|
{
|
||||||
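A toy illustration of the locking discipline described in the comments above, using hypothetical Table and Entry types rather than RoutingTableInner and BucketEntryInner; only the calling convention is the point.

use std::sync::RwLock;

struct Table {
    generation: u64,
}

struct Entry {
    inner: RwLock<u32>,
}

impl Entry {
    // The caller must already hold a borrow of the outer table state to reach
    // the entry's inner data, so entries cannot change during a table-wide read.
    fn with<R>(&self, table: &Table, f: impl FnOnce(&Table, &u32) -> R) -> R {
        let guard = self.inner.read().unwrap();
        f(table, &*guard)
    }
}

// Usage: read an entry together with a table-level field in one closure.
fn read_entry(table: &Table, e: &Entry) -> (u64, u32) {
    e.with(table, |t, v| (t.generation, *v))
}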
@ -615,7 +743,8 @@ impl BucketEntry {
|
|||||||
f(&*inner)
|
f(&*inner)
|
||||||
}
|
}
|
||||||
|
|
||||||
pub(super) fn with_mut<F, R>(&self, f: F) -> R
|
// Internal inner access for RoutingTableInner only
|
||||||
|
pub(super) fn with_mut_inner<F, R>(&self, f: F) -> R
|
||||||
where
|
where
|
||||||
F: FnOnce(&mut BucketEntryInner) -> R,
|
F: FnOnce(&mut BucketEntryInner) -> R,
|
||||||
{
|
{
|
||||||
|
@ -1,7 +1,7 @@
|
|||||||
use super::*;
|
use super::*;
|
||||||
|
|
||||||
impl RoutingTable {
|
impl RoutingTable {
|
||||||
pub fn debug_info_nodeinfo(&self) -> String {
|
pub(crate) fn debug_info_nodeinfo(&self) -> String {
|
||||||
let mut out = String::new();
|
let mut out = String::new();
|
||||||
let inner = self.inner.read();
|
let inner = self.inner.read();
|
||||||
out += "Routing Table Info:\n";
|
out += "Routing Table Info:\n";
|
||||||
@ -23,7 +23,7 @@ impl RoutingTable {
|
|||||||
out
|
out
|
||||||
}
|
}
|
||||||
|
|
||||||
pub async fn debug_info_txtrecord(&self) -> String {
|
pub(crate) async fn debug_info_txtrecord(&self) -> String {
|
||||||
let mut out = String::new();
|
let mut out = String::new();
|
||||||
|
|
||||||
let gdis = self.dial_info_details(RoutingDomain::PublicInternet);
|
let gdis = self.dial_info_details(RoutingDomain::PublicInternet);
|
||||||
@ -58,8 +58,8 @@ impl RoutingTable {
|
|||||||
out += &format!(
|
out += &format!(
|
||||||
"{},{},{},{},{}",
|
"{},{},{},{},{}",
|
||||||
BOOTSTRAP_TXT_VERSION,
|
BOOTSTRAP_TXT_VERSION,
|
||||||
MIN_VERSION,
|
MIN_CRYPTO_VERSION,
|
||||||
MAX_VERSION,
|
MAX_CRYPTO_VERSION,
|
||||||
self.node_id().encode(),
|
self.node_id().encode(),
|
||||||
some_hostname.unwrap()
|
some_hostname.unwrap()
|
||||||
);
|
);
|
||||||
@ -71,7 +71,7 @@ impl RoutingTable {
|
|||||||
out
|
out
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn debug_info_dialinfo(&self) -> String {
|
pub(crate) fn debug_info_dialinfo(&self) -> String {
|
||||||
let ldis = self.dial_info_details(RoutingDomain::LocalNetwork);
|
let ldis = self.dial_info_details(RoutingDomain::LocalNetwork);
|
||||||
let gdis = self.dial_info_details(RoutingDomain::PublicInternet);
|
let gdis = self.dial_info_details(RoutingDomain::PublicInternet);
|
||||||
let mut out = String::new();
|
let mut out = String::new();
|
||||||
@ -100,8 +100,9 @@ impl RoutingTable {
|
|||||||
out
|
out
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn debug_info_entries(&self, limit: usize, min_state: BucketEntryState) -> String {
|
pub(crate) fn debug_info_entries(&self, limit: usize, min_state: BucketEntryState) -> String {
|
||||||
let inner = self.inner.read();
|
let inner = self.inner.read();
|
||||||
|
let inner = &*inner;
|
||||||
let cur_ts = intf::get_timestamp();
|
let cur_ts = intf::get_timestamp();
|
||||||
|
|
||||||
let mut out = String::new();
|
let mut out = String::new();
|
||||||
@ -114,14 +115,14 @@ impl RoutingTable {
|
|||||||
let filtered_entries: Vec<(&DHTKey, &Arc<BucketEntry>)> = inner.buckets[b]
|
let filtered_entries: Vec<(&DHTKey, &Arc<BucketEntry>)> = inner.buckets[b]
|
||||||
.entries()
|
.entries()
|
||||||
.filter(|e| {
|
.filter(|e| {
|
||||||
let state = e.1.with(|e| e.state(cur_ts));
|
let state = e.1.with(inner, |_rti, e| e.state(cur_ts));
|
||||||
state >= min_state
|
state >= min_state
|
||||||
})
|
})
|
||||||
.collect();
|
.collect();
|
||||||
if !filtered_entries.is_empty() {
|
if !filtered_entries.is_empty() {
|
||||||
out += &format!(" Bucket #{}:\n", b);
|
out += &format!(" Bucket #{}:\n", b);
|
||||||
for e in filtered_entries {
|
for e in filtered_entries {
|
||||||
let state = e.1.with(|e| e.state(cur_ts));
|
let state = e.1.with(inner, |_rti, e| e.state(cur_ts));
|
||||||
out += &format!(
|
out += &format!(
|
||||||
" {} [{}]\n",
|
" {} [{}]\n",
|
||||||
e.0.encode(),
|
e.0.encode(),
|
||||||
@ -147,7 +148,7 @@ impl RoutingTable {
|
|||||||
out
|
out
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn debug_info_entry(&self, node_id: DHTKey) -> String {
|
pub(crate) fn debug_info_entry(&self, node_id: DHTKey) -> String {
|
||||||
let mut out = String::new();
|
let mut out = String::new();
|
||||||
out += &format!("Entry {:?}:\n", node_id);
|
out += &format!("Entry {:?}:\n", node_id);
|
||||||
if let Some(nr) = self.lookup_node_ref(node_id) {
|
if let Some(nr) = self.lookup_node_ref(node_id) {
|
||||||
@ -159,8 +160,9 @@ impl RoutingTable {
|
|||||||
out
|
out
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn debug_info_buckets(&self, min_state: BucketEntryState) -> String {
|
pub(crate) fn debug_info_buckets(&self, min_state: BucketEntryState) -> String {
|
||||||
let inner = self.inner.read();
|
let inner = self.inner.read();
|
||||||
|
let inner = &*inner;
|
||||||
let cur_ts = intf::get_timestamp();
|
let cur_ts = intf::get_timestamp();
|
||||||
|
|
||||||
let mut out = String::new();
|
let mut out = String::new();
|
||||||
@ -175,7 +177,7 @@ impl RoutingTable {
|
|||||||
while c < COLS {
|
while c < COLS {
|
||||||
let mut cnt = 0;
|
let mut cnt = 0;
|
||||||
for e in inner.buckets[b].entries() {
|
for e in inner.buckets[b].entries() {
|
||||||
if e.1.with(|e| e.state(cur_ts) >= min_state) {
|
if e.1.with(inner, |_rti, e| e.state(cur_ts) >= min_state) {
|
||||||
cnt += 1;
|
cnt += 1;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -1,615 +0,0 @@
|
|||||||
use super::*;
|
|
||||||
|
|
||||||
use crate::dht::*;
|
|
||||||
use crate::xx::*;
|
|
||||||
use crate::*;
|
|
||||||
|
|
||||||
pub type LowLevelProtocolPorts = BTreeSet<(LowLevelProtocolType, AddressType, u16)>;
|
|
||||||
pub type ProtocolToPortMapping = BTreeMap<(ProtocolType, AddressType), (LowLevelProtocolType, u16)>;
|
|
||||||
#[derive(Clone, Debug)]
|
|
||||||
pub struct MappedPortInfo {
|
|
||||||
pub low_level_protocol_ports: LowLevelProtocolPorts,
|
|
||||||
pub protocol_to_port: ProtocolToPortMapping,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl RoutingTable {
|
|
||||||
// Makes a filter that finds nodes with a matching inbound dialinfo
|
|
||||||
pub fn make_inbound_dial_info_entry_filter(
|
|
||||||
routing_domain: RoutingDomain,
|
|
||||||
dial_info_filter: DialInfoFilter,
|
|
||||||
) -> impl FnMut(&BucketEntryInner) -> bool {
|
|
||||||
// does it have matching public dial info?
|
|
||||||
move |e| {
|
|
||||||
if let Some(ni) = e.node_info(routing_domain) {
|
|
||||||
if ni
|
|
||||||
.first_filtered_dial_info_detail(|did| did.matches_filter(&dial_info_filter))
|
|
||||||
.is_some()
|
|
||||||
{
|
|
||||||
return true;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
false
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Makes a filter that finds nodes capable of dialing a particular outbound dialinfo
|
|
||||||
pub fn make_outbound_dial_info_entry_filter(
|
|
||||||
routing_domain: RoutingDomain,
|
|
||||||
dial_info: DialInfo,
|
|
||||||
) -> impl FnMut(&BucketEntryInner) -> bool {
|
|
||||||
// does the node's outbound capabilities match the dialinfo?
|
|
||||||
move |e| {
|
|
||||||
if let Some(ni) = e.node_info(routing_domain) {
|
|
||||||
let dif = DialInfoFilter::all()
|
|
||||||
.with_protocol_type_set(ni.outbound_protocols)
|
|
||||||
.with_address_type_set(ni.address_types);
|
|
||||||
if dial_info.matches_filter(&dif) {
|
|
||||||
return true;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
false
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Make a filter that wraps another filter
|
|
||||||
pub fn combine_entry_filters<F, G>(
|
|
||||||
mut f1: F,
|
|
||||||
mut f2: G,
|
|
||||||
) -> impl FnMut(&BucketEntryInner) -> bool
|
|
||||||
where
|
|
||||||
F: FnMut(&BucketEntryInner) -> bool,
|
|
||||||
G: FnMut(&BucketEntryInner) -> bool,
|
|
||||||
{
|
|
||||||
move |e| {
|
|
||||||
if !f1(e) {
|
|
||||||
return false;
|
|
||||||
}
|
|
||||||
if !f2(e) {
|
|
||||||
return false;
|
|
||||||
}
|
|
||||||
true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
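The removed combine_entry_filters boils down to predicate composition; a generic sketch under that assumption (combine is a hypothetical name, not an API of this codebase):

fn combine<T, F, G>(mut f1: F, mut f2: G) -> impl FnMut(&T) -> bool
where
    F: FnMut(&T) -> bool,
    G: FnMut(&T) -> bool,
{
    // Short-circuits: f2 only runs when f1 accepted the entry.
    move |t: &T| f1(t) && f2(t)
}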
|
|
||||||
// Retrieve the fastest nodes in the routing table matching an entry filter
|
|
||||||
pub fn find_fast_public_nodes_filtered<F>(
|
|
||||||
&self,
|
|
||||||
node_count: usize,
|
|
||||||
mut entry_filter: F,
|
|
||||||
) -> Vec<NodeRef>
|
|
||||||
where
|
|
||||||
F: FnMut(&BucketEntryInner) -> bool,
|
|
||||||
{
|
|
||||||
self.find_fastest_nodes(
|
|
||||||
// count
|
|
||||||
node_count,
|
|
||||||
// filter
|
|
||||||
|_k: DHTKey, v: Option<Arc<BucketEntry>>| {
|
|
||||||
let entry = v.unwrap();
|
|
||||||
entry.with(|e| {
|
|
||||||
// skip nodes on local network
|
|
||||||
if e.node_info(RoutingDomain::LocalNetwork).is_some() {
|
|
||||||
return false;
|
|
||||||
}
|
|
||||||
// skip nodes not on public internet
|
|
||||||
if e.node_info(RoutingDomain::PublicInternet).is_none() {
|
|
||||||
return false;
|
|
||||||
}
|
|
||||||
// skip nodes that dont match entry filter
|
|
||||||
entry_filter(e)
|
|
||||||
})
|
|
||||||
},
|
|
||||||
// transform
|
|
||||||
|k: DHTKey, v: Option<Arc<BucketEntry>>| {
|
|
||||||
NodeRef::new(self.clone(), k, v.unwrap().clone(), None)
|
|
||||||
},
|
|
||||||
)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Retrieve up to N of each type of protocol capable nodes
|
|
||||||
pub fn find_bootstrap_nodes_filtered(&self, max_per_type: usize) -> Vec<NodeRef> {
|
|
||||||
let protocol_types = vec![
|
|
||||||
ProtocolType::UDP,
|
|
||||||
ProtocolType::TCP,
|
|
||||||
ProtocolType::WS,
|
|
||||||
ProtocolType::WSS,
|
|
||||||
];
|
|
||||||
let mut nodes_proto_v4 = vec![0usize, 0usize, 0usize, 0usize];
|
|
||||||
let mut nodes_proto_v6 = vec![0usize, 0usize, 0usize, 0usize];
|
|
||||||
|
|
||||||
self.find_fastest_nodes(
|
|
||||||
// count
|
|
||||||
protocol_types.len() * 2 * max_per_type,
|
|
||||||
// filter
|
|
||||||
move |_k: DHTKey, v: Option<Arc<BucketEntry>>| {
|
|
||||||
let entry = v.unwrap();
|
|
||||||
entry.with(|e| {
|
|
||||||
// skip nodes on our local network here
|
|
||||||
if e.has_node_info(RoutingDomain::LocalNetwork.into()) {
|
|
||||||
return false;
|
|
||||||
}
|
|
||||||
|
|
||||||
// does it have some dial info we need?
|
|
||||||
let filter = |n: &NodeInfo| {
|
|
||||||
let mut keep = false;
|
|
||||||
for did in &n.dial_info_detail_list {
|
|
||||||
if matches!(did.dial_info.address_type(), AddressType::IPV4) {
|
|
||||||
for (n, protocol_type) in protocol_types.iter().enumerate() {
|
|
||||||
if nodes_proto_v4[n] < max_per_type
|
|
||||||
&& did.dial_info.protocol_type() == *protocol_type
|
|
||||||
{
|
|
||||||
nodes_proto_v4[n] += 1;
|
|
||||||
keep = true;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
} else if matches!(did.dial_info.address_type(), AddressType::IPV6) {
|
|
||||||
for (n, protocol_type) in protocol_types.iter().enumerate() {
|
|
||||||
if nodes_proto_v6[n] < max_per_type
|
|
||||||
&& did.dial_info.protocol_type() == *protocol_type
|
|
||||||
{
|
|
||||||
nodes_proto_v6[n] += 1;
|
|
||||||
keep = true;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
keep
|
|
||||||
};
|
|
||||||
|
|
||||||
e.node_info(RoutingDomain::PublicInternet)
|
|
||||||
.map(filter)
|
|
||||||
.unwrap_or(false)
|
|
||||||
})
|
|
||||||
},
|
|
||||||
// transform
|
|
||||||
|k: DHTKey, v: Option<Arc<BucketEntry>>| {
|
|
||||||
NodeRef::new(self.clone(), k, v.unwrap().clone(), None)
|
|
||||||
},
|
|
||||||
)
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn filter_has_valid_signed_node_info(
|
|
||||||
&self,
|
|
||||||
routing_domain: RoutingDomain,
|
|
||||||
v: Option<Arc<BucketEntry>>,
|
|
||||||
) -> bool {
|
|
||||||
match v {
|
|
||||||
None => self.has_valid_own_node_info(routing_domain),
|
|
||||||
Some(entry) => entry.with(|e| {
|
|
||||||
e.signed_node_info(routing_domain.into())
|
|
||||||
.map(|sni| sni.has_valid_signature())
|
|
||||||
.unwrap_or(false)
|
|
||||||
}),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn transform_to_peer_info(
|
|
||||||
&self,
|
|
||||||
routing_domain: RoutingDomain,
|
|
||||||
k: DHTKey,
|
|
||||||
v: Option<Arc<BucketEntry>>,
|
|
||||||
) -> PeerInfo {
|
|
||||||
match v {
|
|
||||||
None => self.get_own_peer_info(routing_domain),
|
|
||||||
Some(entry) => entry.with(|e| e.make_peer_info(k, routing_domain).unwrap()),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn find_peers_with_sort_and_filter<F, C, T, O>(
|
|
||||||
&self,
|
|
||||||
node_count: usize,
|
|
||||||
cur_ts: u64,
|
|
||||||
mut filter: F,
|
|
||||||
compare: C,
|
|
||||||
mut transform: T,
|
|
||||||
) -> Vec<O>
|
|
||||||
where
|
|
||||||
F: FnMut(DHTKey, Option<Arc<BucketEntry>>) -> bool,
|
|
||||||
C: FnMut(
|
|
||||||
&(DHTKey, Option<Arc<BucketEntry>>),
|
|
||||||
&(DHTKey, Option<Arc<BucketEntry>>),
|
|
||||||
) -> core::cmp::Ordering,
|
|
||||||
T: FnMut(DHTKey, Option<Arc<BucketEntry>>) -> O,
|
|
||||||
{
|
|
||||||
let inner = self.inner.read();
|
|
||||||
let self_node_id = self.unlocked_inner.node_id;
|
|
||||||
|
|
||||||
// collect all the nodes for sorting
|
|
||||||
let mut nodes =
|
|
||||||
Vec::<(DHTKey, Option<Arc<BucketEntry>>)>::with_capacity(inner.bucket_entry_count + 1);
|
|
||||||
|
|
||||||
// add our own node (the only one with the None entry)
|
|
||||||
if filter(self_node_id, None) {
|
|
||||||
nodes.push((self_node_id, None));
|
|
||||||
}
|
|
||||||
|
|
||||||
// add all nodes from buckets
|
|
||||||
Self::with_entries(&*inner, cur_ts, BucketEntryState::Unreliable, |k, v| {
|
|
||||||
// Apply filter
|
|
||||||
if filter(k, Some(v.clone())) {
|
|
||||||
nodes.push((k, Some(v.clone())));
|
|
||||||
}
|
|
||||||
Option::<()>::None
|
|
||||||
});
|
|
||||||
|
|
||||||
// sort by preference for returning nodes
|
|
||||||
nodes.sort_by(compare);
|
|
||||||
|
|
||||||
// return transformed vector for filtered+sorted nodes
|
|
||||||
let cnt = usize::min(node_count, nodes.len());
|
|
||||||
let mut out = Vec::<O>::with_capacity(cnt);
|
|
||||||
for node in nodes {
|
|
||||||
let val = transform(node.0, node.1);
|
|
||||||
out.push(val);
|
|
||||||
}
|
|
||||||
|
|
||||||
out
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn find_fastest_nodes<T, F, O>(
|
|
||||||
&self,
|
|
||||||
node_count: usize,
|
|
||||||
mut filter: F,
|
|
||||||
transform: T,
|
|
||||||
) -> Vec<O>
|
|
||||||
where
|
|
||||||
F: FnMut(DHTKey, Option<Arc<BucketEntry>>) -> bool,
|
|
||||||
T: FnMut(DHTKey, Option<Arc<BucketEntry>>) -> O,
|
|
||||||
{
|
|
||||||
let cur_ts = intf::get_timestamp();
|
|
||||||
let out = self.find_peers_with_sort_and_filter(
|
|
||||||
node_count,
|
|
||||||
cur_ts,
|
|
||||||
// filter
|
|
||||||
|k, v| {
|
|
||||||
if let Some(entry) = &v {
|
|
||||||
// always filter out dead nodes
|
|
||||||
if entry.with(|e| e.state(cur_ts) == BucketEntryState::Dead) {
|
|
||||||
false
|
|
||||||
} else {
|
|
||||||
filter(k, v)
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
// always filter out self peer, as it is irrelevant to the 'fastest nodes' search
|
|
||||||
false
|
|
||||||
}
|
|
||||||
},
|
|
||||||
// sort
|
|
||||||
|(a_key, a_entry), (b_key, b_entry)| {
|
|
||||||
// same nodes are always the same
|
|
||||||
if a_key == b_key {
|
|
||||||
return core::cmp::Ordering::Equal;
|
|
||||||
}
|
|
||||||
// our own node always comes last (should not happen, here for completeness)
|
|
||||||
if a_entry.is_none() {
|
|
||||||
return core::cmp::Ordering::Greater;
|
|
||||||
}
|
|
||||||
if b_entry.is_none() {
|
|
||||||
return core::cmp::Ordering::Less;
|
|
||||||
}
|
|
||||||
// reliable nodes come first
|
|
||||||
let ae = a_entry.as_ref().unwrap();
|
|
||||||
let be = b_entry.as_ref().unwrap();
|
|
||||||
ae.with(|ae| {
|
|
||||||
be.with(|be| {
|
|
||||||
let ra = ae.check_reliable(cur_ts);
|
|
||||||
let rb = be.check_reliable(cur_ts);
|
|
||||||
if ra != rb {
|
|
||||||
if ra {
|
|
||||||
return core::cmp::Ordering::Less;
|
|
||||||
} else {
|
|
||||||
return core::cmp::Ordering::Greater;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// latency is the next metric, closer nodes first
|
|
||||||
let a_latency = match ae.peer_stats().latency.as_ref() {
|
|
||||||
None => {
|
|
||||||
// treat unknown latency as slow
|
|
||||||
return core::cmp::Ordering::Greater;
|
|
||||||
}
|
|
||||||
Some(l) => l,
|
|
||||||
};
|
|
||||||
let b_latency = match be.peer_stats().latency.as_ref() {
|
|
||||||
None => {
|
|
||||||
// treat unknown latency as slow
|
|
||||||
return core::cmp::Ordering::Less;
|
|
||||||
}
|
|
||||||
Some(l) => l,
|
|
||||||
};
|
|
||||||
// Sort by average latency
|
|
||||||
a_latency.average.cmp(&b_latency.average)
|
|
||||||
})
|
|
||||||
})
|
|
||||||
},
|
|
||||||
// transform,
|
|
||||||
transform,
|
|
||||||
);
|
|
||||||
out
|
|
||||||
}
|
|
||||||
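A standalone version of the ordering that find_fastest_nodes applies, with a hypothetical Candidate struct standing in for BucketEntryInner:

use std::cmp::Ordering;

// Hypothetical reduced view of a bucket entry for sorting purposes.
struct Candidate {
    reliable: bool,
    avg_latency_us: Option<u64>,
}

// Reliable entries sort first, then lower average latency; unknown latency is
// treated as slowest, matching the comparator above.
fn cmp_fastest(a: &Candidate, b: &Candidate) -> Ordering {
    if a.reliable != b.reliable {
        return if a.reliable {
            Ordering::Less
        } else {
            Ordering::Greater
        };
    }
    match (a.avg_latency_us, b.avg_latency_us) {
        (None, None) => Ordering::Equal,
        (None, Some(_)) => Ordering::Greater,
        (Some(_), None) => Ordering::Less,
        (Some(la), Some(lb)) => la.cmp(&lb),
    }
}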
|
|
||||||
pub fn find_closest_nodes<F, T, O>(
|
|
||||||
&self,
|
|
||||||
node_id: DHTKey,
|
|
||||||
filter: F,
|
|
||||||
mut transform: T,
|
|
||||||
) -> Vec<O>
|
|
||||||
where
|
|
||||||
F: FnMut(DHTKey, Option<Arc<BucketEntry>>) -> bool,
|
|
||||||
T: FnMut(DHTKey, Option<Arc<BucketEntry>>) -> O,
|
|
||||||
{
|
|
||||||
let cur_ts = intf::get_timestamp();
|
|
||||||
let node_count = {
|
|
||||||
let c = self.unlocked_inner.config.get();
|
|
||||||
c.network.dht.max_find_node_count as usize
|
|
||||||
};
|
|
||||||
let out = self.find_peers_with_sort_and_filter(
|
|
||||||
node_count,
|
|
||||||
cur_ts,
|
|
||||||
// filter
|
|
||||||
filter,
|
|
||||||
// sort
|
|
||||||
|(a_key, a_entry), (b_key, b_entry)| {
|
|
||||||
// same nodes are always the same
|
|
||||||
if a_key == b_key {
|
|
||||||
return core::cmp::Ordering::Equal;
|
|
||||||
}
|
|
||||||
|
|
||||||
// reliable nodes come first, pessimistically treating our own node as unreliable
|
|
||||||
let ra = a_entry
|
|
||||||
.as_ref()
|
|
||||||
.map_or(false, |x| x.with(|x| x.check_reliable(cur_ts)));
|
|
||||||
let rb = b_entry
|
|
||||||
.as_ref()
|
|
||||||
.map_or(false, |x| x.with(|x| x.check_reliable(cur_ts)));
|
|
||||||
if ra != rb {
|
|
||||||
if ra {
|
|
||||||
return core::cmp::Ordering::Less;
|
|
||||||
} else {
|
|
||||||
return core::cmp::Ordering::Greater;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// distance is the next metric, closer nodes first
|
|
||||||
let da = distance(a_key, &node_id);
|
|
||||||
let db = distance(b_key, &node_id);
|
|
||||||
da.cmp(&db)
|
|
||||||
},
|
|
||||||
// transform,
|
|
||||||
&mut transform,
|
|
||||||
);
|
|
||||||
log_rtab!(">> find_closest_nodes: node count = {}", out.len());
|
|
||||||
out
|
|
||||||
}
|
|
||||||
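The closest-node sort keys on distance to the target id; a sketch assuming a 32 byte key and the usual Kademlia style XOR metric (the project's distance function may differ in detail):

use std::cmp::Ordering;

type Key = [u8; 32];

// XOR distance between two keys; smaller means closer.
fn xor_distance(a: &Key, b: &Key) -> Key {
    let mut d = [0u8; 32];
    for i in 0..32 {
        d[i] = a[i] ^ b[i];
    }
    d
}

// Comparator for sorting candidate keys by closeness to the target.
fn cmp_closest(target: &Key, a: &Key, b: &Key) -> Ordering {
    xor_distance(a, target).cmp(&xor_distance(b, target))
}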
|
|
||||||
// Build a map of protocols to low level ports
|
|
||||||
// This way we can get the set of protocols required to keep our NAT mapping alive for keepalive pings
|
|
||||||
// Only one protocol per low level protocol/port combination is required
|
|
||||||
// For example, if WS/WSS and TCP protocols are on the same low-level TCP port, only TCP keepalives will be required
|
|
||||||
// and we do not need to do WS/WSS keepalive as well. If they are on different ports, then we will need WS/WSS keepalives too.
|
|
||||||
pub fn get_mapped_port_info(&self) -> MappedPortInfo {
|
|
||||||
let mut low_level_protocol_ports =
|
|
||||||
BTreeSet::<(LowLevelProtocolType, AddressType, u16)>::new();
|
|
||||||
let mut protocol_to_port =
|
|
||||||
BTreeMap::<(ProtocolType, AddressType), (LowLevelProtocolType, u16)>::new();
|
|
||||||
let our_dids = self.all_filtered_dial_info_details(
|
|
||||||
RoutingDomain::PublicInternet.into(),
|
|
||||||
&DialInfoFilter::all(),
|
|
||||||
);
|
|
||||||
for did in our_dids {
|
|
||||||
low_level_protocol_ports.insert((
|
|
||||||
did.dial_info.protocol_type().low_level_protocol_type(),
|
|
||||||
did.dial_info.address_type(),
|
|
||||||
did.dial_info.socket_address().port(),
|
|
||||||
));
|
|
||||||
protocol_to_port.insert(
|
|
||||||
(did.dial_info.protocol_type(), did.dial_info.address_type()),
|
|
||||||
(
|
|
||||||
did.dial_info.protocol_type().low_level_protocol_type(),
|
|
||||||
did.dial_info.socket_address().port(),
|
|
||||||
),
|
|
||||||
);
|
|
||||||
}
|
|
||||||
MappedPortInfo {
|
|
||||||
low_level_protocol_ports,
|
|
||||||
protocol_to_port,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
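A simplified model of the bookkeeping in get_mapped_port_info, using hypothetical Proto, Addr and LowLevel enums; the point is that the set view collapses protocols sharing a low level socket, so WS and TCP on the same port produce a single keepalive entry.

use std::collections::{BTreeMap, BTreeSet};

// Hypothetical miniature protocol/address enums; the real types live in veilid-core.
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Debug)]
enum LowLevel { Udp, Tcp }
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Debug)]
enum Proto { Udp, Tcp, Ws, Wss }
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Debug)]
enum Addr { V4, V6 }

fn low_level(p: Proto) -> LowLevel {
    match p {
        Proto::Udp => LowLevel::Udp,
        Proto::Tcp | Proto::Ws | Proto::Wss => LowLevel::Tcp,
    }
}

// Build both views from a list of our own dial infos: the set deduplicates
// protocols that share a low level socket, the map remembers which low level
// port each high level protocol rides on.
fn map_ports(
    dial_infos: &[(Proto, Addr, u16)],
) -> (
    BTreeSet<(LowLevel, Addr, u16)>,
    BTreeMap<(Proto, Addr), (LowLevel, u16)>,
) {
    let mut ports = BTreeSet::new();
    let mut by_proto = BTreeMap::new();
    for &(proto, addr, port) in dial_infos {
        ports.insert((low_level(proto), addr, port));
        by_proto.insert((proto, addr), (low_level(proto), port));
    }
    (ports, by_proto)
}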
|
|
||||||
fn make_public_internet_relay_node_filter(&self) -> impl Fn(&BucketEntryInner) -> bool {
|
|
||||||
// Get all our outbound protocol/address types
|
|
||||||
let outbound_dif = self
|
|
||||||
.network_manager()
|
|
||||||
.get_outbound_dial_info_filter(RoutingDomain::PublicInternet);
|
|
||||||
let mapped_port_info = self.get_mapped_port_info();
|
|
||||||
|
|
||||||
move |e: &BucketEntryInner| {
|
|
||||||
// Ensure this node is not on the local network
|
|
||||||
if e.has_node_info(RoutingDomain::LocalNetwork.into()) {
|
|
||||||
return false;
|
|
||||||
}
|
|
||||||
|
|
||||||
// Disqualify nodes that don't cover all our inbound ports for tcp and udp
|
|
||||||
// as we need to be able to use the relay for keepalives for all nat mappings
|
|
||||||
let mut low_level_protocol_ports = mapped_port_info.low_level_protocol_ports.clone();
|
|
||||||
|
|
||||||
let can_serve_as_relay = e
|
|
||||||
.node_info(RoutingDomain::PublicInternet)
|
|
||||||
.map(|n| {
|
|
||||||
let dids =
|
|
||||||
n.all_filtered_dial_info_details(|did| did.matches_filter(&outbound_dif));
|
|
||||||
for did in &dids {
|
|
||||||
let pt = did.dial_info.protocol_type();
|
|
||||||
let at = did.dial_info.address_type();
|
|
||||||
if let Some((llpt, port)) = mapped_port_info.protocol_to_port.get(&(pt, at))
|
|
||||||
{
|
|
||||||
low_level_protocol_ports.remove(&(*llpt, at, *port));
|
|
||||||
}
|
|
||||||
}
|
|
||||||
low_level_protocol_ports.is_empty()
|
|
||||||
})
|
|
||||||
.unwrap_or(false);
|
|
||||||
if !can_serve_as_relay {
|
|
||||||
return false;
|
|
||||||
}
|
|
||||||
|
|
||||||
true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
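The relay qualification above is effectively a subset test over the ports we need kept alive; a sketch with plain numeric tuples standing in for the real key types:

use std::collections::BTreeSet;

// Keys are (low level protocol, address type, port) triples, reduced here to
// plain numeric tuples for illustration.
fn can_serve_as_relay(
    needed_keepalives: &BTreeSet<(u8, u8, u16)>,
    reachable_via_candidate: &BTreeSet<(u8, u8, u16)>,
) -> bool {
    // The candidate qualifies only if it covers every port we need kept alive.
    needed_keepalives.is_subset(reachable_via_candidate)
}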
|
|
||||||
#[instrument(level = "trace", skip(self), ret)]
|
|
||||||
pub fn find_inbound_relay(
|
|
||||||
&self,
|
|
||||||
routing_domain: RoutingDomain,
|
|
||||||
cur_ts: u64,
|
|
||||||
) -> Option<NodeRef> {
|
|
||||||
// Get relay filter function
|
|
||||||
let relay_node_filter = match routing_domain {
|
|
||||||
RoutingDomain::PublicInternet => self.make_public_internet_relay_node_filter(),
|
|
||||||
RoutingDomain::LocalNetwork => {
|
|
||||||
unimplemented!();
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
// Go through all entries and find fastest entry that matches filter function
|
|
||||||
let inner = self.inner.read();
|
|
||||||
let inner = &*inner;
|
|
||||||
let mut best_inbound_relay: Option<(DHTKey, Arc<BucketEntry>)> = None;
|
|
||||||
|
|
||||||
// Iterate all known nodes for candidates
|
|
||||||
Self::with_entries(inner, cur_ts, BucketEntryState::Unreliable, |k, v| {
|
|
||||||
let v2 = v.clone();
|
|
||||||
v.with(|e| {
|
|
||||||
// Ensure we have the node's status
|
|
||||||
if let Some(node_status) = e.node_status(routing_domain) {
|
|
||||||
// Ensure the node will relay
|
|
||||||
if node_status.will_relay() {
|
|
||||||
// Compare against previous candidate
|
|
||||||
if let Some(best_inbound_relay) = best_inbound_relay.as_mut() {
|
|
||||||
// Less is faster
|
|
||||||
let better = best_inbound_relay.1.with(|best| {
|
|
||||||
BucketEntryInner::cmp_fastest_reliable(cur_ts, e, best)
|
|
||||||
== std::cmp::Ordering::Less
|
|
||||||
});
|
|
||||||
// Now apply filter function and see if this node should be included
|
|
||||||
if better && relay_node_filter(e) {
|
|
||||||
*best_inbound_relay = (k, v2);
|
|
||||||
}
|
|
||||||
} else if relay_node_filter(e) {
|
|
||||||
// Always store the first candidate
|
|
||||||
best_inbound_relay = Some((k, v2));
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
});
|
|
||||||
// Don't end early, iterate through all entries
|
|
||||||
Option::<()>::None
|
|
||||||
});
|
|
||||||
// Return the best inbound relay noderef
|
|
||||||
best_inbound_relay.map(|(k, e)| NodeRef::new(self.clone(), k, e, None))
|
|
||||||
}
|
|
||||||
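The candidate scan in find_inbound_relay keeps the best entry seen so far; a reduced sketch over plain latency numbers, where lower is better (the real code compares entries with cmp_fastest_reliable and also applies the relay filter):

fn pick_best_relay<'a>(candidates: &[(&'a str, u64)]) -> Option<(&'a str, u64)> {
    let mut best: Option<(&'a str, u64)> = None;
    for &(id, latency) in candidates {
        match best {
            // Keep the current best if it is at least as fast.
            Some((_, best_latency)) if latency >= best_latency => {}
            // Otherwise this candidate becomes the best so far.
            _ => best = Some((id, latency)),
        }
    }
    best
}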
|
|
||||||
#[instrument(level = "trace", skip(self), ret)]
|
|
||||||
pub fn register_find_node_answer(&self, peers: Vec<PeerInfo>) -> Vec<NodeRef> {
|
|
||||||
let node_id = self.node_id();
|
|
||||||
|
|
||||||
// register nodes we'd found
|
|
||||||
let mut out = Vec::<NodeRef>::with_capacity(peers.len());
|
|
||||||
for p in peers {
|
|
||||||
// if our own node id is in the list then ignore it, as we don't add ourselves to our own routing table
|
|
||||||
if p.node_id.key == node_id {
|
|
||||||
continue;
|
|
||||||
}
|
|
||||||
|
|
||||||
// node can not be its own relay
|
|
||||||
if let Some(rpi) = &p.signed_node_info.node_info.relay_peer_info {
|
|
||||||
if rpi.node_id == p.node_id {
|
|
||||||
continue;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// register the node if it's new
|
|
||||||
if let Some(nr) = self.register_node_with_signed_node_info(
|
|
||||||
RoutingDomain::PublicInternet,
|
|
||||||
p.node_id.key,
|
|
||||||
p.signed_node_info.clone(),
|
|
||||||
false,
|
|
||||||
) {
|
|
||||||
out.push(nr);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
out
|
|
||||||
}
|
|
||||||
|
|
||||||
#[instrument(level = "trace", skip(self), ret, err)]
|
|
||||||
pub async fn find_node(
|
|
||||||
&self,
|
|
||||||
node_ref: NodeRef,
|
|
||||||
node_id: DHTKey,
|
|
||||||
) -> EyreResult<NetworkResult<Vec<NodeRef>>> {
|
|
||||||
let rpc_processor = self.rpc_processor();
|
|
||||||
|
|
||||||
let res = network_result_try!(
|
|
||||||
rpc_processor
|
|
||||||
.clone()
|
|
||||||
.rpc_call_find_node(Destination::direct(node_ref), node_id)
|
|
||||||
.await?
|
|
||||||
);
|
|
||||||
|
|
||||||
// register nodes we'd found
|
|
||||||
Ok(NetworkResult::value(
|
|
||||||
self.register_find_node_answer(res.answer),
|
|
||||||
))
|
|
||||||
}
|
|
||||||
|
|
||||||
#[instrument(level = "trace", skip(self), ret, err)]
|
|
||||||
pub async fn find_self(&self, node_ref: NodeRef) -> EyreResult<NetworkResult<Vec<NodeRef>>> {
|
|
||||||
let node_id = self.node_id();
|
|
||||||
self.find_node(node_ref, node_id).await
|
|
||||||
}
|
|
||||||
|
|
||||||
#[instrument(level = "trace", skip(self), ret, err)]
|
|
||||||
pub async fn find_target(&self, node_ref: NodeRef) -> EyreResult<NetworkResult<Vec<NodeRef>>> {
|
|
||||||
let node_id = node_ref.node_id();
|
|
||||||
self.find_node(node_ref, node_id).await
|
|
||||||
}
|
|
||||||
|
|
||||||
#[instrument(level = "trace", skip(self))]
|
|
||||||
pub async fn reverse_find_node(&self, node_ref: NodeRef, wide: bool) {
|
|
||||||
// Ask bootstrap node to 'find' our own node so we can get some more nodes near ourselves
|
|
||||||
// and then contact those nodes to inform -them- that we exist
|
|
||||||
|
|
||||||
// Ask bootstrap server for nodes closest to our own node
|
|
||||||
let closest_nodes = network_result_value_or_log!(debug match self.find_self(node_ref.clone()).await {
|
|
||||||
Err(e) => {
|
|
||||||
log_rtab!(error
|
|
||||||
"find_self failed for {:?}: {:?}",
|
|
||||||
&node_ref, e
|
|
||||||
);
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
Ok(v) => v,
|
|
||||||
} => {
|
|
||||||
return;
|
|
||||||
});
|
|
||||||
|
|
||||||
// Ask each node near us to find us as well
|
|
||||||
if wide {
|
|
||||||
for closest_nr in closest_nodes {
|
|
||||||
network_result_value_or_log!(debug match self.find_self(closest_nr.clone()).await {
|
|
||||||
Err(e) => {
|
|
||||||
log_rtab!(error
|
|
||||||
"find_self failed for {:?}: {:?}",
|
|
||||||
&closest_nr, e
|
|
||||||
);
|
|
||||||
continue;
|
|
||||||
}
|
|
||||||
Ok(v) => v,
|
|
||||||
} => {
|
|
||||||
// Do nothing with non-values
|
|
||||||
continue;
|
|
||||||
});
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
File diff suppressed because it is too large
Load Diff
@ -1,80 +1,360 @@
|
|||||||
use super::*;
|
use super::*;
|
||||||
use crate::dht::*;
|
use crate::crypto::*;
|
||||||
use alloc::fmt;
|
use alloc::fmt;
|
||||||
|
|
||||||
// Connectionless protocols like UDP are dependent on a NAT translation timeout
|
// Connectionless protocols like UDP are dependent on a NAT translation timeout
|
||||||
// We should ping them with some frequency and 30 seconds is typical timeout
|
// We should ping them with some frequency and 30 seconds is typical timeout
|
||||||
const CONNECTIONLESS_TIMEOUT_SECS: u32 = 29;
|
const CONNECTIONLESS_TIMEOUT_SECS: u32 = 29;
|
||||||
|
|
||||||
#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord)]
|
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
|
||||||
pub struct NodeRefFilter {
|
|
||||||
pub routing_domain_set: RoutingDomainSet,
|
|
||||||
pub dial_info_filter: DialInfoFilter,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl Default for NodeRefFilter {
|
pub struct NodeRefBaseCommon {
|
||||||
fn default() -> Self {
|
|
||||||
Self::new()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl NodeRefFilter {
|
|
||||||
pub fn new() -> Self {
|
|
||||||
Self {
|
|
||||||
routing_domain_set: RoutingDomainSet::all(),
|
|
||||||
dial_info_filter: DialInfoFilter::all(),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn with_routing_domain(mut self, routing_domain: RoutingDomain) -> Self {
|
|
||||||
self.routing_domain_set = routing_domain.into();
|
|
||||||
self
|
|
||||||
}
|
|
||||||
pub fn with_routing_domain_set(mut self, routing_domain_set: RoutingDomainSet) -> Self {
|
|
||||||
self.routing_domain_set = routing_domain_set;
|
|
||||||
self
|
|
||||||
}
|
|
||||||
pub fn with_dial_info_filter(mut self, dial_info_filter: DialInfoFilter) -> Self {
|
|
||||||
self.dial_info_filter = dial_info_filter;
|
|
||||||
self
|
|
||||||
}
|
|
||||||
pub fn with_protocol_type(mut self, protocol_type: ProtocolType) -> Self {
|
|
||||||
self.dial_info_filter = self.dial_info_filter.with_protocol_type(protocol_type);
|
|
||||||
self
|
|
||||||
}
|
|
||||||
pub fn with_protocol_type_set(mut self, protocol_set: ProtocolTypeSet) -> Self {
|
|
||||||
self.dial_info_filter = self.dial_info_filter.with_protocol_type_set(protocol_set);
|
|
||||||
self
|
|
||||||
}
|
|
||||||
pub fn with_address_type(mut self, address_type: AddressType) -> Self {
|
|
||||||
self.dial_info_filter = self.dial_info_filter.with_address_type(address_type);
|
|
||||||
self
|
|
||||||
}
|
|
||||||
pub fn with_address_type_set(mut self, address_set: AddressTypeSet) -> Self {
|
|
||||||
self.dial_info_filter = self.dial_info_filter.with_address_type_set(address_set);
|
|
||||||
self
|
|
||||||
}
|
|
||||||
pub fn filtered(mut self, other_filter: &NodeRefFilter) -> Self {
|
|
||||||
self.routing_domain_set &= other_filter.routing_domain_set;
|
|
||||||
self.dial_info_filter = self
|
|
||||||
.dial_info_filter
|
|
||||||
.filtered(&other_filter.dial_info_filter);
|
|
||||||
self
|
|
||||||
}
|
|
||||||
pub fn is_dead(&self) -> bool {
|
|
||||||
self.dial_info_filter.is_dead() || self.routing_domain_set.is_empty()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
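A reduced builder in the same style as the NodeRefFilter introduced above, using bit masks as stand-ins for RoutingDomainSet and the protocol set; with_* replaces a field, filtered intersects two filters, and is_dead reports an unsatisfiable filter.

// Hypothetical miniature filter; the field names and masks are illustrative only.
#[derive(Clone, Debug)]
struct MiniFilter {
    domains: u8,
    protocols: u8,
}

impl MiniFilter {
    fn all() -> Self {
        MiniFilter { domains: 0b11, protocols: 0b1111 }
    }
    fn with_domains(mut self, mask: u8) -> Self {
        self.domains = mask;
        self
    }
    fn with_protocols(mut self, mask: u8) -> Self {
        self.protocols = mask;
        self
    }
    // Intersection of two filters, analogous to NodeRefFilter::filtered.
    fn filtered(mut self, other: &MiniFilter) -> Self {
        self.domains &= other.domains;
        self.protocols &= other.protocols;
        self
    }
    // A filter that can never match anything is considered dead.
    fn is_dead(&self) -> bool {
        self.domains == 0 || self.protocols == 0
    }
}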
|
|
||||||
pub struct NodeRef {
|
|
||||||
routing_table: RoutingTable,
|
routing_table: RoutingTable,
|
||||||
node_id: DHTKey,
|
node_id: DHTKey,
|
||||||
entry: Arc<BucketEntry>,
|
entry: Arc<BucketEntry>,
|
||||||
filter: Option<NodeRefFilter>,
|
filter: Option<NodeRefFilter>,
|
||||||
|
sequencing: Sequencing,
|
||||||
#[cfg(feature = "tracking")]
|
#[cfg(feature = "tracking")]
|
||||||
track_id: usize,
|
track_id: usize,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
|
||||||
|
|
||||||
|
pub trait NodeRefBase: Sized {
|
||||||
|
// Common field access
|
||||||
|
fn common(&self) -> &NodeRefBaseCommon;
|
||||||
|
fn common_mut(&mut self) -> &mut NodeRefBaseCommon;
|
||||||
|
|
||||||
|
// Implementation-specific operators
|
||||||
|
fn operate<T, F>(&self, f: F) -> T
|
||||||
|
where
|
||||||
|
F: FnOnce(&RoutingTableInner, &BucketEntryInner) -> T;
|
||||||
|
fn operate_mut<T, F>(&self, f: F) -> T
|
||||||
|
where
|
||||||
|
F: FnOnce(&mut RoutingTableInner, &mut BucketEntryInner) -> T;
|
||||||
|
|
||||||
|
// Filtering
|
||||||
|
fn filter_ref(&self) -> Option<&NodeRefFilter> {
|
||||||
|
self.common().filter.as_ref()
|
||||||
|
}
|
||||||
|
|
||||||
|
fn take_filter(&mut self) -> Option<NodeRefFilter> {
|
||||||
|
self.common_mut().filter.take()
|
||||||
|
}
|
||||||
|
|
||||||
|
fn set_filter(&mut self, filter: Option<NodeRefFilter>) {
|
||||||
|
self.common_mut().filter = filter
|
||||||
|
}
|
||||||
|
|
||||||
|
fn set_sequencing(&mut self, sequencing: Sequencing) {
|
||||||
|
self.common_mut().sequencing = sequencing;
|
||||||
|
}
|
||||||
|
fn sequencing(&self) -> Sequencing {
|
||||||
|
self.common().sequencing
|
||||||
|
}
|
||||||
|
|
||||||
|
fn merge_filter(&mut self, filter: NodeRefFilter) {
|
||||||
|
let common_mut = self.common_mut();
|
||||||
|
if let Some(self_filter) = common_mut.filter.take() {
|
||||||
|
common_mut.filter = Some(self_filter.filtered(&filter));
|
||||||
|
} else {
|
||||||
|
common_mut.filter = Some(filter);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn is_filter_dead(&self) -> bool {
|
||||||
|
if let Some(filter) = &self.common().filter {
|
||||||
|
filter.is_dead()
|
||||||
|
} else {
|
||||||
|
false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn routing_domain_set(&self) -> RoutingDomainSet {
|
||||||
|
self.common()
|
||||||
|
.filter
|
||||||
|
.as_ref()
|
||||||
|
.map(|f| f.routing_domain_set)
|
||||||
|
.unwrap_or(RoutingDomainSet::all())
|
||||||
|
}
|
||||||
|
|
||||||
|
fn dial_info_filter(&self) -> DialInfoFilter {
|
||||||
|
self.common()
|
||||||
|
.filter
|
||||||
|
.as_ref()
|
||||||
|
.map(|f| f.dial_info_filter.clone())
|
||||||
|
.unwrap_or(DialInfoFilter::all())
|
||||||
|
}
|
||||||
|
|
||||||
|
fn best_routing_domain(&self) -> Option<RoutingDomain> {
|
||||||
|
self.operate(|_rti, e| {
|
||||||
|
e.best_routing_domain(
|
||||||
|
self.common()
|
||||||
|
.filter
|
||||||
|
.as_ref()
|
||||||
|
.map(|f| f.routing_domain_set)
|
||||||
|
.unwrap_or(RoutingDomainSet::all()),
|
||||||
|
)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// Accessors
|
||||||
|
fn routing_table(&self) -> RoutingTable {
|
||||||
|
self.common().routing_table.clone()
|
||||||
|
}
|
||||||
|
fn node_id(&self) -> DHTKey {
|
||||||
|
self.common().node_id
|
||||||
|
}
|
||||||
|
fn has_updated_since_last_network_change(&self) -> bool {
|
||||||
|
self.operate(|_rti, e| e.has_updated_since_last_network_change())
|
||||||
|
}
|
||||||
|
fn set_updated_since_last_network_change(&self) {
|
||||||
|
self.operate_mut(|_rti, e| e.set_updated_since_last_network_change(true));
|
||||||
|
}
|
||||||
|
fn update_node_status(&self, node_status: NodeStatus) {
|
||||||
|
self.operate_mut(|_rti, e| {
|
||||||
|
e.update_node_status(node_status);
|
||||||
|
});
|
||||||
|
}
|
||||||
|
fn min_max_version(&self) -> Option<VersionRange> {
|
||||||
|
self.operate(|_rti, e| e.min_max_version())
|
||||||
|
}
|
||||||
|
fn set_min_max_version(&self, min_max_version: VersionRange) {
|
||||||
|
self.operate_mut(|_rti, e| e.set_min_max_version(min_max_version))
|
||||||
|
}
|
||||||
|
fn state(&self, cur_ts: u64) -> BucketEntryState {
|
||||||
|
self.operate(|_rti, e| e.state(cur_ts))
|
||||||
|
}
|
||||||
|
fn peer_stats(&self) -> PeerStats {
|
||||||
|
self.operate(|_rti, e| e.peer_stats().clone())
|
||||||
|
}
|
||||||
|
|
||||||
|
// Per-RoutingDomain accessors
|
||||||
|
fn make_peer_info(&self, routing_domain: RoutingDomain) -> Option<PeerInfo> {
|
||||||
|
self.operate(|_rti, e| e.make_peer_info(self.node_id(), routing_domain))
|
||||||
|
}
|
||||||
|
fn node_info(&self, routing_domain: RoutingDomain) -> Option<NodeInfo> {
|
||||||
|
self.operate(|_rti, e| e.node_info(routing_domain).cloned())
|
||||||
|
}
|
||||||
|
fn signed_node_info_has_valid_signature(&self, routing_domain: RoutingDomain) -> bool {
|
||||||
|
self.operate(|_rti, e| {
|
||||||
|
e.signed_node_info(routing_domain)
|
||||||
|
.map(|sni| sni.has_valid_signature())
|
||||||
|
.unwrap_or(false)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
fn has_seen_our_node_info(&self, routing_domain: RoutingDomain) -> bool {
|
||||||
|
self.operate(|_rti, e| e.has_seen_our_node_info(routing_domain))
|
||||||
|
}
|
||||||
|
fn set_seen_our_node_info(&self, routing_domain: RoutingDomain) {
|
||||||
|
self.operate_mut(|_rti, e| e.set_seen_our_node_info(routing_domain, true));
|
||||||
|
}
|
||||||
|
fn network_class(&self, routing_domain: RoutingDomain) -> Option<NetworkClass> {
|
||||||
|
self.operate(|_rt, e| e.node_info(routing_domain).map(|n| n.network_class))
|
||||||
|
}
|
||||||
|
fn outbound_protocols(&self, routing_domain: RoutingDomain) -> Option<ProtocolTypeSet> {
|
||||||
|
self.operate(|_rt, e| e.node_info(routing_domain).map(|n| n.outbound_protocols))
|
||||||
|
}
|
||||||
|
fn address_types(&self, routing_domain: RoutingDomain) -> Option<AddressTypeSet> {
|
||||||
|
self.operate(|_rt, e| e.node_info(routing_domain).map(|n| n.address_types))
|
||||||
|
}
|
||||||
|
fn node_info_outbound_filter(&self, routing_domain: RoutingDomain) -> DialInfoFilter {
|
||||||
|
let mut dif = DialInfoFilter::all();
|
||||||
|
if let Some(outbound_protocols) = self.outbound_protocols(routing_domain) {
|
||||||
|
dif = dif.with_protocol_type_set(outbound_protocols);
|
||||||
|
}
|
||||||
|
if let Some(address_types) = self.address_types(routing_domain) {
|
||||||
|
dif = dif.with_address_type_set(address_types);
|
||||||
|
}
|
||||||
|
dif
|
||||||
|
}
|
||||||
|
fn relay(&self, routing_domain: RoutingDomain) -> Option<NodeRef> {
|
||||||
|
self.operate_mut(|rti, e| {
|
||||||
|
e.signed_node_info(routing_domain)
|
||||||
|
.and_then(|n| n.relay_peer_info())
|
||||||
|
.and_then(|t| {
|
||||||
|
// If relay is ourselves, then return None, because we can't relay through ourselves
|
||||||
|
// and to contact this node we should have had an existing inbound connection
|
||||||
|
if t.node_id.key == rti.unlocked_inner.node_id {
|
||||||
|
return None;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Register relay node and return noderef
|
||||||
|
rti.register_node_with_signed_node_info(
|
||||||
|
self.routing_table(),
|
||||||
|
routing_domain,
|
||||||
|
t.node_id.key,
|
||||||
|
t.signed_node_info,
|
||||||
|
false,
|
||||||
|
)
|
||||||
|
})
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// Filtered accessors
|
||||||
|
fn first_filtered_dial_info_detail(&self) -> Option<DialInfoDetail> {
|
||||||
|
let routing_domain_set = self.routing_domain_set();
|
||||||
|
let dial_info_filter = self.dial_info_filter();
|
||||||
|
|
||||||
|
let (sort, dial_info_filter) = match self.common().sequencing {
|
||||||
|
Sequencing::NoPreference => (None, dial_info_filter),
|
||||||
|
Sequencing::PreferOrdered => (
|
||||||
|
Some(DialInfoDetail::ordered_sequencing_sort),
|
||||||
|
dial_info_filter,
|
||||||
|
),
|
||||||
|
Sequencing::EnsureOrdered => (
|
||||||
|
Some(DialInfoDetail::ordered_sequencing_sort),
|
||||||
|
dial_info_filter.filtered(
|
||||||
|
&DialInfoFilter::all().with_protocol_type_set(ProtocolType::all_ordered_set()),
|
||||||
|
),
|
||||||
|
),
|
||||||
|
};
|
||||||
|
|
||||||
|
self.operate(|_rt, e| {
|
||||||
|
for routing_domain in routing_domain_set {
|
||||||
|
if let Some(ni) = e.node_info(routing_domain) {
|
||||||
|
let filter = |did: &DialInfoDetail| did.matches_filter(&dial_info_filter);
|
||||||
|
if let Some(did) = ni.first_filtered_dial_info_detail(sort, filter) {
|
||||||
|
return Some(did);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
None
|
||||||
|
})
|
||||||
|
}
|
||||||
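The three Sequencing modes above map onto a sort choice plus an optional protocol restriction; a sketch with a hypothetical plan helper returning (sort ordered dial info first, restrict to ordered protocols only):

#[derive(Clone, Copy)]
enum Sequencing {
    NoPreference,
    PreferOrdered,
    EnsureOrdered,
}

// (sort ordered dial info first, restrict to ordered protocols only)
fn plan(seq: Sequencing) -> (bool, bool) {
    match seq {
        Sequencing::NoPreference => (false, false),
        Sequencing::PreferOrdered => (true, false),
        Sequencing::EnsureOrdered => (true, true),
    }
}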
|
|
||||||
|
fn all_filtered_dial_info_details<F>(&self) -> Vec<DialInfoDetail> {
|
||||||
|
let routing_domain_set = self.routing_domain_set();
|
||||||
|
let dial_info_filter = self.dial_info_filter();
|
||||||
|
|
||||||
|
let (sort, dial_info_filter) = match self.common().sequencing {
|
||||||
|
Sequencing::NoPreference => (None, dial_info_filter),
|
||||||
|
Sequencing::PreferOrdered => (
|
||||||
|
Some(DialInfoDetail::ordered_sequencing_sort),
|
||||||
|
dial_info_filter,
|
||||||
|
),
|
||||||
|
Sequencing::EnsureOrdered => (
|
||||||
|
Some(DialInfoDetail::ordered_sequencing_sort),
|
||||||
|
dial_info_filter.filtered(
|
||||||
|
&DialInfoFilter::all().with_protocol_type_set(ProtocolType::all_ordered_set()),
|
||||||
|
),
|
||||||
|
),
|
||||||
|
};
|
||||||
|
|
||||||
|
let mut out = Vec::new();
|
||||||
|
self.operate(|_rt, e| {
|
||||||
|
for routing_domain in routing_domain_set {
|
||||||
|
if let Some(ni) = e.node_info(routing_domain) {
|
||||||
|
let filter = |did: &DialInfoDetail| did.matches_filter(&dial_info_filter);
|
||||||
|
if let Some(did) = ni.first_filtered_dial_info_detail(sort, filter) {
|
||||||
|
out.push(did);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
});
|
||||||
|
out.remove_duplicates();
|
||||||
|
out
|
||||||
|
}
|
||||||
|
|
||||||
|
fn last_connection(&self) -> Option<ConnectionDescriptor> {
|
||||||
|
// Get the last connections and the last time we saw anything with this connection
|
||||||
|
// Filtered first and then sorted by most recent
|
||||||
|
self.operate(|rti, e| {
|
||||||
|
let last_connections = e.last_connections(rti, self.common().filter.clone());
|
||||||
|
|
||||||
|
// Do some checks to ensure these are possibly still 'live'
|
||||||
|
for (last_connection, last_seen) in last_connections {
|
||||||
|
// Should we check the connection table?
|
||||||
|
if last_connection.protocol_type().is_connection_oriented() {
|
||||||
|
// Look the connection up in the connection manager and see if it's still there
|
||||||
|
let connection_manager =
|
||||||
|
rti.unlocked_inner.network_manager.connection_manager();
|
||||||
|
if connection_manager.get_connection(last_connection).is_some() {
|
||||||
|
return Some(last_connection);
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
// If this is not connection oriented, then we check our last seen time
|
||||||
|
// to see if this mapping has expired (beyond our timeout)
|
||||||
|
let cur_ts = intf::get_timestamp();
|
||||||
|
if (last_seen + (CONNECTIONLESS_TIMEOUT_SECS as u64 * 1_000_000u64)) >= cur_ts {
|
||||||
|
return Some(last_connection);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
None
|
||||||
|
})
|
||||||
|
}
|
||||||
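The liveness test in last_connection distinguishes connection oriented transports from connectionless ones; a sketch with a hypothetical still_live helper, timestamps in microseconds, and the 29 second constant mirroring CONNECTIONLESS_TIMEOUT_SECS above:

fn still_live(
    connection_oriented: bool,
    in_connection_table: bool,
    last_seen_us: u64,
    now_us: u64,
) -> bool {
    if connection_oriented {
        // Connection oriented descriptors are only as live as the connection table says.
        in_connection_table
    } else {
        // Connectionless mappings expire once the NAT timeout window has passed.
        last_seen_us + 29 * 1_000_000 >= now_us
    }
}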
|
|
||||||
|
fn clear_last_connections(&self) {
|
||||||
|
self.operate_mut(|_rti, e| e.clear_last_connections())
|
||||||
|
}
|
||||||
|
|
||||||
|
fn set_last_connection(&self, connection_descriptor: ConnectionDescriptor, ts: u64) {
|
||||||
|
self.operate_mut(|rti, e| {
|
||||||
|
e.set_last_connection(connection_descriptor, ts);
|
||||||
|
rti.touch_recent_peer(self.common().node_id, connection_descriptor);
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
fn has_any_dial_info(&self) -> bool {
|
||||||
|
self.operate(|_rti, e| {
|
||||||
|
for rtd in RoutingDomain::all() {
|
||||||
|
if let Some(sni) = e.signed_node_info(rtd) {
|
||||||
|
if sni.has_any_dial_info() {
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
false
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
fn stats_question_sent(&self, ts: u64, bytes: u64, expects_answer: bool) {
|
||||||
|
self.operate_mut(|rti, e| {
|
||||||
|
rti.transfer_stats_accounting().add_up(bytes);
|
||||||
|
e.question_sent(ts, bytes, expects_answer);
|
||||||
|
})
|
||||||
|
}
|
||||||
|
fn stats_question_rcvd(&self, ts: u64, bytes: u64) {
|
||||||
|
self.operate_mut(|rti, e| {
|
||||||
|
rti.transfer_stats_accounting().add_down(bytes);
|
||||||
|
e.question_rcvd(ts, bytes);
|
||||||
|
})
|
||||||
|
}
|
||||||
|
fn stats_answer_sent(&self, bytes: u64) {
|
||||||
|
self.operate_mut(|rti, e| {
|
||||||
|
rti.transfer_stats_accounting().add_up(bytes);
|
||||||
|
e.answer_sent(bytes);
|
||||||
|
})
|
||||||
|
}
|
||||||
|
fn stats_answer_rcvd(&self, send_ts: u64, recv_ts: u64, bytes: u64) {
|
||||||
|
self.operate_mut(|rti, e| {
|
||||||
|
rti.transfer_stats_accounting().add_down(bytes);
|
||||||
|
rti.latency_stats_accounting()
|
||||||
|
.record_latency(recv_ts - send_ts);
|
||||||
|
e.answer_rcvd(send_ts, recv_ts, bytes);
|
||||||
|
})
|
||||||
|
}
|
||||||
|
fn stats_question_lost(&self) {
|
||||||
|
self.operate_mut(|_rti, e| {
|
||||||
|
e.question_lost();
|
||||||
|
})
|
||||||
|
}
|
||||||
|
fn stats_failed_to_send(&self, ts: u64, expects_answer: bool) {
|
||||||
|
self.operate_mut(|_rti, e| {
|
||||||
|
e.failed_to_send(ts, expects_answer);
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
////////////////////////////////////////////////////////////////////////////////////
|
||||||
|
|
||||||
|
/// Reference to a routing table entry
|
||||||
|
/// Keeps entry in the routing table until all references are gone
|
||||||
|
pub struct NodeRef {
|
||||||
|
common: NodeRefBaseCommon,
|
||||||
|
}
|
||||||
|
|
||||||
impl NodeRef {
|
impl NodeRef {
|
||||||
pub fn new(
|
pub fn new(
|
||||||
routing_table: RoutingTable,
|
routing_table: RoutingTable,
|
||||||
@ -85,52 +365,15 @@ impl NodeRef {
|
|||||||
entry.ref_count.fetch_add(1u32, Ordering::Relaxed);
|
entry.ref_count.fetch_add(1u32, Ordering::Relaxed);
|
||||||
|
|
||||||
Self {
|
Self {
|
||||||
routing_table,
|
common: NodeRefBaseCommon {
|
||||||
node_id,
|
routing_table,
|
||||||
entry,
|
node_id,
|
||||||
filter,
|
entry,
|
||||||
#[cfg(feature = "tracking")]
|
filter,
|
||||||
track_id: entry.track(),
|
sequencing: Sequencing::NoPreference,
|
||||||
}
|
#[cfg(feature = "tracking")]
|
||||||
}
|
track_id: entry.track(),
|
||||||
|
},
|
||||||
// Operate on entry accessors
|
|
||||||
|
|
||||||
pub(super) fn operate<T, F>(&self, f: F) -> T
|
|
||||||
where
|
|
||||||
F: FnOnce(&RoutingTableInner, &BucketEntryInner) -> T,
|
|
||||||
{
|
|
||||||
let inner = &*self.routing_table.inner.read();
|
|
||||||
self.entry.with(|e| f(inner, e))
|
|
||||||
}
|
|
||||||
|
|
||||||
pub(super) fn operate_mut<T, F>(&self, f: F) -> T
|
|
||||||
where
|
|
||||||
F: FnOnce(&mut RoutingTableInner, &mut BucketEntryInner) -> T,
|
|
||||||
{
|
|
||||||
let inner = &mut *self.routing_table.inner.write();
|
|
||||||
self.entry.with_mut(|e| f(inner, e))
|
|
||||||
}
|
|
||||||
|
|
||||||
// Filtering
|
|
||||||
|
|
||||||
pub fn filter_ref(&self) -> Option<&NodeRefFilter> {
|
|
||||||
self.filter.as_ref()
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn take_filter(&mut self) -> Option<NodeRefFilter> {
|
|
||||||
self.filter.take()
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn set_filter(&mut self, filter: Option<NodeRefFilter>) {
|
|
||||||
self.filter = filter
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn merge_filter(&mut self, filter: NodeRefFilter) {
|
|
||||||
if let Some(self_filter) = self.filter.take() {
|
|
||||||
self.filter = Some(self_filter.filtered(&filter));
|
|
||||||
} else {
|
|
||||||
self.filter = Some(filter);
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -140,281 +383,73 @@ impl NodeRef {
|
|||||||
out
|
out
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn is_filter_dead(&self) -> bool {
|
pub fn locked<'a>(&self, rti: &'a RoutingTableInner) -> NodeRefLocked<'a> {
|
||||||
if let Some(filter) = &self.filter {
|
NodeRefLocked::new(rti, self.clone())
|
||||||
filter.is_dead()
|
}
|
||||||
} else {
|
pub fn locked_mut<'a>(&self, rti: &'a mut RoutingTableInner) -> NodeRefLockedMut<'a> {
|
||||||
false
|
NodeRefLockedMut::new(rti, self.clone())
|
||||||
}
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl NodeRefBase for NodeRef {
|
||||||
|
fn common(&self) -> &NodeRefBaseCommon {
|
||||||
|
&self.common
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn routing_domain_set(&self) -> RoutingDomainSet {
|
fn common_mut(&mut self) -> &mut NodeRefBaseCommon {
|
||||||
self.filter
|
&mut self.common
|
||||||
.as_ref()
|
|
||||||
.map(|f| f.routing_domain_set)
|
|
||||||
.unwrap_or(RoutingDomainSet::all())
|
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn dial_info_filter(&self) -> DialInfoFilter {
|
fn operate<T, F>(&self, f: F) -> T
|
||||||
self.filter
|
where
|
||||||
.as_ref()
|
F: FnOnce(&RoutingTableInner, &BucketEntryInner) -> T,
|
||||||
.map(|f| f.dial_info_filter.clone())
|
{
|
||||||
.unwrap_or(DialInfoFilter::all())
|
let inner = &*self.common.routing_table.inner.read();
|
||||||
|
self.common.entry.with(inner, f)
|
||||||
}
|
}
|
||||||
|
|
||||||
    pub fn best_routing_domain(&self) -> Option<RoutingDomain> {
        self.operate(|_rti, e| {
            e.best_routing_domain(
                self.filter
                    .as_ref()
                    .map(|f| f.routing_domain_set)
                    .unwrap_or(RoutingDomainSet::all()),
            )
        })
    }

    // Accessors
    pub fn routing_table(&self) -> RoutingTable {
        self.routing_table.clone()
    }
    pub fn node_id(&self) -> DHTKey {
        self.node_id
    }
    pub fn has_updated_since_last_network_change(&self) -> bool {
        self.operate(|_rti, e| e.has_updated_since_last_network_change())
    }
    pub fn set_updated_since_last_network_change(&self) {
        self.operate_mut(|_rti, e| e.set_updated_since_last_network_change(true));
    }
    pub fn update_node_status(&self, node_status: NodeStatus) {
        self.operate_mut(|_rti, e| {
            e.update_node_status(node_status);
        });
    }
    pub fn min_max_version(&self) -> Option<(u8, u8)> {
        self.operate(|_rti, e| e.min_max_version())
    }
    pub fn set_min_max_version(&self, min_max_version: (u8, u8)) {
        self.operate_mut(|_rti, e| e.set_min_max_version(min_max_version))
    }
    pub fn state(&self, cur_ts: u64) -> BucketEntryState {
        self.operate(|_rti, e| e.state(cur_ts))
    }
    pub fn peer_stats(&self) -> PeerStats {
        self.operate(|_rti, e| e.peer_stats().clone())
    }

    // Per-RoutingDomain accessors
    pub fn make_peer_info(&self, routing_domain: RoutingDomain) -> Option<PeerInfo> {
        self.operate(|_rti, e| e.make_peer_info(self.node_id(), routing_domain))
    }
    pub fn signed_node_info_has_valid_signature(&self, routing_domain: RoutingDomain) -> bool {
        self.operate(|_rti, e| {
            e.signed_node_info(routing_domain)
                .map(|sni| sni.has_valid_signature())
                .unwrap_or(false)
        })
    }
    pub fn has_seen_our_node_info(&self, routing_domain: RoutingDomain) -> bool {
        self.operate(|_rti, e| e.has_seen_our_node_info(routing_domain))
    }
    pub fn set_seen_our_node_info(&self, routing_domain: RoutingDomain) {
        self.operate_mut(|_rti, e| e.set_seen_our_node_info(routing_domain, true));
    }
    pub fn network_class(&self, routing_domain: RoutingDomain) -> Option<NetworkClass> {
        self.operate(|_rt, e| e.node_info(routing_domain).map(|n| n.network_class))
    }
    pub fn outbound_protocols(&self, routing_domain: RoutingDomain) -> Option<ProtocolTypeSet> {
        self.operate(|_rt, e| e.node_info(routing_domain).map(|n| n.outbound_protocols))
    }
    pub fn address_types(&self, routing_domain: RoutingDomain) -> Option<AddressTypeSet> {
        self.operate(|_rt, e| e.node_info(routing_domain).map(|n| n.address_types))
    }
    pub fn node_info_outbound_filter(&self, routing_domain: RoutingDomain) -> DialInfoFilter {
        let mut dif = DialInfoFilter::all();
        if let Some(outbound_protocols) = self.outbound_protocols(routing_domain) {
            dif = dif.with_protocol_type_set(outbound_protocols);
        }
        if let Some(address_types) = self.address_types(routing_domain) {
            dif = dif.with_address_type_set(address_types);
        }
        dif
    }
    pub fn relay(&self, routing_domain: RoutingDomain) -> Option<NodeRef> {
        let target_rpi = self.operate(|_rti, e| {
            e.node_info(routing_domain)
                .map(|n| n.relay_peer_info.as_ref().map(|pi| pi.as_ref().clone()))
        })?;
        target_rpi.and_then(|t| {
            // If relay is ourselves, then return None, because we can't relay through ourselves
            // and to contact this node we should have had an existing inbound connection
            if t.node_id.key == self.routing_table.node_id() {
                return None;
            }

            // Register relay node and return noderef
            self.routing_table.register_node_with_signed_node_info(
                routing_domain,
                t.node_id.key,
                t.signed_node_info,
                false,
            )
        })
    }

    // Filtered accessors
    pub fn first_filtered_dial_info_detail(&self) -> Option<DialInfoDetail> {
        let routing_domain_set = self.routing_domain_set();
        let dial_info_filter = self.dial_info_filter();

        self.operate(|_rt, e| {
            for routing_domain in routing_domain_set {
                if let Some(ni) = e.node_info(routing_domain) {
                    let filter = |did: &DialInfoDetail| did.matches_filter(&dial_info_filter);
                    if let Some(did) = ni.first_filtered_dial_info_detail(filter) {
                        return Some(did);
                    }
                }
            }
            None
        })
    }

    pub fn all_filtered_dial_info_details<F>(&self) -> Vec<DialInfoDetail> {
        let routing_domain_set = self.routing_domain_set();
        let dial_info_filter = self.dial_info_filter();

        let mut out = Vec::new();
        self.operate(|_rt, e| {
            for routing_domain in routing_domain_set {
                if let Some(ni) = e.node_info(routing_domain) {
                    let filter = |did: &DialInfoDetail| did.matches_filter(&dial_info_filter);
                    if let Some(did) = ni.first_filtered_dial_info_detail(filter) {
                        out.push(did);
                    }
                }
            }
        });
        out.remove_duplicates();
        out
    }

    pub fn last_connection(&self) -> Option<ConnectionDescriptor> {
        // Get the last connection and the last time we saw anything with this connection
        let (last_connection, last_seen) =
            self.operate(|rti, e| e.last_connection(rti, self.filter.clone()))?;

        // Should we check the connection table?
        if last_connection.protocol_type().is_connection_oriented() {
            // Look the connection up in the connection manager and see if it's still there
            let connection_manager = self.routing_table.network_manager().connection_manager();
            connection_manager.get_connection(last_connection)?;
        } else {
            // If this is not connection oriented, then we check our last seen time
            // to see if this mapping has expired (beyond our timeout)
            let cur_ts = intf::get_timestamp();
            if (last_seen + (CONNECTIONLESS_TIMEOUT_SECS as u64 * 1_000_000u64)) < cur_ts {
                return None;
            }
        }
        Some(last_connection)
    }

    pub fn clear_last_connections(&self) {
        self.operate_mut(|_rti, e| e.clear_last_connections())
    }

    pub fn set_last_connection(&self, connection_descriptor: ConnectionDescriptor, ts: u64) {
        self.operate_mut(|_rti, e| e.set_last_connection(connection_descriptor, ts));
        self.routing_table
            .touch_recent_peer(self.node_id(), connection_descriptor);
    }

    pub fn has_any_dial_info(&self) -> bool {
        self.operate(|_rti, e| {
            for rtd in RoutingDomain::all() {
                if let Some(ni) = e.node_info(rtd) {
                    if ni.has_any_dial_info() {
                        return true;
                    }
                }
            }
            false
        })
    }

    pub fn stats_question_sent(&self, ts: u64, bytes: u64, expects_answer: bool) {
        self.operate_mut(|rti, e| {
            rti.self_transfer_stats_accounting.add_up(bytes);
            e.question_sent(ts, bytes, expects_answer);
        })
    }
    pub fn stats_question_rcvd(&self, ts: u64, bytes: u64) {
        self.operate_mut(|rti, e| {
            rti.self_transfer_stats_accounting.add_down(bytes);
            e.question_rcvd(ts, bytes);
        })
    }
    pub fn stats_answer_sent(&self, bytes: u64) {
        self.operate_mut(|rti, e| {
            rti.self_transfer_stats_accounting.add_up(bytes);
            e.answer_sent(bytes);
        })
    }
    pub fn stats_answer_rcvd(&self, send_ts: u64, recv_ts: u64, bytes: u64) {
        self.operate_mut(|rti, e| {
            rti.self_transfer_stats_accounting.add_down(bytes);
            rti.self_latency_stats_accounting
                .record_latency(recv_ts - send_ts);
            e.answer_rcvd(send_ts, recv_ts, bytes);
        })
    }
    pub fn stats_question_lost(&self) {
        self.operate_mut(|_rti, e| {
            e.question_lost();
        })
    }
    pub fn stats_failed_to_send(&self, ts: u64, expects_answer: bool) {
        self.operate_mut(|_rti, e| {
            e.failed_to_send(ts, expects_answer);
        })
    }
}

    fn operate_mut<T, F>(&self, f: F) -> T
    where
        F: FnOnce(&mut RoutingTableInner, &mut BucketEntryInner) -> T,
    {
        let inner = &mut *self.common.routing_table.inner.write();
        self.common.entry.with_mut(inner, f)
    }
}
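// Illustrative sketch only, not part of this merge request: one way the accessors
// above could be combined by a caller. `node_ref` is assumed to be a NodeRef already
// obtained from the routing table, and PublicInternet is just an example domain.
fn example_pick_dial_info(node_ref: &NodeRef) -> Option<DialInfo> {
    // Build the outbound dial info filter this node advertises for the domain...
    let _outbound_filter = node_ref.node_info_outbound_filter(RoutingDomain::PublicInternet);
    // ...then take the first dial info detail that passes the NodeRef's own filter.
    node_ref
        .first_filtered_dial_info_detail()
        .map(|did| did.dial_info)
}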
impl Clone for NodeRef {
    fn clone(&self) -> Self {
        self.entry.ref_count.fetch_add(1u32, Ordering::Relaxed);
        Self {
            routing_table: self.routing_table.clone(),
            node_id: self.node_id,
            entry: self.entry.clone(),
            filter: self.filter.clone(),
            #[cfg(feature = "tracking")]
            track_id: e.track(),
        }
    }
}

impl Clone for NodeRef {
    fn clone(&self) -> Self {
        self.common
            .entry
            .ref_count
            .fetch_add(1u32, Ordering::Relaxed);

        Self {
            common: NodeRefBaseCommon {
                routing_table: self.common.routing_table.clone(),
                node_id: self.common.node_id,
                entry: self.common.entry.clone(),
                filter: self.common.filter.clone(),
                sequencing: self.common.sequencing,
                #[cfg(feature = "tracking")]
                track_id: self.common.entry.write().track(),
            },
        }
    }
}

impl PartialEq for NodeRef {
    fn eq(&self, other: &Self) -> bool {
        self.node_id == other.node_id
    }
}

impl Eq for NodeRef {}

impl fmt::Display for NodeRef {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "{}", self.node_id.encode())
        write!(f, "{}", self.common.node_id.encode())
    }
}

impl fmt::Debug for NodeRef {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("NodeRef")
            .field("node_id", &self.node_id)
            .field("node_id", &self.common.node_id)
            .field("filter", &self.filter)
            .field("filter", &self.common.filter)
            .field("sequencing", &self.common.sequencing)
            .finish()
    }
}
@ -422,12 +457,138 @@ impl fmt::Debug for NodeRef {

impl Drop for NodeRef {
    fn drop(&mut self) {
        #[cfg(feature = "tracking")]
        self.operate(|e| e.untrack(self.track_id));

        // drop the noderef and queue a bucket kick if it was the last one
        let new_ref_count = self.entry.ref_count.fetch_sub(1u32, Ordering::Relaxed) - 1;
        if new_ref_count == 0 {
            self.routing_table.queue_bucket_kick(self.node_id);
        }
    }
}

impl Drop for NodeRef {
    fn drop(&mut self) {
        #[cfg(feature = "tracking")]
        self.common.entry.write().untrack(self.track_id);

        // drop the noderef and queue a bucket kick if it was the last one
        let new_ref_count = self
            .common
            .entry
            .ref_count
            .fetch_sub(1u32, Ordering::Relaxed)
            - 1;
        if new_ref_count == 0 {
            self.common
                .routing_table
                .queue_bucket_kick(self.common.node_id);
        }
    }
}
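// Illustrative sketch only, not from the diff: the Clone/Drop pair above maintains a
// reference count on the underlying bucket entry. Cloning bumps ref_count; dropping
// the last NodeRef queues a bucket kick so the table can reconsider the entry.
fn example_refcount(nr: NodeRef) {
    let nr2 = nr.clone(); // ref_count += 1
    drop(nr2);            // ref_count -= 1
    drop(nr);             // ref_count reaches 0 -> queue_bucket_kick(node_id)
}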
////////////////////////////////////////////////////////////////////////////////////

/// Locked reference to a routing table entry
/// For internal use inside the RoutingTable module where you have
/// already locked a RoutingTableInner
/// Keeps entry in the routing table until all references are gone
pub struct NodeRefLocked<'a> {
    inner: Mutex<&'a RoutingTableInner>,
    nr: NodeRef,
}

impl<'a> NodeRefLocked<'a> {
    pub fn new(inner: &'a RoutingTableInner, nr: NodeRef) -> Self {
        Self {
            inner: Mutex::new(inner),
            nr,
        }
    }
}

impl<'a> NodeRefBase for NodeRefLocked<'a> {
    fn common(&self) -> &NodeRefBaseCommon {
        &self.nr.common
    }

    fn common_mut(&mut self) -> &mut NodeRefBaseCommon {
        &mut self.nr.common
    }

    fn operate<T, F>(&self, f: F) -> T
    where
        F: FnOnce(&RoutingTableInner, &BucketEntryInner) -> T,
    {
        let inner = &*self.inner.lock();
        self.nr.common.entry.with(inner, f)
    }

    fn operate_mut<T, F>(&self, _f: F) -> T
    where
        F: FnOnce(&mut RoutingTableInner, &mut BucketEntryInner) -> T,
    {
        panic!("need to locked_mut() for this operation")
    }
}

impl<'a> fmt::Display for NodeRefLocked<'a> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "{}", self.nr)
    }
}

impl<'a> fmt::Debug for NodeRefLocked<'a> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("NodeRefLocked")
            .field("nr", &self.nr)
            .finish()
    }
}

////////////////////////////////////////////////////////////////////////////////////

/// Mutable locked reference to a routing table entry
/// For internal use inside the RoutingTable module where you have
/// already locked a RoutingTableInner
/// Keeps entry in the routing table until all references are gone
pub struct NodeRefLockedMut<'a> {
    inner: Mutex<&'a mut RoutingTableInner>,
    nr: NodeRef,
}

impl<'a> NodeRefLockedMut<'a> {
    pub fn new(inner: &'a mut RoutingTableInner, nr: NodeRef) -> Self {
        Self {
            inner: Mutex::new(inner),
            nr,
        }
    }
}

impl<'a> NodeRefBase for NodeRefLockedMut<'a> {
    fn common(&self) -> &NodeRefBaseCommon {
        &self.nr.common
    }

    fn common_mut(&mut self) -> &mut NodeRefBaseCommon {
        &mut self.nr.common
    }

    fn operate<T, F>(&self, f: F) -> T
    where
        F: FnOnce(&RoutingTableInner, &BucketEntryInner) -> T,
    {
        let inner = &*self.inner.lock();
        self.nr.common.entry.with(inner, f)
    }

    fn operate_mut<T, F>(&self, f: F) -> T
    where
        F: FnOnce(&mut RoutingTableInner, &mut BucketEntryInner) -> T,
    {
        let inner = &mut *self.inner.lock();
        self.nr.common.entry.with_mut(inner, f)
    }
}

impl<'a> fmt::Display for NodeRefLockedMut<'a> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "{}", self.nr)
    }
}

impl<'a> fmt::Debug for NodeRefLockedMut<'a> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("NodeRefLockedMut")
            .field("nr", &self.nr)
            .finish()
    }
}
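// Illustrative sketch only, assuming the NodeRefBase trait is public and a
// RoutingTableInner read lock is already held as the doc comments above require.
// NodeRefLocked reuses that lock for operate(); mutation needs NodeRefLockedMut,
// and calling operate_mut() on a NodeRefLocked panics by design.
fn example_locked_read(inner: &RoutingTableInner, nr: NodeRef) -> BucketEntryState {
    let locked = NodeRefLocked::new(inner, nr);
    locked.operate(|_rti, e| e.state(intf::get_timestamp()))
}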
61
veilid-core/src/routing_table/node_ref_filter.rs
Normal file
@ -0,0 +1,61 @@
use super::*;

#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord)]
pub struct NodeRefFilter {
    pub routing_domain_set: RoutingDomainSet,
    pub dial_info_filter: DialInfoFilter,
}

impl Default for NodeRefFilter {
    fn default() -> Self {
        Self::new()
    }
}

impl NodeRefFilter {
    pub fn new() -> Self {
        Self {
            routing_domain_set: RoutingDomainSet::all(),
            dial_info_filter: DialInfoFilter::all(),
        }
    }

    pub fn with_routing_domain(mut self, routing_domain: RoutingDomain) -> Self {
        self.routing_domain_set = routing_domain.into();
        self
    }
    pub fn with_routing_domain_set(mut self, routing_domain_set: RoutingDomainSet) -> Self {
        self.routing_domain_set = routing_domain_set;
        self
    }
    pub fn with_dial_info_filter(mut self, dial_info_filter: DialInfoFilter) -> Self {
        self.dial_info_filter = dial_info_filter;
        self
    }
    pub fn with_protocol_type(mut self, protocol_type: ProtocolType) -> Self {
        self.dial_info_filter = self.dial_info_filter.with_protocol_type(protocol_type);
        self
    }
    pub fn with_protocol_type_set(mut self, protocol_set: ProtocolTypeSet) -> Self {
        self.dial_info_filter = self.dial_info_filter.with_protocol_type_set(protocol_set);
        self
    }
    pub fn with_address_type(mut self, address_type: AddressType) -> Self {
        self.dial_info_filter = self.dial_info_filter.with_address_type(address_type);
        self
    }
    pub fn with_address_type_set(mut self, address_set: AddressTypeSet) -> Self {
        self.dial_info_filter = self.dial_info_filter.with_address_type_set(address_set);
        self
    }
    pub fn filtered(mut self, other_filter: &NodeRefFilter) -> Self {
        self.routing_domain_set &= other_filter.routing_domain_set;
        self.dial_info_filter = self
            .dial_info_filter
            .filtered(&other_filter.dial_info_filter);
        self
    }
    pub fn is_dead(&self) -> bool {
        self.dial_info_filter.is_dead() || self.routing_domain_set.is_empty()
    }
}
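// Illustrative usage of the builder above (not part of the new file): narrow a filter
// to a single routing domain and protocol, then check that it can still match something.
fn example_node_ref_filter() -> bool {
    let filter = NodeRefFilter::new()
        .with_routing_domain(RoutingDomain::PublicInternet)
        .with_protocol_type(ProtocolType::UDP);
    !filter.is_dead()
}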
174
veilid-core/src/routing_table/privacy.rs
Normal file
@ -0,0 +1,174 @@
use super::*;

////////////////////////////////////////////////////////////////////////////////////////////////////
// Compiled Privacy Objects

/// An encrypted private/safety route hop
#[derive(Clone, Debug)]
pub struct RouteHopData {
    /// The nonce used in the encryption ENC(Xn,DH(PKn,SKapr))
    pub nonce: Nonce,
    /// The encrypted blob
    pub blob: Vec<u8>,
}

/// How to find a route node
#[derive(Clone, Debug)]
pub enum RouteNode {
    /// Route node is optimized, no contact method information as this node id has been seen before
    NodeId(NodeId),
    /// Route node with full contact method information to ensure the peer is reachable
    PeerInfo(PeerInfo),
}
impl fmt::Display for RouteNode {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(
            f,
            "{}",
            match self {
                RouteNode::NodeId(x) => x.key.encode(),
                RouteNode::PeerInfo(pi) => pi.node_id.key.encode(),
            }
        )
    }
}

/// An unencrypted private/safety route hop
#[derive(Clone, Debug)]
pub struct RouteHop {
    /// The location of the hop
    pub node: RouteNode,
    /// The encrypted blob to pass to the next hop as its data (None for stubs)
    pub next_hop: Option<RouteHopData>,
}

/// The kind of hops a private route can have
#[derive(Clone, Debug)]
pub enum PrivateRouteHops {
    /// The first hop of a private route, unencrypted, route_hops == total hop count
    FirstHop(RouteHop),
    /// Private route internal node. Has > 0 private route hops left but < total hop count
    Data(RouteHopData),
    /// Private route has ended (hop count = 0)
    Empty,
}

/// A private route for receiver privacy
#[derive(Clone, Debug)]
pub struct PrivateRoute {
    /// The public key used for the entire route
    pub public_key: DHTKey,
    pub hop_count: u8,
    pub hops: PrivateRouteHops,
}

impl PrivateRoute {
    /// Empty private route is the form used when receiving the last hop
    pub fn new_empty(public_key: DHTKey) -> Self {
        Self {
            public_key,
            hop_count: 0,
            hops: PrivateRouteHops::Empty,
        }
    }
    /// Stub route is the form used when no privacy is required, but you need to specify the destination for a safety route
    pub fn new_stub(public_key: DHTKey, node: RouteNode) -> Self {
        Self {
            public_key,
            hop_count: 1,
            hops: PrivateRouteHops::FirstHop(RouteHop {
                node,
                next_hop: None,
            }),
        }
    }

    /// Remove the first unencrypted hop if possible
    pub fn pop_first_hop(&mut self) -> Option<RouteNode> {
        match &mut self.hops {
            PrivateRouteHops::FirstHop(first_hop) => {
                let first_hop_node = first_hop.node.clone();

                // Reduce hop count
                if self.hop_count > 0 {
                    self.hop_count -= 1;
                } else {
                    error!("hop count should not be 0 for first hop");
                }

                // Go to next hop
                self.hops = match first_hop.next_hop.take() {
                    Some(rhd) => PrivateRouteHops::Data(rhd),
                    None => PrivateRouteHops::Empty,
                };

                return Some(first_hop_node);
            }
            PrivateRouteHops::Data(_) => return None,
            PrivateRouteHops::Empty => return None,
        }
    }
}

impl fmt::Display for PrivateRoute {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(
            f,
            "PR({:?}+{}{})",
            self.public_key,
            self.hop_count,
            match &self.hops {
                PrivateRouteHops::FirstHop(fh) => {
                    format!("->{}", fh.node)
                }
                PrivateRouteHops::Data(_) => {
                    "->?".to_owned()
                }
                PrivateRouteHops::Empty => {
                    "".to_owned()
                }
            }
        )
    }
}

#[derive(Clone, Debug)]
pub enum SafetyRouteHops {
    /// Has >= 1 safety route hops
    Data(RouteHopData),
    /// Has 0 safety route hops
    Private(PrivateRoute),
}

#[derive(Clone, Debug)]
pub struct SafetyRoute {
    pub public_key: DHTKey,
    pub hop_count: u8,
    pub hops: SafetyRouteHops,
}

impl SafetyRoute {
    pub fn new_stub(public_key: DHTKey, private_route: PrivateRoute) -> Self {
        assert!(matches!(private_route.hops, PrivateRouteHops::Data(_)));
        Self {
            public_key,
            hop_count: 0,
            hops: SafetyRouteHops::Private(private_route),
        }
    }
}

impl fmt::Display for SafetyRoute {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(
            f,
            "SR({:?}+{}{})",
            self.public_key,
            self.hop_count,
            match &self.hops {
                SafetyRouteHops::Data(_) => "".to_owned(),
                SafetyRouteHops::Private(p) => format!("->{}", p),
            }
        )
    }
}
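// Illustrative sketch only, not part of the new file: build a stub private route to a
// known node id and consume its single unencrypted hop. `route_public_key` and
// `destination_key` are hypothetical values produced elsewhere.
fn example_stub_route(route_public_key: DHTKey, destination_key: DHTKey) {
    let mut pr = PrivateRoute::new_stub(route_public_key, RouteNode::NodeId(NodeId::new(destination_key)));
    assert_eq!(pr.hop_count, 1);
    // The stub's only hop is the destination itself; popping it empties the route.
    assert!(pr.pop_first_hop().is_some());
    assert!(matches!(pr.hops, PrivateRouteHops::Empty));
}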
1515
veilid-core/src/routing_table/route_spec_store.rs
Normal file
File diff suppressed because it is too large
Load Diff
@ -3,8 +3,20 @@ use super::*;
enum RoutingDomainChange {
    ClearDialInfoDetails,
    ClearRelayNode,
    SetRelayNode { relay_node: NodeRef },
    AddDialInfoDetail { dial_info_detail: DialInfoDetail },
    SetRelayNode {
        relay_node: NodeRef,
    },
    AddDialInfoDetail {
        dial_info_detail: DialInfoDetail,
    },
    SetupNetwork {
        outbound_protocols: ProtocolTypeSet,
        inbound_protocols: ProtocolTypeSet,
        address_types: AddressTypeSet,
    },
    SetNetworkClass {
        network_class: Option<NetworkClass>,
    },
}

pub struct RoutingDomainEditor {
@ -67,31 +79,54 @@ impl RoutingDomainEditor {

        Ok(())
    }

    #[instrument(level = "debug", skip(self))]
    pub fn setup_network(
        &mut self,
        outbound_protocols: ProtocolTypeSet,
        inbound_protocols: ProtocolTypeSet,
        address_types: AddressTypeSet,
    ) {
        self.changes.push(RoutingDomainChange::SetupNetwork {
            outbound_protocols,
            inbound_protocols,
            address_types,
        })
    }

    #[instrument(level = "debug", skip(self))]
    pub fn set_network_class(&mut self, network_class: Option<NetworkClass>) {
        self.changes
            .push(RoutingDomainChange::SetNetworkClass { network_class })
    }

    #[instrument(level = "debug", skip(self))]
    pub async fn commit(self) {
        // No locking if we have nothing to do
        if self.changes.is_empty() {
            return;
        }

        let mut changed = false;
        {
            let node_id = self.routing_table.node_id();

            let mut inner = self.routing_table.inner.write();
            let inner = &mut *inner;
            RoutingTable::with_routing_domain_mut(inner, self.routing_domain, |detail| {
            inner.with_routing_domain_mut(self.routing_domain, |detail| {
                for change in self.changes {
                    match change {
                        RoutingDomainChange::ClearDialInfoDetails => {
                            debug!("[{:?}] cleared dial info details", self.routing_domain);
                            detail.clear_dial_info_details();
                            detail.common_mut().clear_dial_info_details();
                            changed = true;
                        }
                        RoutingDomainChange::ClearRelayNode => {
                            debug!("[{:?}] cleared relay node", self.routing_domain);
                            detail.set_relay_node(None);
                            detail.common_mut().set_relay_node(None);
                            changed = true;
                        }
                        RoutingDomainChange::SetRelayNode { relay_node } => {
                            debug!("[{:?}] set relay node: {}", self.routing_domain, relay_node);
                            detail.set_relay_node(Some(relay_node));
                            detail.common_mut().set_relay_node(Some(relay_node));
                            changed = true;
                        }
                        RoutingDomainChange::AddDialInfoDetail { dial_info_detail } => {
@ -99,27 +134,85 @@ impl RoutingDomainEditor {
                                "[{:?}] add dial info detail: {:?}",
                                self.routing_domain, dial_info_detail
                            );
                            detail.add_dial_info_detail(dial_info_detail.clone());
                            detail
                                .common_mut()
                                .add_dial_info_detail(dial_info_detail.clone());

                            info!(
                                "{:?} Dial Info: {}",
                                "{:?} Dial Info: {}@{}",
                                self.routing_domain,
                                NodeDialInfo {
                                    node_id: NodeId::new(node_id),
                                    dial_info: dial_info_detail.dial_info
                                }
                                .to_string(),
                                NodeId::new(node_id),
                                dial_info_detail.dial_info
                            );
                            changed = true;
                        }
                        RoutingDomainChange::SetupNetwork {
                            outbound_protocols,
                            inbound_protocols,
                            address_types,
                        } => {
                            let old_outbound_protocols = detail.common().outbound_protocols();
                            let old_inbound_protocols = detail.common().inbound_protocols();
                            let old_address_types = detail.common().address_types();

                            let this_changed = old_outbound_protocols != outbound_protocols
                                || old_inbound_protocols != inbound_protocols
                                || old_address_types != address_types;

                            debug!(
                                "[{:?}] setup network: {:?} {:?} {:?}",
                                self.routing_domain,
                                outbound_protocols,
                                inbound_protocols,
                                address_types
                            );

                            detail.common_mut().setup_network(
                                outbound_protocols,
                                inbound_protocols,
                                address_types,
                            );
                            if this_changed {
                                changed = true;
                            }
                        }
                        RoutingDomainChange::SetNetworkClass { network_class } => {
                            let old_network_class = detail.common().network_class();

                            let this_changed = old_network_class != network_class;

                            debug!(
                                "[{:?}] set network class: {:?}",
                                self.routing_domain, network_class,
                            );

                            detail.common_mut().set_network_class(network_class);
                            if this_changed {
                                changed = true;
                            }
                        }
                    }
                }
                if changed {
                    // Clear our 'peer info' cache, the peerinfo for this routing domain will get regenerated next time it is asked for
                    detail.common_mut().clear_cache()
                }
            });
            if changed {
                RoutingTable::reset_all_seen_our_node_info(inner, self.routing_domain);
                RoutingTable::reset_all_updated_since_last_network_change(inner);
                // Mark that nothing in the routing table has seen our new node info
                inner.reset_all_seen_our_node_info(self.routing_domain);
                //
                inner.reset_all_updated_since_last_network_change();
            }
        }
        // Clear the routespecstore cache if our PublicInternet dial info has changed
        if changed {
            if self.routing_domain == RoutingDomain::PublicInternet {
                let rss = self.routing_table.route_spec_store();
                rss.reset();
            }
        }
        // Send our updated node info to all the nodes in the routing table
        if changed && self.send_node_info_updates {
            let network_manager = self.routing_table.unlocked_inner.network_manager.clone();
            network_manager
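// Illustrative sketch only: the intended flow for the editor above. How a
// RoutingDomainEditor is obtained from the routing table is not shown in this hunk,
// and the ::all() set constructors are assumptions; setup_network()/set_network_class()
// only queue changes, while commit() applies them and clears the peer info cache.
async fn example_edit_routing_domain(mut editor: RoutingDomainEditor) {
    editor.setup_network(
        ProtocolTypeSet::all(), // outbound protocols (assumed constructor)
        ProtocolTypeSet::all(), // inbound protocols (assumed constructor)
        AddressTypeSet::all(),  // address types (assumed constructor)
    );
    editor.set_network_class(Some(NetworkClass::InboundCapable));
    editor.commit().await;
}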
@ -1,62 +1,422 @@
use super::*;

/// Mechanism required to contact another node
#[derive(Clone, Debug)]
pub enum ContactMethod {
    /// Node is not reachable by any means
    Unreachable,
    /// Connection should have already existed
    Existing,
    /// Contact the node directly
    Direct(DialInfo),
    /// Request via signal the node connect back directly (relay, target)
    SignalReverse(DHTKey, DHTKey),
    /// Request via signal the node negotiate a hole punch (relay, target_node)
    SignalHolePunch(DHTKey, DHTKey),
    /// Must use an inbound relay to reach the node
    InboundRelay(DHTKey),
    /// Must use outbound relay to reach the node
    OutboundRelay(DHTKey),
}

#[derive(Debug)]
pub struct RoutingDomainDetailCommon {
    routing_domain: RoutingDomain,
    network_class: Option<NetworkClass>,
    outbound_protocols: ProtocolTypeSet,
    inbound_protocols: ProtocolTypeSet,
    address_types: AddressTypeSet,
    relay_node: Option<NodeRef>,
    dial_info_details: Vec<DialInfoDetail>,
    // caches
    cached_peer_info: Mutex<Option<PeerInfo>>,
}

impl RoutingDomainDetailCommon {
    pub fn new(routing_domain: RoutingDomain) -> Self {
        Self {
            routing_domain,
            network_class: Default::default(),
            outbound_protocols: Default::default(),
            inbound_protocols: Default::default(),
            address_types: Default::default(),
            relay_node: Default::default(),
            dial_info_details: Default::default(),
            cached_peer_info: Mutex::new(Default::default()),
        }
    }

    // Set from network manager
    pub(super) fn setup_network(
        &mut self,
        outbound_protocols: ProtocolTypeSet,
        inbound_protocols: ProtocolTypeSet,
        address_types: AddressTypeSet,
    ) {
        self.outbound_protocols = outbound_protocols;
        self.inbound_protocols = inbound_protocols;
        self.address_types = address_types;
        self.clear_cache();
    }

    pub(super) fn set_network_class(&mut self, network_class: Option<NetworkClass>) {
        self.network_class = network_class;
        self.clear_cache();
    }
    pub fn network_class(&self) -> Option<NetworkClass> {
        self.network_class
    }
    pub fn outbound_protocols(&self) -> ProtocolTypeSet {
        self.outbound_protocols
    }
    pub fn inbound_protocols(&self) -> ProtocolTypeSet {
        self.inbound_protocols
    }
    pub fn address_types(&self) -> AddressTypeSet {
        self.address_types
    }
    pub fn relay_node(&self) -> Option<NodeRef> {
        self.relay_node.clone()
    }
    pub(super) fn set_relay_node(&mut self, opt_relay_node: Option<NodeRef>) {
        self.relay_node = opt_relay_node.map(|nr| {
            nr.filtered_clone(NodeRefFilter::new().with_routing_domain(self.routing_domain))
        });
        self.clear_cache();
    }
    pub fn dial_info_details(&self) -> &Vec<DialInfoDetail> {
        &self.dial_info_details
    }
    pub(super) fn clear_dial_info_details(&mut self) {
        self.dial_info_details.clear();
        self.clear_cache();
    }
    pub(super) fn add_dial_info_detail(&mut self, did: DialInfoDetail) {
        self.dial_info_details.push(did);
        self.dial_info_details.sort();
        self.clear_cache();
    }

    pub fn has_valid_own_node_info(&self) -> bool {
        self.network_class.unwrap_or(NetworkClass::Invalid) != NetworkClass::Invalid
    }

    fn make_peer_info(&self, rti: &RoutingTableInner) -> PeerInfo {
        let node_info = NodeInfo {
            network_class: self.network_class.unwrap_or(NetworkClass::Invalid),
            outbound_protocols: self.outbound_protocols,
            address_types: self.address_types,
            min_version: MIN_CRYPTO_VERSION,
            max_version: MAX_CRYPTO_VERSION,
            dial_info_detail_list: self.dial_info_details.clone(),
        };

        let relay_info = self
            .relay_node
            .as_ref()
            .and_then(|rn| {
                let opt_relay_pi = rn.locked(rti).make_peer_info(self.routing_domain);
                if let Some(relay_pi) = opt_relay_pi {
                    match relay_pi.signed_node_info {
                        SignedNodeInfo::Direct(d) => Some((relay_pi.node_id, d)),
                        SignedNodeInfo::Relayed(_) => {
                            warn!("relay node should not have a relay itself! if this happens, a relay updated its signed node info and became a relay, which should cause the relay to be dropped");
                            None
                        },
                    }
                } else {
                    None
                }
            });

        let signed_node_info = match relay_info {
            Some((relay_id, relay_sdni)) => SignedNodeInfo::Relayed(
                SignedRelayedNodeInfo::with_secret(
                    NodeId::new(rti.unlocked_inner.node_id),
                    node_info,
                    relay_id,
                    relay_sdni,
                    &rti.unlocked_inner.node_id_secret,
                )
                .unwrap(),
            ),
            None => SignedNodeInfo::Direct(
                SignedDirectNodeInfo::with_secret(
                    NodeId::new(rti.unlocked_inner.node_id),
                    node_info,
                    &rti.unlocked_inner.node_id_secret,
                )
                .unwrap()
            ),
        };

        PeerInfo::new(NodeId::new(rti.unlocked_inner.node_id), signed_node_info)
    }

    pub fn with_peer_info<F, R>(&self, rti: &RoutingTableInner, f: F) -> R
    where
        F: FnOnce(&PeerInfo) -> R,
    {
        let mut cpi = self.cached_peer_info.lock();
        if cpi.is_none() {
            // Regenerate peer info
            let pi = self.make_peer_info(rti);

            // Cache the peer info
            *cpi = Some(pi);
        }
        f(cpi.as_ref().unwrap())
    }

    pub fn inbound_dial_info_filter(&self) -> DialInfoFilter {
        DialInfoFilter::all()
            .with_protocol_type_set(self.inbound_protocols)
            .with_address_type_set(self.address_types)
    }
    pub fn outbound_dial_info_filter(&self) -> DialInfoFilter {
        DialInfoFilter::all()
            .with_protocol_type_set(self.outbound_protocols)
            .with_address_type_set(self.address_types)
    }

    pub(super) fn clear_cache(&self) {
        *self.cached_peer_info.lock() = None;
    }
}

/// General trait for all routing domains
pub trait RoutingDomainDetail {
    fn can_contain_address(&self, address: Address) -> bool;
    fn relay_node(&self) -> Option<NodeRef>;
    fn set_relay_node(&mut self, opt_relay_node: Option<NodeRef>);
    fn dial_info_details(&self) -> &Vec<DialInfoDetail>;
    fn clear_dial_info_details(&mut self);
    fn add_dial_info_detail(&mut self, did: DialInfoDetail);
}

/// General trait for all routing domains
pub trait RoutingDomainDetail {
    // Common accessors
    fn common(&self) -> &RoutingDomainDetailCommon;
    fn common_mut(&mut self) -> &mut RoutingDomainDetailCommon;

    /// Can this routing domain contain a particular address
    fn can_contain_address(&self, address: Address) -> bool;

    /// Get the contact method required for node A to reach node B in this routing domain
    /// Routing table must be locked for reading to use this function
    fn get_contact_method(
        &self,
        rti: &RoutingTableInner,
        peer_a: &PeerInfo,
        peer_b: &PeerInfo,
        dial_info_filter: DialInfoFilter,
        sequencing: Sequencing,
    ) -> ContactMethod;
}

/////////////////////////////////////////////////////////////////////////////////////////////////////////////////

/// Public Internet routing domain internals
#[derive(Debug, Default)]
pub struct PublicInternetRoutingDomainDetail {
    /// An optional node we relay through for this domain
    relay_node: Option<NodeRef>,
    /// The dial infos on this domain we can be reached by
    dial_info_details: Vec<DialInfoDetail>,
}

/// Public Internet routing domain internals
#[derive(Debug)]
pub struct PublicInternetRoutingDomainDetail {
    /// Common implementation for all routing domains
    common: RoutingDomainDetailCommon,
}

impl Default for PublicInternetRoutingDomainDetail {
    fn default() -> Self {
        Self {
            common: RoutingDomainDetailCommon::new(RoutingDomain::PublicInternet),
        }
    }
}

fn first_filtered_dial_info_detail(
    from_node: &NodeInfo,
    to_node: &NodeInfo,
    dial_info_filter: &DialInfoFilter,
    sequencing: Sequencing,
) -> Option<DialInfoDetail> {
    let dial_info_filter = dial_info_filter.clone().filtered(
        &DialInfoFilter::all()
            .with_address_type_set(from_node.address_types)
            .with_protocol_type_set(from_node.outbound_protocols),
    );

    // Get first filtered dialinfo
    let (sort, dial_info_filter) = match sequencing {
        Sequencing::NoPreference => (None, dial_info_filter),
        Sequencing::PreferOrdered => (
            Some(DialInfoDetail::ordered_sequencing_sort),
            dial_info_filter,
        ),
        Sequencing::EnsureOrdered => (
            Some(DialInfoDetail::ordered_sequencing_sort),
            dial_info_filter.filtered(
                &DialInfoFilter::all().with_protocol_type_set(ProtocolType::all_ordered_set()),
            ),
        ),
    };
    // If the filter is dead then we won't be able to connect
    if dial_info_filter.is_dead() {
        return None;
    }

    let direct_filter = |did: &DialInfoDetail| did.matches_filter(&dial_info_filter);

    // Get the best match dial info for node B if we have it
    to_node.first_filtered_dial_info_detail(sort, direct_filter)
}

impl RoutingDomainDetail for PublicInternetRoutingDomainDetail {
    fn can_contain_address(&self, address: Address) -> bool {
        address.is_global()
    }
    fn relay_node(&self) -> Option<NodeRef> {
        self.relay_node.clone()
    }
    fn set_relay_node(&mut self, opt_relay_node: Option<NodeRef>) {
        self.relay_node = opt_relay_node.map(|nr| {
            nr.filtered_clone(
                NodeRefFilter::new().with_routing_domain(RoutingDomain::PublicInternet),
            )
        })
    }
    fn dial_info_details(&self) -> &Vec<DialInfoDetail> {
        &self.dial_info_details
    }
    fn clear_dial_info_details(&mut self) {
        self.dial_info_details.clear();
    }
    fn add_dial_info_detail(&mut self, did: DialInfoDetail) {
        self.dial_info_details.push(did);
        self.dial_info_details.sort();
    }
}

impl RoutingDomainDetail for PublicInternetRoutingDomainDetail {
    fn common(&self) -> &RoutingDomainDetailCommon {
        &self.common
    }
    fn common_mut(&mut self) -> &mut RoutingDomainDetailCommon {
        &mut self.common
    }
    fn can_contain_address(&self, address: Address) -> bool {
        address.is_global()
    }
    fn get_contact_method(
        &self,
        _rti: &RoutingTableInner,
        peer_a: &PeerInfo,
        peer_b: &PeerInfo,
        dial_info_filter: DialInfoFilter,
        sequencing: Sequencing,
    ) -> ContactMethod {
        // Get the nodeinfos for convenience
        let node_a = peer_a.signed_node_info.node_info();
        let node_b = peer_b.signed_node_info.node_info();

        // Get the best match dial info for node B if we have it
        if let Some(target_did) =
            first_filtered_dial_info_detail(node_a, node_b, &dial_info_filter, sequencing)
        {
            // Do we need to signal before going inbound?
            if !target_did.class.requires_signal() {
                // Go direct without signaling
                return ContactMethod::Direct(target_did.dial_info);
            }

            // Get the target's inbound relay, it must have one or it is not reachable
            if let Some(node_b_relay) = peer_b.signed_node_info.relay_info() {
                let node_b_relay_id = peer_b.signed_node_info.relay_id().unwrap();
                // Note that relay_peer_info could be node_a, in which case a connection already exists
                // and we shouldn't have even gotten here
                if node_b_relay_id.key == peer_a.node_id.key {
                    return ContactMethod::Existing;
                }

                // Can node A reach the inbound relay directly?
                if first_filtered_dial_info_detail(
                    node_a,
                    node_b_relay,
                    &dial_info_filter,
                    sequencing,
                )
                .is_some()
                {
                    // Can node A receive anything inbound ever?
                    if matches!(node_a.network_class, NetworkClass::InboundCapable) {
                        ///////// Reverse connection

                        // Get the best match dial info for an reverse inbound connection from node B to node A
                        if let Some(reverse_did) = first_filtered_dial_info_detail(
                            node_b,
                            node_a,
                            &dial_info_filter,
                            sequencing,
                        ) {
                            // Ensure we aren't on the same public IP address (no hairpin nat)
                            if reverse_did.dial_info.to_ip_addr()
                                != target_did.dial_info.to_ip_addr()
                            {
                                // Can we receive a direct reverse connection?
                                if !reverse_did.class.requires_signal() {
                                    return ContactMethod::SignalReverse(
                                        node_b_relay_id.key,
                                        peer_b.node_id.key,
                                    );
                                }
                            }
                        }

                        ///////// UDP hole-punch

                        // Does node B have a direct udp dialinfo node A can reach?
                        let udp_dial_info_filter = dial_info_filter
                            .clone()
                            .filtered(&DialInfoFilter::all().with_protocol_type(ProtocolType::UDP));
                        if let Some(target_udp_did) = first_filtered_dial_info_detail(
                            node_a,
                            node_b,
                            &udp_dial_info_filter,
                            sequencing,
                        ) {
                            // Does node A have a direct udp dialinfo that node B can reach?
                            if let Some(reverse_udp_did) = first_filtered_dial_info_detail(
                                node_b,
                                node_a,
                                &udp_dial_info_filter,
                                sequencing,
                            ) {
                                // Ensure we aren't on the same public IP address (no hairpin nat)
                                if reverse_udp_did.dial_info.to_ip_addr()
                                    != target_udp_did.dial_info.to_ip_addr()
                                {
                                    // The target and ourselves have a udp dialinfo that they can reach
                                    return ContactMethod::SignalHolePunch(
                                        node_b_relay_id.key,
                                        peer_a.node_id.key,
                                    );
                                }
                            }
                        }
                        // Otherwise we have to inbound relay
                    }

                    return ContactMethod::InboundRelay(node_b_relay_id.key);
                }
            }
        }
        // If the node B has no direct dial info, it needs to have an inbound relay
        else if let Some(node_b_relay) = peer_b.signed_node_info.relay_info() {
            let node_b_relay_id = peer_b.signed_node_info.relay_id().unwrap();

            // Can we reach the full relay?
            if first_filtered_dial_info_detail(
                node_a,
                &node_b_relay,
                &dial_info_filter,
                sequencing,
            )
            .is_some()
            {
                return ContactMethod::InboundRelay(node_b_relay_id.key);
            }
        }

        // If node A can't reach the node by other means, it may need to use its own relay
        if let Some(node_a_relay_id) = peer_a.signed_node_info.relay_id() {
            return ContactMethod::OutboundRelay(node_a_relay_id.key);
        }

        ContactMethod::Unreachable
    }
}

/// Local Network routing domain internals
#[derive(Debug, Default)]
#[derive(Debug)]
pub struct LocalInternetRoutingDomainDetail {
pub struct LocalNetworkRoutingDomainDetail {
    /// An optional node we relay through for this domain
    relay_node: Option<NodeRef>,
    /// The dial infos on this domain we can be reached by
    dial_info_details: Vec<DialInfoDetail>,
    /// The local networks this domain will communicate with
    local_networks: Vec<(IpAddr, IpAddr)>,
    /// Common implementation for all routing domains
    common: RoutingDomainDetailCommon,
}

impl LocalInternetRoutingDomainDetail {
impl Default for LocalNetworkRoutingDomainDetail {
    fn default() -> Self {
        Self {
            local_networks: Default::default(),
            common: RoutingDomainDetailCommon::new(RoutingDomain::LocalNetwork),
        }
    }
}

impl LocalNetworkRoutingDomainDetail {
    pub fn set_local_networks(&mut self, mut local_networks: Vec<(IpAddr, IpAddr)>) -> bool {
        local_networks.sort();
        if local_networks == self.local_networks {
@ -67,7 +427,13 @@ impl LocalInternetRoutingDomainDetail {
        }
    }
}

impl RoutingDomainDetail for LocalInternetRoutingDomainDetail {
impl RoutingDomainDetail for LocalNetworkRoutingDomainDetail {
    fn common(&self) -> &RoutingDomainDetailCommon {
        &self.common
    }
    fn common_mut(&mut self) -> &mut RoutingDomainDetailCommon {
        &mut self.common
    }
    fn can_contain_address(&self, address: Address) -> bool {
        let ip = address.to_ip_addr();
        for localnet in &self.local_networks {
@ -77,22 +443,48 @@ impl RoutingDomainDetail for LocalInternetRoutingDomainDetail {
        }
        false
    }

    fn relay_node(&self) -> Option<NodeRef> {
        self.relay_node.clone()
    }
    fn set_relay_node(&mut self, opt_relay_node: Option<NodeRef>) {
        self.relay_node = opt_relay_node.map(|nr| {
            nr.filtered_clone(NodeRefFilter::new().with_routing_domain(RoutingDomain::LocalNetwork))
        });
    }
    fn dial_info_details(&self) -> &Vec<DialInfoDetail> {
        &self.dial_info_details
    }
    fn clear_dial_info_details(&mut self) {
        self.dial_info_details.clear();
    }
    fn add_dial_info_detail(&mut self, did: DialInfoDetail) {
        self.dial_info_details.push(did);
        self.dial_info_details.sort();
    }

    fn get_contact_method(
        &self,
        _rti: &RoutingTableInner,
        peer_a: &PeerInfo,
        peer_b: &PeerInfo,
        dial_info_filter: DialInfoFilter,
        sequencing: Sequencing,
    ) -> ContactMethod {
        // Scope the filter down to protocols node A can do outbound
        let dial_info_filter = dial_info_filter.filtered(
            &DialInfoFilter::all()
                .with_address_type_set(peer_a.signed_node_info.node_info().address_types)
                .with_protocol_type_set(peer_a.signed_node_info.node_info().outbound_protocols),
        );

        // Get first filtered dialinfo
        let (sort, dial_info_filter) = match sequencing {
            Sequencing::NoPreference => (None, dial_info_filter),
            Sequencing::PreferOrdered => (
                Some(DialInfoDetail::ordered_sequencing_sort),
                dial_info_filter,
            ),
            Sequencing::EnsureOrdered => (
                Some(DialInfoDetail::ordered_sequencing_sort),
                dial_info_filter.filtered(
                    &DialInfoFilter::all().with_protocol_type_set(ProtocolType::all_ordered_set()),
                ),
            ),
        };
        // If the filter is dead then we won't be able to connect
        if dial_info_filter.is_dead() {
            return ContactMethod::Unreachable;
        }

        let filter = |did: &DialInfoDetail| did.matches_filter(&dial_info_filter);

        let opt_target_did = peer_b.signed_node_info.node_info().first_filtered_dial_info_detail(sort, filter);
        if let Some(target_did) = opt_target_did {
            return ContactMethod::Direct(target_did.dial_info);
        }

        ContactMethod::Unreachable
    }
}
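// Illustrative sketch only, not part of the diff: how a caller might dispatch on the
// ContactMethod returned by get_contact_method() above. The comments describe intent;
// the actual send/signal plumbing lives in the network manager and is not shown here.
fn example_handle_contact_method(cm: ContactMethod) {
    match cm {
        ContactMethod::Direct(_dial_info) => { /* connect straight to the dial info */ }
        ContactMethod::SignalReverse(_relay_key, _target_key) => { /* ask the relay to have the target connect back */ }
        ContactMethod::SignalHolePunch(_relay_key, _target_key) => { /* coordinate a UDP hole punch through the relay */ }
        ContactMethod::InboundRelay(_relay_key) | ContactMethod::OutboundRelay(_relay_key) => { /* send traffic through the relay */ }
        ContactMethod::Existing => { /* reuse the connection that should already exist */ }
        ContactMethod::Unreachable => { /* give up or try another routing domain */ }
    }
}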
999
veilid-core/src/routing_table/routing_table_inner.rs
Normal file
@ -0,0 +1,999 @@
use super::*;

const RECENT_PEERS_TABLE_SIZE: usize = 64;

//////////////////////////////////////////////////////////////////////////

#[derive(Debug, Clone, Copy)]
pub struct RecentPeersEntry {
    pub last_connection: ConnectionDescriptor,
}

/// RoutingTable rwlock-internal data
pub struct RoutingTableInner {
    /// Extra pointer to unlocked members to simplify access
    pub(super) unlocked_inner: Arc<RoutingTableUnlockedInner>,
    /// Routing table buckets that hold entries
    pub(super) buckets: Vec<Bucket>,
    /// A fast counter for the number of entries in the table, total
    pub(super) bucket_entry_count: usize,
    /// The public internet routing domain
    pub(super) public_internet_routing_domain: PublicInternetRoutingDomainDetail,
    /// The dial info we use on the local network
    pub(super) local_network_routing_domain: LocalNetworkRoutingDomainDetail,
    /// Interim accounting mechanism for this node's RPC latency to any other node
    pub(super) self_latency_stats_accounting: LatencyStatsAccounting,
    /// Interim accounting mechanism for the total bandwidth to/from this node
    pub(super) self_transfer_stats_accounting: TransferStatsAccounting,
    /// Statistics about the total bandwidth to/from this node
    pub(super) self_transfer_stats: TransferStatsDownUp,
    /// Peers we have recently communicated with
    pub(super) recent_peers: LruCache<DHTKey, RecentPeersEntry>,
    /// Storage for private/safety RouteSpecs
    pub(super) route_spec_store: Option<RouteSpecStore>,
}

impl RoutingTableInner {
    pub(super) fn new(unlocked_inner: Arc<RoutingTableUnlockedInner>) -> RoutingTableInner {
        RoutingTableInner {
            unlocked_inner,
            buckets: Vec::new(),
            public_internet_routing_domain: PublicInternetRoutingDomainDetail::default(),
            local_network_routing_domain: LocalNetworkRoutingDomainDetail::default(),
            bucket_entry_count: 0,
            self_latency_stats_accounting: LatencyStatsAccounting::new(),
            self_transfer_stats_accounting: TransferStatsAccounting::new(),
            self_transfer_stats: TransferStatsDownUp::default(),
            recent_peers: LruCache::new(RECENT_PEERS_TABLE_SIZE),
            route_spec_store: None,
        }
    }

    pub fn network_manager(&self) -> NetworkManager {
        self.unlocked_inner.network_manager.clone()
    }
    pub fn rpc_processor(&self) -> RPCProcessor {
        self.network_manager().rpc_processor()
    }

    pub fn node_id(&self) -> DHTKey {
        self.unlocked_inner.node_id
    }

    pub fn node_id_secret(&self) -> DHTKeySecret {
        self.unlocked_inner.node_id_secret
    }

    pub fn config(&self) -> VeilidConfig {
        self.unlocked_inner.config.clone()
    }

    pub fn transfer_stats_accounting(&mut self) -> &mut TransferStatsAccounting {
        &mut self.self_transfer_stats_accounting
    }
    pub fn latency_stats_accounting(&mut self) -> &mut LatencyStatsAccounting {
        &mut self.self_latency_stats_accounting
    }

    pub fn routing_domain_for_address(&self, address: Address) -> Option<RoutingDomain> {
        for rd in RoutingDomain::all() {
            let can_contain = self.with_routing_domain(rd, |rdd| rdd.can_contain_address(address));
            if can_contain {
                return Some(rd);
            }
        }
        None
    }

    pub fn with_routing_domain<F, R>(&self, domain: RoutingDomain, f: F) -> R
    where
        F: FnOnce(&dyn RoutingDomainDetail) -> R,
    {
        match domain {
            RoutingDomain::PublicInternet => f(&self.public_internet_routing_domain),
            RoutingDomain::LocalNetwork => f(&self.local_network_routing_domain),
        }
    }

    pub fn with_routing_domain_mut<F, R>(&mut self, domain: RoutingDomain, f: F) -> R
    where
        F: FnOnce(&mut dyn RoutingDomainDetail) -> R,
    {
        match domain {
            RoutingDomain::PublicInternet => f(&mut self.public_internet_routing_domain),
            RoutingDomain::LocalNetwork => f(&mut self.local_network_routing_domain),
        }
    }

    pub fn relay_node(&self, domain: RoutingDomain) -> Option<NodeRef> {
        self.with_routing_domain(domain, |rd| rd.common().relay_node())
    }

    pub fn has_dial_info(&self, domain: RoutingDomain) -> bool {
        self.with_routing_domain(domain, |rd| !rd.common().dial_info_details().is_empty())
    }

    pub fn dial_info_details(&self, domain: RoutingDomain) -> Vec<DialInfoDetail> {
        self.with_routing_domain(domain, |rd| rd.common().dial_info_details().clone())
    }

    pub fn first_filtered_dial_info_detail(
        &self,
        routing_domain_set: RoutingDomainSet,
        filter: &DialInfoFilter,
    ) -> Option<DialInfoDetail> {
        for routing_domain in routing_domain_set {
            let did = self.with_routing_domain(routing_domain, |rd| {
                for did in rd.common().dial_info_details() {
                    if did.matches_filter(filter) {
                        return Some(did.clone());
                    }
                }
                None
            });
            if did.is_some() {
                return did;
            }
        }
        None
    }

    pub fn all_filtered_dial_info_details(
        &self,
        routing_domain_set: RoutingDomainSet,
        filter: &DialInfoFilter,
    ) -> Vec<DialInfoDetail> {
        let mut ret = Vec::new();
        for routing_domain in routing_domain_set {
            self.with_routing_domain(routing_domain, |rd| {
                for did in rd.common().dial_info_details() {
                    if did.matches_filter(filter) {
                        ret.push(did.clone());
                    }
                }
            });
        }
        ret.remove_duplicates();
        ret
    }

    pub fn ensure_dial_info_is_valid(&self, domain: RoutingDomain, dial_info: &DialInfo) -> bool {
        let address = dial_info.socket_address().address();
        let can_contain_address =
            self.with_routing_domain(domain, |rd| rd.can_contain_address(address));

        if !can_contain_address {
            log_rtab!(debug "can not add dial info to this routing domain");
            return false;
        }
        if !dial_info.is_valid() {
            log_rtab!(debug
                "shouldn't be registering invalid addresses: {:?}",
                dial_info
            );
            return false;
        }
        true
    }

    pub fn node_info_is_valid_in_routing_domain(
        &self,
        routing_domain: RoutingDomain,
        node_info: &NodeInfo,
    ) -> bool {
        // Should not be passing around nodeinfo with an invalid network class
        if matches!(node_info.network_class, NetworkClass::Invalid) {
            return false;
        }
        // Ensure all of the dial info works in this routing domain
        for did in &node_info.dial_info_detail_list {
            if !self.ensure_dial_info_is_valid(routing_domain, &did.dial_info) {
                return false;
            }
        }
        true
    }

    pub fn signed_node_info_is_valid_in_routing_domain(
        &self,
        routing_domain: RoutingDomain,
        signed_node_info: &SignedNodeInfo,
    ) -> bool {
        if !self.node_info_is_valid_in_routing_domain(routing_domain, signed_node_info.node_info())
        {
            return false;
        }
        // Ensure the relay is also valid in this routing domain if it is provided
        if let Some(relay_ni) = signed_node_info.relay_info() {
            if !self.node_info_is_valid_in_routing_domain(routing_domain, relay_ni) {
                return false;
            }
        }
        true
    }

    #[instrument(level = "trace", skip(self), ret)]
    pub fn get_contact_method(
        &self,
        routing_domain: RoutingDomain,
        peer_a: &PeerInfo,
        peer_b: &PeerInfo,
        dial_info_filter: DialInfoFilter,
        sequencing: Sequencing,
    ) -> ContactMethod {
        self.with_routing_domain(routing_domain, |rdd| {
            rdd.get_contact_method(self, peer_a, peer_b, dial_info_filter, sequencing)
        })
    }

    pub fn reset_all_seen_our_node_info(&mut self, routing_domain: RoutingDomain) {
        let cur_ts = intf::get_timestamp();
        self.with_entries_mut(cur_ts, BucketEntryState::Dead, |rti, _, v| {
            v.with_mut(rti, |_rti, e| {
                e.set_seen_our_node_info(routing_domain, false);
            });
            Option::<()>::None
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn reset_all_updated_since_last_network_change(&mut self) {
|
||||||
|
let cur_ts = intf::get_timestamp();
|
||||||
|
self.with_entries_mut(cur_ts, BucketEntryState::Dead, |rti, _, v| {
|
||||||
|
v.with_mut(rti, |_rti, e| {
|
||||||
|
e.set_updated_since_last_network_change(false)
|
||||||
|
});
|
||||||
|
Option::<()>::None
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Return a copy of our node's peerinfo
|
||||||
|
pub fn get_own_peer_info(&self, routing_domain: RoutingDomain) -> PeerInfo {
|
||||||
|
self.with_routing_domain(routing_domain, |rdd| {
|
||||||
|
rdd.common().with_peer_info(self, |pi| pi.clone())
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Return whether our own node info is valid in this routing domain
pub fn has_valid_own_node_info(&self, routing_domain: RoutingDomain) -> bool {
self.with_routing_domain(routing_domain, |rdd| rdd.common().has_valid_own_node_info())
}

/// Return the domain's currently registered network class
pub fn get_network_class(&self, routing_domain: RoutingDomain) -> Option<NetworkClass> {
self.with_routing_domain(routing_domain, |rdd| rdd.common().network_class())
}

/// Return the domain's filter for what we can receive in the form of a dial info filter
pub fn get_inbound_dial_info_filter(&self, routing_domain: RoutingDomain) -> DialInfoFilter {
self.with_routing_domain(routing_domain, |rdd| {
rdd.common().inbound_dial_info_filter()
})
}

/// Return the domain's filter for what we can receive in the form of a node ref filter
pub fn get_inbound_node_ref_filter(&self, routing_domain: RoutingDomain) -> NodeRefFilter {
let dif = self.get_inbound_dial_info_filter(routing_domain);
NodeRefFilter::new()
.with_routing_domain(routing_domain)
.with_dial_info_filter(dif)
}

/// Return the domain's filter for what we can send out in the form of a dial info filter
pub fn get_outbound_dial_info_filter(&self, routing_domain: RoutingDomain) -> DialInfoFilter {
self.with_routing_domain(routing_domain, |rdd| {
rdd.common().outbound_dial_info_filter()
})
}
/// Return the domain's filter for what we can send out in the form of a node ref filter
pub fn get_outbound_node_ref_filter(&self, routing_domain: RoutingDomain) -> NodeRefFilter {
let dif = self.get_outbound_dial_info_filter(routing_domain);
NodeRefFilter::new()
.with_routing_domain(routing_domain)
.with_dial_info_filter(dif)
}

fn bucket_depth(index: usize) -> usize {
match index {
0 => 256,
1 => 128,
2 => 64,
3 => 32,
4 => 16,
5 => 8,
6 => 4,
7 => 4,
8 => 4,
9 => 4,
_ => 4,
}
}
|
||||||
|
|
||||||
|
pub fn init_buckets(&mut self, routing_table: RoutingTable) {
|
||||||
|
// Size the buckets (one per bit)
|
||||||
|
self.buckets.clear();
|
||||||
|
self.buckets.reserve(DHT_KEY_LENGTH * 8);
|
||||||
|
for _ in 0..DHT_KEY_LENGTH * 8 {
|
||||||
|
let bucket = Bucket::new(routing_table.clone());
|
||||||
|
self.buckets.push(bucket);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn configure_local_network_routing_domain(
|
||||||
|
&mut self,
|
||||||
|
local_networks: Vec<(IpAddr, IpAddr)>,
|
||||||
|
) {
|
||||||
|
log_net!(debug "configure_local_network_routing_domain: {:#?}", local_networks);
|
||||||
|
|
||||||
|
let changed = self
|
||||||
|
.local_network_routing_domain
|
||||||
|
.set_local_networks(local_networks);
|
||||||
|
|
||||||
|
// If the local network topology has changed, nuke the existing local node info and let new local discovery happen
|
||||||
|
if changed {
|
||||||
|
let cur_ts = intf::get_timestamp();
|
||||||
|
self.with_entries_mut(cur_ts, BucketEntryState::Dead, |rti, _, e| {
|
||||||
|
e.with_mut(rti, |_rti, e| {
|
||||||
|
e.clear_signed_node_info(RoutingDomain::LocalNetwork);
|
||||||
|
e.set_seen_our_node_info(RoutingDomain::LocalNetwork, false);
|
||||||
|
e.set_updated_since_last_network_change(false);
|
||||||
|
});
|
||||||
|
Option::<()>::None
|
||||||
|
});
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Attempt to empty the routing table
|
||||||
|
/// This should only be performed when there are no NodeRefs (detached)
|
||||||
|
pub fn purge_buckets(&mut self) {
|
||||||
|
log_rtab!(
|
||||||
|
"Starting routing table buckets purge. Table currently has {} nodes",
|
||||||
|
self.bucket_entry_count
|
||||||
|
);
|
||||||
|
for bucket in &mut self.buckets {
|
||||||
|
bucket.kick(0);
|
||||||
|
}
|
||||||
|
log_rtab!(debug
|
||||||
|
"Routing table buckets purge complete. Routing table now has {} nodes",
|
||||||
|
self.bucket_entry_count
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Attempt to remove last_connections from entries
|
||||||
|
pub fn purge_last_connections(&mut self) {
|
||||||
|
log_rtab!(
|
||||||
|
"Starting routing table last_connections purge. Table currently has {} nodes",
|
||||||
|
self.bucket_entry_count
|
||||||
|
);
|
||||||
|
for bucket in &self.buckets {
|
||||||
|
for entry in bucket.entries() {
|
||||||
|
entry.1.with_mut_inner(|e| {
|
||||||
|
e.clear_last_connections();
|
||||||
|
});
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
log_rtab!(debug
|
||||||
|
"Routing table last_connections purge complete. Routing table now has {} nodes",
|
||||||
|
self.bucket_entry_count
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Attempt to settle buckets and remove entries down to the desired number,
/// which may not be possible due to extant NodeRefs
pub fn kick_bucket(&mut self, idx: usize) {
let bucket = &mut self.buckets[idx];
let bucket_depth = Self::bucket_depth(idx);

if let Some(dead_node_ids) = bucket.kick(bucket_depth) {
// Remove counts
self.bucket_entry_count -= dead_node_ids.len();
log_rtab!(debug "Routing table now has {} nodes", self.bucket_entry_count);

// Now purge the routing table inner vectors
//let filter = |k: &DHTKey| dead_node_ids.contains(k);
//inner.closest_reliable_nodes.retain(filter);
//inner.fastest_reliable_nodes.retain(filter);
//inner.closest_nodes.retain(filter);
//inner.fastest_nodes.retain(filter);
}
}
|
||||||
|
|
||||||
|
pub fn find_bucket_index(&self, node_id: DHTKey) -> usize {
|
||||||
|
distance(&node_id, &self.unlocked_inner.node_id)
|
||||||
|
.first_nonzero_bit()
|
||||||
|
.unwrap()
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn get_entry_count(
|
||||||
|
&self,
|
||||||
|
routing_domain_set: RoutingDomainSet,
|
||||||
|
min_state: BucketEntryState,
|
||||||
|
) -> usize {
|
||||||
|
let mut count = 0usize;
|
||||||
|
let cur_ts = intf::get_timestamp();
|
||||||
|
self.with_entries(cur_ts, min_state, |rti, _, e| {
|
||||||
|
if e.with(rti, |_rti, e| e.best_routing_domain(routing_domain_set))
|
||||||
|
.is_some()
|
||||||
|
{
|
||||||
|
count += 1;
|
||||||
|
}
|
||||||
|
Option::<()>::None
|
||||||
|
});
|
||||||
|
count
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn with_entries<T, F: FnMut(&RoutingTableInner, DHTKey, Arc<BucketEntry>) -> Option<T>>(
|
||||||
|
&self,
|
||||||
|
cur_ts: u64,
|
||||||
|
min_state: BucketEntryState,
|
||||||
|
mut f: F,
|
||||||
|
) -> Option<T> {
|
||||||
|
let mut entryvec = Vec::with_capacity(self.bucket_entry_count);
|
||||||
|
for bucket in &self.buckets {
|
||||||
|
for entry in bucket.entries() {
|
||||||
|
if entry.1.with(self, |_rti, e| e.state(cur_ts) >= min_state) {
|
||||||
|
entryvec.push((*entry.0, entry.1.clone()));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
for entry in entryvec {
|
||||||
|
if let Some(out) = f(self, entry.0, entry.1) {
|
||||||
|
return Some(out);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
None
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn with_entries_mut<
|
||||||
|
T,
|
||||||
|
F: FnMut(&mut RoutingTableInner, DHTKey, Arc<BucketEntry>) -> Option<T>,
|
||||||
|
>(
|
||||||
|
&mut self,
|
||||||
|
cur_ts: u64,
|
||||||
|
min_state: BucketEntryState,
|
||||||
|
mut f: F,
|
||||||
|
) -> Option<T> {
|
||||||
|
let mut entryvec = Vec::with_capacity(self.bucket_entry_count);
|
||||||
|
for bucket in &self.buckets {
|
||||||
|
for entry in bucket.entries() {
|
||||||
|
if entry.1.with(self, |_rti, e| e.state(cur_ts) >= min_state) {
|
||||||
|
entryvec.push((*entry.0, entry.1.clone()));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
for entry in entryvec {
|
||||||
|
if let Some(out) = f(self, entry.0, entry.1) {
|
||||||
|
return Some(out);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
None
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn get_nodes_needing_updates(
|
||||||
|
&self,
|
||||||
|
outer_self: RoutingTable,
|
||||||
|
routing_domain: RoutingDomain,
|
||||||
|
cur_ts: u64,
|
||||||
|
all: bool,
|
||||||
|
) -> Vec<NodeRef> {
|
||||||
|
let mut node_refs = Vec::<NodeRef>::with_capacity(self.bucket_entry_count);
|
||||||
|
self.with_entries(cur_ts, BucketEntryState::Unreliable, |rti, k, v| {
|
||||||
|
// Only update nodes that haven't seen our node info yet
|
||||||
|
if all || !v.with(rti, |_rti, e| e.has_seen_our_node_info(routing_domain)) {
|
||||||
|
node_refs.push(NodeRef::new(
|
||||||
|
outer_self.clone(),
|
||||||
|
k,
|
||||||
|
v,
|
||||||
|
Some(NodeRefFilter::new().with_routing_domain(routing_domain)),
|
||||||
|
));
|
||||||
|
}
|
||||||
|
Option::<()>::None
|
||||||
|
});
|
||||||
|
node_refs
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn get_nodes_needing_ping(
|
||||||
|
&self,
|
||||||
|
outer_self: RoutingTable,
|
||||||
|
routing_domain: RoutingDomain,
|
||||||
|
cur_ts: u64,
|
||||||
|
) -> Vec<NodeRef> {
|
||||||
|
// Get this routing domain's relay node id, if one is set
|
||||||
|
let opt_relay_id = self.with_routing_domain(routing_domain, |rd| {
|
||||||
|
rd.common().relay_node().map(|rn| rn.node_id())
|
||||||
|
});
|
||||||
|
|
||||||
|
// Collect all entries that are 'needs_ping' and have some node info making them reachable somehow
|
||||||
|
let mut node_refs = Vec::<NodeRef>::with_capacity(self.bucket_entry_count);
|
||||||
|
self.with_entries(cur_ts, BucketEntryState::Unreliable, |rti, k, v| {
|
||||||
|
if v.with(rti, |_rti, e| {
|
||||||
|
e.has_node_info(routing_domain.into())
|
||||||
|
&& e.needs_ping(cur_ts, opt_relay_id == Some(k))
|
||||||
|
}) {
|
||||||
|
node_refs.push(NodeRef::new(
|
||||||
|
outer_self.clone(),
|
||||||
|
k,
|
||||||
|
v,
|
||||||
|
Some(NodeRefFilter::new().with_routing_domain(routing_domain)),
|
||||||
|
));
|
||||||
|
}
|
||||||
|
Option::<()>::None
|
||||||
|
});
|
||||||
|
node_refs
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn get_all_nodes(&self, outer_self: RoutingTable, cur_ts: u64) -> Vec<NodeRef> {
|
||||||
|
let mut node_refs = Vec::<NodeRef>::with_capacity(self.bucket_entry_count);
|
||||||
|
self.with_entries(cur_ts, BucketEntryState::Unreliable, |_rti, k, v| {
|
||||||
|
node_refs.push(NodeRef::new(outer_self.clone(), k, v, None));
|
||||||
|
Option::<()>::None
|
||||||
|
});
|
||||||
|
node_refs
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Create a node reference, possibly creating a bucket entry.
/// The 'update_func' closure is called on the entry, whether existing or newly created,
/// in a locked fashion to ensure the bucket entry state is always valid.
pub fn create_node_ref<F>(
&mut self,
outer_self: RoutingTable,
node_id: DHTKey,
update_func: F,
) -> Option<NodeRef>
where
F: FnOnce(&mut RoutingTableInner, &mut BucketEntryInner),
{
// Ensure someone isn't trying to register this node itself
if node_id == self.node_id() {
log_rtab!(debug "can't register own node");
return None;
}
|
||||||
|
|
||||||
|
// Look up existing entry
|
||||||
|
let idx = self.find_bucket_index(node_id);
|
||||||
|
let noderef = {
|
||||||
|
let bucket = &self.buckets[idx];
|
||||||
|
let entry = bucket.entry(&node_id);
|
||||||
|
entry.map(|e| NodeRef::new(outer_self.clone(), node_id, e, None))
|
||||||
|
};
|
||||||
|
|
||||||
|
// If one doesn't exist, insert into bucket, possibly evicting a bucket member
|
||||||
|
let noderef = match noderef {
|
||||||
|
None => {
|
||||||
|
// Make new entry
|
||||||
|
self.bucket_entry_count += 1;
|
||||||
|
let cnt = self.bucket_entry_count;
|
||||||
|
let bucket = &mut self.buckets[idx];
|
||||||
|
let nr = bucket.add_entry(node_id);
|
||||||
|
|
||||||
|
// Update the entry
|
||||||
|
let entry = bucket.entry(&node_id).unwrap();
|
||||||
|
entry.with_mut(self, update_func);
|
||||||
|
|
||||||
|
// Kick the bucket
|
||||||
|
self.unlocked_inner.kick_queue.lock().insert(idx);
|
||||||
|
log_rtab!(debug "Routing table now has {} nodes, {} live", cnt, self.get_entry_count(RoutingDomainSet::all(), BucketEntryState::Unreliable));
|
||||||
|
|
||||||
|
nr
|
||||||
|
}
|
||||||
|
Some(nr) => {
|
||||||
|
// Update the entry
|
||||||
|
let bucket = &mut self.buckets[idx];
|
||||||
|
let entry = bucket.entry(&node_id).unwrap();
|
||||||
|
entry.with_mut(self, update_func);
|
||||||
|
|
||||||
|
nr
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
Some(noderef)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Resolve an existing routing table entry and return a reference to it
|
||||||
|
pub fn lookup_node_ref(&self, outer_self: RoutingTable, node_id: DHTKey) -> Option<NodeRef> {
|
||||||
|
if node_id == self.unlocked_inner.node_id {
|
||||||
|
log_rtab!(error "can't look up own node id in routing table");
|
||||||
|
return None;
|
||||||
|
}
|
||||||
|
let idx = self.find_bucket_index(node_id);
|
||||||
|
let bucket = &self.buckets[idx];
|
||||||
|
bucket
|
||||||
|
.entry(&node_id)
|
||||||
|
.map(|e| NodeRef::new(outer_self, node_id, e, None))
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Resolve an existing routing table entry and return a filtered reference to it
|
||||||
|
pub fn lookup_and_filter_noderef(
|
||||||
|
&self,
|
||||||
|
outer_self: RoutingTable,
|
||||||
|
node_id: DHTKey,
|
||||||
|
routing_domain_set: RoutingDomainSet,
|
||||||
|
dial_info_filter: DialInfoFilter,
|
||||||
|
) -> Option<NodeRef> {
|
||||||
|
let nr = self.lookup_node_ref(outer_self, node_id)?;
|
||||||
|
Some(
|
||||||
|
nr.filtered_clone(
|
||||||
|
NodeRefFilter::new()
|
||||||
|
.with_dial_info_filter(dial_info_filter)
|
||||||
|
.with_routing_domain_set(routing_domain_set),
|
||||||
|
),
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Resolve an existing routing table entry and call a function on its entry without using a noderef
|
||||||
|
pub fn with_node_entry<F, R>(&self, node_id: DHTKey, f: F) -> Option<R>
|
||||||
|
where
|
||||||
|
F: FnOnce(Arc<BucketEntry>) -> R,
|
||||||
|
{
|
||||||
|
if node_id == self.unlocked_inner.node_id {
|
||||||
|
log_rtab!(error "can't look up own node id in routing table");
|
||||||
|
return None;
|
||||||
|
}
|
||||||
|
let idx = self.find_bucket_index(node_id);
|
||||||
|
let bucket = &self.buckets[idx];
|
||||||
|
if let Some(e) = bucket.entry(&node_id) {
|
||||||
|
return Some(f(e));
|
||||||
|
}
|
||||||
|
None
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Shortcut function to add a node to our routing table if it doesn't exist
|
||||||
|
/// and add the dial info we have for it. Returns a noderef filtered to
|
||||||
|
/// the routing domain in which this node was registered for convenience.
|
||||||
|
pub fn register_node_with_signed_node_info(
|
||||||
|
&mut self,
|
||||||
|
outer_self: RoutingTable,
|
||||||
|
routing_domain: RoutingDomain,
|
||||||
|
node_id: DHTKey,
|
||||||
|
signed_node_info: SignedNodeInfo,
|
||||||
|
allow_invalid: bool,
|
||||||
|
) -> Option<NodeRef> {
|
||||||
|
// validate signed node info is not something malicious
|
||||||
|
if node_id == self.node_id() {
|
||||||
|
log_rtab!(debug "can't register own node id in routing table");
|
||||||
|
return None;
|
||||||
|
}
|
||||||
|
if let Some(relay_id) = signed_node_info.relay_id() {
|
||||||
|
if relay_id.key == node_id {
|
||||||
|
log_rtab!(debug "node can not be its own relay");
|
||||||
|
return None;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if !allow_invalid {
|
||||||
|
// verify signature
|
||||||
|
if !signed_node_info.has_valid_signature() {
|
||||||
|
log_rtab!(debug "signed node info for {} has invalid signature", node_id);
|
||||||
|
return None;
|
||||||
|
}
|
||||||
|
// verify signed node info is valid in this routing domain
|
||||||
|
if !self.signed_node_info_is_valid_in_routing_domain(routing_domain, &signed_node_info)
|
||||||
|
{
|
||||||
|
log_rtab!(debug "signed node info for {} not valid in the {:?} routing domain", node_id, routing_domain);
|
||||||
|
return None;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
self.create_node_ref(outer_self, node_id, |_rti, e| {
|
||||||
|
e.update_signed_node_info(routing_domain, signed_node_info);
|
||||||
|
})
|
||||||
|
.map(|mut nr| {
|
||||||
|
nr.set_filter(Some(
|
||||||
|
NodeRefFilter::new().with_routing_domain(routing_domain),
|
||||||
|
));
|
||||||
|
nr
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Shortcut function to add a node to our routing table if it doesn't exist
|
||||||
|
/// and add the last peer address we have for it, since that's pretty common
|
||||||
|
pub fn register_node_with_existing_connection(
|
||||||
|
&mut self,
|
||||||
|
outer_self: RoutingTable,
|
||||||
|
node_id: DHTKey,
|
||||||
|
descriptor: ConnectionDescriptor,
|
||||||
|
timestamp: u64,
|
||||||
|
) -> Option<NodeRef> {
|
||||||
|
let out = self.create_node_ref(outer_self, node_id, |_rti, e| {
|
||||||
|
// this node is live because it literally just connected to us
|
||||||
|
e.touch_last_seen(timestamp);
|
||||||
|
});
|
||||||
|
if let Some(nr) = &out {
|
||||||
|
// set the most recent node address for connection finding and udp replies
|
||||||
|
nr.locked_mut(self)
|
||||||
|
.set_last_connection(descriptor, timestamp);
|
||||||
|
}
|
||||||
|
out
|
||||||
|
}
|
||||||
|
|
||||||
|
//////////////////////////////////////////////////////////////////////
|
||||||
|
// Routing Table Health Metrics
|
||||||
|
|
||||||
|
pub fn get_routing_table_health(&self) -> RoutingTableHealth {
|
||||||
|
let mut health = RoutingTableHealth::default();
|
||||||
|
let cur_ts = intf::get_timestamp();
|
||||||
|
for bucket in &self.buckets {
|
||||||
|
for (_, v) in bucket.entries() {
|
||||||
|
match v.with(self, |_rti, e| e.state(cur_ts)) {
|
||||||
|
BucketEntryState::Reliable => {
|
||||||
|
health.reliable_entry_count += 1;
|
||||||
|
}
|
||||||
|
BucketEntryState::Unreliable => {
|
||||||
|
health.unreliable_entry_count += 1;
|
||||||
|
}
|
||||||
|
BucketEntryState::Dead => {
|
||||||
|
health.dead_entry_count += 1;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
health
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn touch_recent_peer(&mut self, node_id: DHTKey, last_connection: ConnectionDescriptor) {
|
||||||
|
self.recent_peers
|
||||||
|
.insert(node_id, RecentPeersEntry { last_connection });
|
||||||
|
}
|
||||||
|
|
||||||
|
//////////////////////////////////////////////////////////////////////
|
||||||
|
// Find Nodes
|
||||||
|
|
||||||
|
// Retrieve the fastest nodes in the routing table matching an entry filter
|
||||||
|
pub fn find_fast_public_nodes_filtered(
|
||||||
|
&self,
|
||||||
|
outer_self: RoutingTable,
|
||||||
|
node_count: usize,
|
||||||
|
mut filters: VecDeque<RoutingTableEntryFilter>,
|
||||||
|
) -> Vec<NodeRef> {
|
||||||
|
let public_node_filter = Box::new(
|
||||||
|
|rti: &RoutingTableInner, _k: DHTKey, v: Option<Arc<BucketEntry>>| {
|
||||||
|
let entry = v.unwrap();
|
||||||
|
entry.with(rti, |_rti, e| {
|
||||||
|
// skip nodes on local network
|
||||||
|
if e.node_info(RoutingDomain::LocalNetwork).is_some() {
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
// skip nodes not on public internet
|
||||||
|
if e.node_info(RoutingDomain::PublicInternet).is_none() {
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
true
|
||||||
|
})
|
||||||
|
},
|
||||||
|
) as RoutingTableEntryFilter;
|
||||||
|
filters.push_front(public_node_filter);
|
||||||
|
|
||||||
|
self.find_fastest_nodes(
|
||||||
|
node_count,
|
||||||
|
filters,
|
||||||
|
|_rti: &RoutingTableInner, k: DHTKey, v: Option<Arc<BucketEntry>>| {
|
||||||
|
NodeRef::new(outer_self.clone(), k, v.unwrap().clone(), None)
|
||||||
|
},
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn filter_has_valid_signed_node_info(
|
||||||
|
&self,
|
||||||
|
routing_domain: RoutingDomain,
|
||||||
|
has_valid_own_node_info: bool,
|
||||||
|
v: Option<Arc<BucketEntry>>,
|
||||||
|
) -> bool {
|
||||||
|
match v {
|
||||||
|
None => has_valid_own_node_info,
|
||||||
|
Some(entry) => entry.with(self, |_rti, e| {
|
||||||
|
e.signed_node_info(routing_domain.into())
|
||||||
|
.map(|sni| sni.has_valid_signature())
|
||||||
|
.unwrap_or(false)
|
||||||
|
}),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn transform_to_peer_info(
|
||||||
|
&self,
|
||||||
|
routing_domain: RoutingDomain,
|
||||||
|
own_peer_info: PeerInfo,
|
||||||
|
k: DHTKey,
|
||||||
|
v: Option<Arc<BucketEntry>>,
|
||||||
|
) -> PeerInfo {
|
||||||
|
match v {
|
||||||
|
None => own_peer_info,
|
||||||
|
Some(entry) => entry.with(self, |_rti, e| e.make_peer_info(k, routing_domain).unwrap()),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn find_peers_with_sort_and_filter<C, T, O>(
|
||||||
|
&self,
|
||||||
|
node_count: usize,
|
||||||
|
cur_ts: u64,
|
||||||
|
mut filters: VecDeque<RoutingTableEntryFilter>,
|
||||||
|
mut compare: C,
|
||||||
|
mut transform: T,
|
||||||
|
) -> Vec<O>
|
||||||
|
where
|
||||||
|
C: for<'a, 'b> FnMut(
|
||||||
|
&'a RoutingTableInner,
|
||||||
|
&'b (DHTKey, Option<Arc<BucketEntry>>),
|
||||||
|
&'b (DHTKey, Option<Arc<BucketEntry>>),
|
||||||
|
) -> core::cmp::Ordering,
|
||||||
|
T: for<'r> FnMut(&'r RoutingTableInner, DHTKey, Option<Arc<BucketEntry>>) -> O,
|
||||||
|
{
|
||||||
|
// collect all the nodes for sorting
|
||||||
|
let mut nodes =
|
||||||
|
Vec::<(DHTKey, Option<Arc<BucketEntry>>)>::with_capacity(self.bucket_entry_count + 1);
|
||||||
|
|
||||||
|
// add our own node (the only entry with a None bucket entry)
|
||||||
|
let mut filtered = false;
|
||||||
|
for filter in &mut filters {
|
||||||
|
if !filter(self, self.unlocked_inner.node_id, None) {
|
||||||
|
filtered = true;
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if !filtered {
|
||||||
|
nodes.push((self.unlocked_inner.node_id, None));
|
||||||
|
}
|
||||||
|
|
||||||
|
// add all nodes from buckets
|
||||||
|
self.with_entries(cur_ts, BucketEntryState::Unreliable, |rti, k, v| {
|
||||||
|
// Apply filter
|
||||||
|
for filter in &mut filters {
|
||||||
|
if filter(rti, k, Some(v.clone())) {
|
||||||
|
nodes.push((k, Some(v.clone())));
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
Option::<()>::None
|
||||||
|
});
|
||||||
|
|
||||||
|
// sort by preference for returning nodes
|
||||||
|
nodes.sort_by(|a, b| compare(self, a, b));
|
||||||
|
|
||||||
|
// return transformed vector for filtered+sorted nodes
|
||||||
|
let cnt = usize::min(node_count, nodes.len());
|
||||||
|
let mut out = Vec::<O>::with_capacity(cnt);
|
||||||
|
for node in nodes {
|
||||||
|
let val = transform(self, node.0, node.1);
|
||||||
|
out.push(val);
|
||||||
|
}
|
||||||
|
|
||||||
|
out
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn find_fastest_nodes<T, O>(
|
||||||
|
&self,
|
||||||
|
node_count: usize,
|
||||||
|
mut filters: VecDeque<RoutingTableEntryFilter>,
|
||||||
|
transform: T,
|
||||||
|
) -> Vec<O>
|
||||||
|
where
|
||||||
|
T: for<'r> FnMut(&'r RoutingTableInner, DHTKey, Option<Arc<BucketEntry>>) -> O,
|
||||||
|
{
|
||||||
|
let cur_ts = intf::get_timestamp();
|
||||||
|
|
||||||
|
// Add filter to remove dead nodes always
|
||||||
|
let filter_dead = Box::new(
|
||||||
|
move |rti: &RoutingTableInner, _k: DHTKey, v: Option<Arc<BucketEntry>>| {
|
||||||
|
if let Some(entry) = &v {
|
||||||
|
// always filter out dead nodes
|
||||||
|
if entry.with(rti, |_rti, e| e.state(cur_ts) == BucketEntryState::Dead) {
|
||||||
|
false
|
||||||
|
} else {
|
||||||
|
true
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
// always filter out self peer, as it is irrelevant to the 'fastest nodes' search
|
||||||
|
false
|
||||||
|
}
|
||||||
|
},
|
||||||
|
) as RoutingTableEntryFilter;
|
||||||
|
filters.push_front(filter_dead);
|
||||||
|
|
||||||
|
// Fastest sort
|
||||||
|
let sort = |rti: &RoutingTableInner,
|
||||||
|
(a_key, a_entry): &(DHTKey, Option<Arc<BucketEntry>>),
|
||||||
|
(b_key, b_entry): &(DHTKey, Option<Arc<BucketEntry>>)| {
|
||||||
|
// same nodes are always the same
|
||||||
|
if a_key == b_key {
|
||||||
|
return core::cmp::Ordering::Equal;
|
||||||
|
}
|
||||||
|
// our own node always comes last (should not happen, here for completeness)
|
||||||
|
if a_entry.is_none() {
|
||||||
|
return core::cmp::Ordering::Greater;
|
||||||
|
}
|
||||||
|
if b_entry.is_none() {
|
||||||
|
return core::cmp::Ordering::Less;
|
||||||
|
}
|
||||||
|
// reliable nodes come first
|
||||||
|
let ae = a_entry.as_ref().unwrap();
|
||||||
|
let be = b_entry.as_ref().unwrap();
|
||||||
|
ae.with(rti, |rti, ae| {
|
||||||
|
be.with(rti, |_rti, be| {
|
||||||
|
let ra = ae.check_reliable(cur_ts);
|
||||||
|
let rb = be.check_reliable(cur_ts);
|
||||||
|
if ra != rb {
|
||||||
|
if ra {
|
||||||
|
return core::cmp::Ordering::Less;
|
||||||
|
} else {
|
||||||
|
return core::cmp::Ordering::Greater;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// latency is the next metric, closer nodes first
|
||||||
|
let a_latency = match ae.peer_stats().latency.as_ref() {
|
||||||
|
None => {
|
||||||
|
// treat unknown latency as slow
|
||||||
|
return core::cmp::Ordering::Greater;
|
||||||
|
}
|
||||||
|
Some(l) => l,
|
||||||
|
};
|
||||||
|
let b_latency = match be.peer_stats().latency.as_ref() {
|
||||||
|
None => {
|
||||||
|
// treat unknown latency as slow
|
||||||
|
return core::cmp::Ordering::Less;
|
||||||
|
}
|
||||||
|
Some(l) => l,
|
||||||
|
};
|
||||||
|
// Sort by average latency
|
||||||
|
a_latency.average.cmp(&b_latency.average)
|
||||||
|
})
|
||||||
|
})
|
||||||
|
};
|
||||||
|
|
||||||
|
let out =
|
||||||
|
self.find_peers_with_sort_and_filter(node_count, cur_ts, filters, sort, transform);
|
||||||
|
out
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn find_closest_nodes<T, O>(
|
||||||
|
&self,
|
||||||
|
node_id: DHTKey,
|
||||||
|
filters: VecDeque<RoutingTableEntryFilter>,
|
||||||
|
transform: T,
|
||||||
|
) -> Vec<O>
|
||||||
|
where
|
||||||
|
T: for<'r> FnMut(&'r RoutingTableInner, DHTKey, Option<Arc<BucketEntry>>) -> O,
|
||||||
|
{
|
||||||
|
let cur_ts = intf::get_timestamp();
|
||||||
|
let node_count = {
|
||||||
|
let config = self.config();
|
||||||
|
let c = config.get();
|
||||||
|
c.network.dht.max_find_node_count as usize
|
||||||
|
};
|
||||||
|
|
||||||
|
// closest sort
|
||||||
|
let sort = |rti: &RoutingTableInner,
|
||||||
|
(a_key, a_entry): &(DHTKey, Option<Arc<BucketEntry>>),
|
||||||
|
(b_key, b_entry): &(DHTKey, Option<Arc<BucketEntry>>)| {
|
||||||
|
// same nodes are always the same
|
||||||
|
if a_key == b_key {
|
||||||
|
return core::cmp::Ordering::Equal;
|
||||||
|
}
|
||||||
|
|
||||||
|
// reliable nodes come first, pessimistically treating our own node as unreliable
|
||||||
|
let ra = a_entry
|
||||||
|
.as_ref()
|
||||||
|
.map_or(false, |x| x.with(rti, |_rti, x| x.check_reliable(cur_ts)));
|
||||||
|
let rb = b_entry
|
||||||
|
.as_ref()
|
||||||
|
.map_or(false, |x| x.with(rti, |_rti, x| x.check_reliable(cur_ts)));
|
||||||
|
if ra != rb {
|
||||||
|
if ra {
|
||||||
|
return core::cmp::Ordering::Less;
|
||||||
|
} else {
|
||||||
|
return core::cmp::Ordering::Greater;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// distance is the next metric, closer nodes first
|
||||||
|
let da = distance(a_key, &node_id);
|
||||||
|
let db = distance(b_key, &node_id);
|
||||||
|
da.cmp(&db)
|
||||||
|
};
|
||||||
|
|
||||||
|
let out =
|
||||||
|
self.find_peers_with_sort_and_filter(node_count, cur_ts, filters, sort, transform);
|
||||||
|
log_rtab!(">> find_closest_nodes: node count = {}", out.len());
|
||||||
|
out
|
||||||
|
}
|
||||||
|
}
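// Illustrative sketch (not part of this commit): one way a caller might invoke
// find_fastest_nodes() above. The closure signature mirrors the call made by
// find_fast_public_nodes_filtered(); `inner` (a RoutingTableInner), `outer_self`
// (a RoutingTable handle), and the node count of 8 are assumed for the example.
//
// let filters = VecDeque::<RoutingTableEntryFilter>::new();
// let fastest: Vec<NodeRef> = inner.find_fastest_nodes(
//     8,
//     filters,
//     |_rti: &RoutingTableInner, k: DHTKey, v: Option<Arc<BucketEntry>>| {
//         NodeRef::new(outer_self.clone(), k, v.unwrap(), None)
//     },
// );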
|
@@ -22,8 +22,13 @@ impl RoutingTable {
 );
 
 // Roll all bucket entry transfers
-for b in &mut inner.buckets {
-b.roll_transfers(last_ts, cur_ts);
+let entries: Vec<Arc<BucketEntry>> = inner
+.buckets
+.iter()
+.flat_map(|b| b.entries().map(|(_k, v)| v.clone()))
+.collect();
+for v in entries {
+v.with_mut(inner, |_rti, e| e.roll_transfers(last_ts, cur_ts));
 }
 Ok(())
 }
@@ -42,7 +47,7 @@ impl RoutingTable {
 .collect();
 let mut inner = self.inner.write();
 for idx in kick_queue {
-Self::kick_bucket(&mut *inner, idx)
+inner.kick_bucket(idx)
 }
 Ok(())
 }
@ -1,41 +0,0 @@
|
|||||||
use crate::dht::*;
|
|
||||||
use crate::*;
|
|
||||||
use core::convert::TryInto;
|
|
||||||
use rpc_processor::*;
|
|
||||||
|
|
||||||
pub fn decode_block_id(public_key: &veilid_capnp::b_l_a_k_e3_hash::Reader) -> DHTKey {
|
|
||||||
let u0 = public_key.get_u0().to_be_bytes();
|
|
||||||
let u1 = public_key.get_u1().to_be_bytes();
|
|
||||||
let u2 = public_key.get_u2().to_be_bytes();
|
|
||||||
let u3 = public_key.get_u3().to_be_bytes();
|
|
||||||
|
|
||||||
let mut x: [u8; 32] = Default::default();
|
|
||||||
x[0..8].copy_from_slice(&u0);
|
|
||||||
x[8..16].copy_from_slice(&u1);
|
|
||||||
x[16..24].copy_from_slice(&u2);
|
|
||||||
x[24..32].copy_from_slice(&u3);
|
|
||||||
|
|
||||||
DHTKey::new(x)
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn encode_block_id(
|
|
||||||
key: &DHTKey,
|
|
||||||
builder: &mut veilid_capnp::b_l_a_k_e3_hash::Builder,
|
|
||||||
) -> Result<(), RPCError> {
|
|
||||||
if !key.valid {
|
|
||||||
return Err(RPCError::protocol("invalid key"));
|
|
||||||
}
|
|
||||||
builder.set_u0(u64::from_be_bytes(
|
|
||||||
key.bytes[0..8].try_into().map_err(RPCError::internal)?,
|
|
||||||
));
|
|
||||||
builder.set_u1(u64::from_be_bytes(
|
|
||||||
key.bytes[8..16].try_into().map_err(RPCError::internal)?,
|
|
||||||
));
|
|
||||||
builder.set_u2(u64::from_be_bytes(
|
|
||||||
key.bytes[16..24].try_into().map_err(RPCError::internal)?,
|
|
||||||
));
|
|
||||||
builder.set_u3(u64::from_be_bytes(
|
|
||||||
key.bytes[24..32].try_into().map_err(RPCError::internal)?,
|
|
||||||
));
|
|
||||||
Ok(())
|
|
||||||
}
|
|
@ -1,9 +1,9 @@
|
|||||||
use crate::dht::*;
|
use crate::crypto::*;
|
||||||
use crate::*;
|
use crate::*;
|
||||||
use core::convert::TryInto;
|
use core::convert::TryInto;
|
||||||
use rpc_processor::*;
|
use rpc_processor::*;
|
||||||
|
|
||||||
pub fn decode_public_key(public_key: &veilid_capnp::curve25519_public_key::Reader) -> DHTKey {
|
pub fn decode_dht_key(public_key: &veilid_capnp::key256::Reader) -> DHTKey {
|
||||||
let u0 = public_key.get_u0().to_be_bytes();
|
let u0 = public_key.get_u0().to_be_bytes();
|
||||||
let u1 = public_key.get_u1().to_be_bytes();
|
let u1 = public_key.get_u1().to_be_bytes();
|
||||||
let u2 = public_key.get_u2().to_be_bytes();
|
let u2 = public_key.get_u2().to_be_bytes();
|
||||||
@ -18,13 +18,10 @@ pub fn decode_public_key(public_key: &veilid_capnp::curve25519_public_key::Reade
|
|||||||
DHTKey::new(x)
|
DHTKey::new(x)
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn encode_public_key(
|
pub fn encode_dht_key(
|
||||||
key: &DHTKey,
|
key: &DHTKey,
|
||||||
builder: &mut veilid_capnp::curve25519_public_key::Builder,
|
builder: &mut veilid_capnp::key256::Builder,
|
||||||
) -> Result<(), RPCError> {
|
) -> Result<(), RPCError> {
|
||||||
if !key.valid {
|
|
||||||
return Err(RPCError::protocol("invalid key"));
|
|
||||||
}
|
|
||||||
builder.set_u0(u64::from_be_bytes(
|
builder.set_u0(u64::from_be_bytes(
|
||||||
key.bytes[0..8]
|
key.bytes[0..8]
|
||||||
.try_into()
|
.try_into()
|
@ -1,14 +1,7 @@
|
|||||||
use crate::*;
|
use crate::*;
|
||||||
use rpc_processor::*;
|
use rpc_processor::*;
|
||||||
|
|
||||||
pub fn encode_signature(
|
pub fn encode_signature(sig: &DHTSignature, builder: &mut veilid_capnp::signature512::Builder) {
|
||||||
sig: &DHTSignature,
|
|
||||||
builder: &mut veilid_capnp::ed25519_signature::Builder,
|
|
||||||
) {
|
|
||||||
if !sig.valid {
|
|
||||||
panic!("don't encode invalid signatures");
|
|
||||||
}
|
|
||||||
|
|
||||||
let sig = &sig.bytes;
|
let sig = &sig.bytes;
|
||||||
|
|
||||||
builder.set_u0(u64::from_be_bytes(
|
builder.set_u0(u64::from_be_bytes(
|
||||||
@ -37,7 +30,7 @@ pub fn encode_signature(
|
|||||||
));
|
));
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn decode_signature(reader: &veilid_capnp::ed25519_signature::Reader) -> DHTSignature {
|
pub fn decode_signature(reader: &veilid_capnp::signature512::Reader) -> DHTSignature {
|
||||||
let u0 = reader.get_u0().to_be_bytes();
|
let u0 = reader.get_u0().to_be_bytes();
|
||||||
let u1 = reader.get_u1().to_be_bytes();
|
let u1 = reader.get_u1().to_be_bytes();
|
||||||
let u2 = reader.get_u2().to_be_bytes();
|
let u2 = reader.get_u2().to_be_bytes();
|
@ -1,11 +1,11 @@
|
|||||||
mod address;
|
mod address;
|
||||||
mod address_type_set;
|
mod address_type_set;
|
||||||
mod block_id;
|
mod dht_key;
|
||||||
|
mod dht_signature;
|
||||||
mod dial_info;
|
mod dial_info;
|
||||||
mod dial_info_class;
|
mod dial_info_class;
|
||||||
mod dial_info_detail;
|
mod dial_info_detail;
|
||||||
mod network_class;
|
mod network_class;
|
||||||
mod node_dial_info;
|
|
||||||
mod node_info;
|
mod node_info;
|
||||||
mod node_status;
|
mod node_status;
|
||||||
mod nonce;
|
mod nonce;
|
||||||
@ -13,11 +13,11 @@ mod operations;
|
|||||||
mod peer_info;
|
mod peer_info;
|
||||||
mod private_safety_route;
|
mod private_safety_route;
|
||||||
mod protocol_type_set;
|
mod protocol_type_set;
|
||||||
mod public_key;
|
|
||||||
mod sender_info;
|
mod sender_info;
|
||||||
mod signal_info;
|
mod signal_info;
|
||||||
mod signature;
|
mod signed_direct_node_info;
|
||||||
mod signed_node_info;
|
mod signed_node_info;
|
||||||
|
mod signed_relayed_node_info;
|
||||||
mod socket_address;
|
mod socket_address;
|
||||||
mod tunnel;
|
mod tunnel;
|
||||||
mod value_data;
|
mod value_data;
|
||||||
@ -25,12 +25,12 @@ mod value_key;
|
|||||||
|
|
||||||
pub use address::*;
|
pub use address::*;
|
||||||
pub use address_type_set::*;
|
pub use address_type_set::*;
|
||||||
pub use block_id::*;
|
pub use dht_key::*;
|
||||||
|
pub use dht_signature::*;
|
||||||
pub use dial_info::*;
|
pub use dial_info::*;
|
||||||
pub use dial_info_class::*;
|
pub use dial_info_class::*;
|
||||||
pub use dial_info_detail::*;
|
pub use dial_info_detail::*;
|
||||||
pub use network_class::*;
|
pub use network_class::*;
|
||||||
pub use node_dial_info::*;
|
|
||||||
pub use node_info::*;
|
pub use node_info::*;
|
||||||
pub use node_status::*;
|
pub use node_status::*;
|
||||||
pub use nonce::*;
|
pub use nonce::*;
|
||||||
@ -38,11 +38,11 @@ pub use operations::*;
|
|||||||
pub use peer_info::*;
|
pub use peer_info::*;
|
||||||
pub use private_safety_route::*;
|
pub use private_safety_route::*;
|
||||||
pub use protocol_type_set::*;
|
pub use protocol_type_set::*;
|
||||||
pub use public_key::*;
|
|
||||||
pub use sender_info::*;
|
pub use sender_info::*;
|
||||||
pub use signal_info::*;
|
pub use signal_info::*;
|
||||||
pub use signature::*;
|
pub use signed_direct_node_info::*;
|
||||||
pub use signed_node_info::*;
|
pub use signed_node_info::*;
|
||||||
|
pub use signed_relayed_node_info::*;
|
||||||
pub use socket_address::*;
|
pub use socket_address::*;
|
||||||
pub use tunnel::*;
|
pub use tunnel::*;
|
||||||
pub use value_data::*;
|
pub use value_data::*;
|
||||||
|
@@ -5,7 +5,7 @@ pub fn encode_network_class(network_class: NetworkClass) -> veilid_capnp::Networ
 NetworkClass::InboundCapable => veilid_capnp::NetworkClass::InboundCapable,
 NetworkClass::OutboundOnly => veilid_capnp::NetworkClass::OutboundOnly,
 NetworkClass::WebApp => veilid_capnp::NetworkClass::WebApp,
-NetworkClass::Invalid => panic!("invalid network class should not be encoded"),
+NetworkClass::Invalid => veilid_capnp::NetworkClass::Invalid,
 }
 }
 
@@ -14,5 +14,6 @@ pub fn decode_network_class(network_class: veilid_capnp::NetworkClass) -> Networ
 veilid_capnp::NetworkClass::InboundCapable => NetworkClass::InboundCapable,
 veilid_capnp::NetworkClass::OutboundOnly => NetworkClass::OutboundOnly,
 veilid_capnp::NetworkClass::WebApp => NetworkClass::WebApp,
+veilid_capnp::NetworkClass::Invalid => NetworkClass::Invalid,
 }
 }
@ -1,29 +0,0 @@
|
|||||||
use crate::*;
|
|
||||||
use rpc_processor::*;
|
|
||||||
|
|
||||||
pub fn encode_node_dial_info(
|
|
||||||
ndis: &NodeDialInfo,
|
|
||||||
builder: &mut veilid_capnp::node_dial_info::Builder,
|
|
||||||
) -> Result<(), RPCError> {
|
|
||||||
let mut ni_builder = builder.reborrow().init_node_id();
|
|
||||||
encode_public_key(&ndis.node_id.key, &mut ni_builder)?;
|
|
||||||
let mut di_builder = builder.reborrow().init_dial_info();
|
|
||||||
encode_dial_info(&ndis.dial_info, &mut di_builder)?;
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn decode_node_dial_info(
|
|
||||||
reader: &veilid_capnp::node_dial_info::Reader,
|
|
||||||
) -> Result<NodeDialInfo, RPCError> {
|
|
||||||
let node_id = decode_public_key(&reader.get_node_id().map_err(RPCError::map_protocol(
|
|
||||||
"invalid public key in node_dial_info",
|
|
||||||
))?);
|
|
||||||
let dial_info = decode_dial_info(&reader.get_dial_info().map_err(RPCError::map_protocol(
|
|
||||||
"invalid dial_info in node_dial_info",
|
|
||||||
))?)?;
|
|
||||||
|
|
||||||
Ok(NodeDialInfo {
|
|
||||||
node_id: NodeId::new(node_id),
|
|
||||||
dial_info,
|
|
||||||
})
|
|
||||||
}
|
|
@ -31,18 +31,10 @@ pub fn encode_node_info(
|
|||||||
encode_dial_info_detail(&node_info.dial_info_detail_list[idx], &mut did_builder)?;
|
encode_dial_info_detail(&node_info.dial_info_detail_list[idx], &mut did_builder)?;
|
||||||
}
|
}
|
||||||
|
|
||||||
if let Some(rpi) = &node_info.relay_peer_info {
|
|
||||||
let mut rpi_builder = builder.reborrow().init_relay_peer_info();
|
|
||||||
encode_peer_info(rpi, &mut rpi_builder)?;
|
|
||||||
}
|
|
||||||
|
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn decode_node_info(
|
pub fn decode_node_info(reader: &veilid_capnp::node_info::Reader) -> Result<NodeInfo, RPCError> {
|
||||||
reader: &veilid_capnp::node_info::Reader,
|
|
||||||
allow_relay_peer_info: bool,
|
|
||||||
) -> Result<NodeInfo, RPCError> {
|
|
||||||
let network_class = decode_network_class(
|
let network_class = decode_network_class(
|
||||||
reader
|
reader
|
||||||
.reborrow()
|
.reborrow()
|
||||||
@ -81,22 +73,6 @@ pub fn decode_node_info(
|
|||||||
dial_info_detail_list.push(decode_dial_info_detail(&did)?)
|
dial_info_detail_list.push(decode_dial_info_detail(&did)?)
|
||||||
}
|
}
|
||||||
|
|
||||||
let relay_peer_info = if allow_relay_peer_info {
|
|
||||||
if reader.has_relay_peer_info() {
|
|
||||||
Some(Box::new(decode_peer_info(
|
|
||||||
&reader
|
|
||||||
.reborrow()
|
|
||||||
.get_relay_peer_info()
|
|
||||||
.map_err(RPCError::protocol)?,
|
|
||||||
false,
|
|
||||||
)?))
|
|
||||||
} else {
|
|
||||||
None
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
None
|
|
||||||
};
|
|
||||||
|
|
||||||
Ok(NodeInfo {
|
Ok(NodeInfo {
|
||||||
network_class,
|
network_class,
|
||||||
outbound_protocols,
|
outbound_protocols,
|
||||||
@ -104,6 +80,5 @@ pub fn decode_node_info(
|
|||||||
min_version,
|
min_version,
|
||||||
max_version,
|
max_version,
|
||||||
dial_info_detail_list,
|
dial_info_detail_list,
|
||||||
relay_peer_info,
|
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
@ -1,10 +1,7 @@
|
|||||||
use crate::*;
|
use crate::*;
|
||||||
use rpc_processor::*;
|
use rpc_processor::*;
|
||||||
|
|
||||||
pub fn encode_nonce(
|
pub fn encode_nonce(nonce: &Nonce, builder: &mut veilid_capnp::nonce24::Builder) {
|
||||||
nonce: &Nonce,
|
|
||||||
builder: &mut veilid_capnp::x_cha_cha20_poly1305_nonce::Builder,
|
|
||||||
) {
|
|
||||||
builder.set_u0(u64::from_be_bytes(
|
builder.set_u0(u64::from_be_bytes(
|
||||||
nonce[0..8].try_into().expect("slice with incorrect length"),
|
nonce[0..8].try_into().expect("slice with incorrect length"),
|
||||||
));
|
));
|
||||||
@ -20,7 +17,7 @@ pub fn encode_nonce(
|
|||||||
));
|
));
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn decode_nonce(reader: &veilid_capnp::x_cha_cha20_poly1305_nonce::Reader) -> Nonce {
|
pub fn decode_nonce(reader: &veilid_capnp::nonce24::Reader) -> Nonce {
|
||||||
let u0 = reader.get_u0().to_be_bytes();
|
let u0 = reader.get_u0().to_be_bytes();
|
||||||
let u1 = reader.get_u1().to_be_bytes();
|
let u1 = reader.get_u1().to_be_bytes();
|
||||||
let u2 = reader.get_u2().to_be_bytes();
|
let u2 = reader.get_u2().to_be_bytes();
|
||||||
|
@@ -11,9 +11,6 @@ impl RPCAnswer {
 pub fn new(detail: RPCAnswerDetail) -> Self {
 Self { detail }
 }
-// pub fn detail(&self) -> &RPCAnswerDetail {
-// &self.detail
-// }
 pub fn into_detail(self) -> RPCAnswerDetail {
 self.detail
 }
@ -35,6 +32,7 @@ impl RPCAnswer {
|
|||||||
pub enum RPCAnswerDetail {
|
pub enum RPCAnswerDetail {
|
||||||
StatusA(RPCOperationStatusA),
|
StatusA(RPCOperationStatusA),
|
||||||
FindNodeA(RPCOperationFindNodeA),
|
FindNodeA(RPCOperationFindNodeA),
|
||||||
|
AppCallA(RPCOperationAppCallA),
|
||||||
GetValueA(RPCOperationGetValueA),
|
GetValueA(RPCOperationGetValueA),
|
||||||
SetValueA(RPCOperationSetValueA),
|
SetValueA(RPCOperationSetValueA),
|
||||||
WatchValueA(RPCOperationWatchValueA),
|
WatchValueA(RPCOperationWatchValueA),
|
||||||
@ -50,6 +48,7 @@ impl RPCAnswerDetail {
|
|||||||
match self {
|
match self {
|
||||||
RPCAnswerDetail::StatusA(_) => "StatusA",
|
RPCAnswerDetail::StatusA(_) => "StatusA",
|
||||||
RPCAnswerDetail::FindNodeA(_) => "FindNodeA",
|
RPCAnswerDetail::FindNodeA(_) => "FindNodeA",
|
||||||
|
RPCAnswerDetail::AppCallA(_) => "AppCallA",
|
||||||
RPCAnswerDetail::GetValueA(_) => "GetValueA",
|
RPCAnswerDetail::GetValueA(_) => "GetValueA",
|
||||||
RPCAnswerDetail::SetValueA(_) => "SetValueA",
|
RPCAnswerDetail::SetValueA(_) => "SetValueA",
|
||||||
RPCAnswerDetail::WatchValueA(_) => "WatchValueA",
|
RPCAnswerDetail::WatchValueA(_) => "WatchValueA",
|
||||||
@ -76,6 +75,11 @@ impl RPCAnswerDetail {
|
|||||||
let out = RPCOperationFindNodeA::decode(&op_reader)?;
|
let out = RPCOperationFindNodeA::decode(&op_reader)?;
|
||||||
RPCAnswerDetail::FindNodeA(out)
|
RPCAnswerDetail::FindNodeA(out)
|
||||||
}
|
}
|
||||||
|
veilid_capnp::answer::detail::AppCallA(r) => {
|
||||||
|
let op_reader = r.map_err(RPCError::protocol)?;
|
||||||
|
let out = RPCOperationAppCallA::decode(&op_reader)?;
|
||||||
|
RPCAnswerDetail::AppCallA(out)
|
||||||
|
}
|
||||||
veilid_capnp::answer::detail::GetValueA(r) => {
|
veilid_capnp::answer::detail::GetValueA(r) => {
|
||||||
let op_reader = r.map_err(RPCError::protocol)?;
|
let op_reader = r.map_err(RPCError::protocol)?;
|
||||||
let out = RPCOperationGetValueA::decode(&op_reader)?;
|
let out = RPCOperationGetValueA::decode(&op_reader)?;
|
||||||
@ -126,6 +130,7 @@ impl RPCAnswerDetail {
|
|||||||
match self {
|
match self {
|
||||||
RPCAnswerDetail::StatusA(d) => d.encode(&mut builder.reborrow().init_status_a()),
|
RPCAnswerDetail::StatusA(d) => d.encode(&mut builder.reborrow().init_status_a()),
|
||||||
RPCAnswerDetail::FindNodeA(d) => d.encode(&mut builder.reborrow().init_find_node_a()),
|
RPCAnswerDetail::FindNodeA(d) => d.encode(&mut builder.reborrow().init_find_node_a()),
|
||||||
|
RPCAnswerDetail::AppCallA(d) => d.encode(&mut builder.reborrow().init_app_call_a()),
|
||||||
RPCAnswerDetail::GetValueA(d) => d.encode(&mut builder.reborrow().init_get_value_a()),
|
RPCAnswerDetail::GetValueA(d) => d.encode(&mut builder.reborrow().init_get_value_a()),
|
||||||
RPCAnswerDetail::SetValueA(d) => d.encode(&mut builder.reborrow().init_set_value_a()),
|
RPCAnswerDetail::SetValueA(d) => d.encode(&mut builder.reborrow().init_set_value_a()),
|
||||||
RPCAnswerDetail::WatchValueA(d) => {
|
RPCAnswerDetail::WatchValueA(d) => {
|
||||||
|
@ -1,5 +1,7 @@
|
|||||||
mod answer;
|
mod answer;
|
||||||
mod operation;
|
mod operation;
|
||||||
|
mod operation_app_call;
|
||||||
|
mod operation_app_message;
|
||||||
mod operation_cancel_tunnel;
|
mod operation_cancel_tunnel;
|
||||||
mod operation_complete_tunnel;
|
mod operation_complete_tunnel;
|
||||||
mod operation_find_block;
|
mod operation_find_block;
|
||||||
@ -22,6 +24,8 @@ mod statement;
|
|||||||
|
|
||||||
pub use answer::*;
|
pub use answer::*;
|
||||||
pub use operation::*;
|
pub use operation::*;
|
||||||
|
pub use operation_app_call::*;
|
||||||
|
pub use operation_app_message::*;
|
||||||
pub use operation_cancel_tunnel::*;
|
pub use operation_cancel_tunnel::*;
|
||||||
pub use operation_complete_tunnel::*;
|
pub use operation_complete_tunnel::*;
|
||||||
pub use operation_find_block::*;
|
pub use operation_find_block::*;
|
||||||
|
@ -19,7 +19,7 @@ impl RPCOperationKind {
|
|||||||
|
|
||||||
pub fn decode(
|
pub fn decode(
|
||||||
kind_reader: &veilid_capnp::operation::kind::Reader,
|
kind_reader: &veilid_capnp::operation::kind::Reader,
|
||||||
sender_node_id: &DHTKey,
|
opt_sender_node_id: Option<&DHTKey>,
|
||||||
) -> Result<Self, RPCError> {
|
) -> Result<Self, RPCError> {
|
||||||
let which_reader = kind_reader.which().map_err(RPCError::protocol)?;
|
let which_reader = kind_reader.which().map_err(RPCError::protocol)?;
|
||||||
let out = match which_reader {
|
let out = match which_reader {
|
||||||
@ -30,7 +30,7 @@ impl RPCOperationKind {
|
|||||||
}
|
}
|
||||||
veilid_capnp::operation::kind::Which::Statement(r) => {
|
veilid_capnp::operation::kind::Which::Statement(r) => {
|
||||||
let q_reader = r.map_err(RPCError::protocol)?;
|
let q_reader = r.map_err(RPCError::protocol)?;
|
||||||
let out = RPCStatement::decode(&q_reader, sender_node_id)?;
|
let out = RPCStatement::decode(&q_reader, opt_sender_node_id)?;
|
||||||
RPCOperationKind::Statement(out)
|
RPCOperationKind::Statement(out)
|
||||||
}
|
}
|
||||||
veilid_capnp::operation::kind::Which::Answer(r) => {
|
veilid_capnp::operation::kind::Which::Answer(r) => {
|
||||||
@ -111,22 +111,26 @@ impl RPCOperation {
|
|||||||
|
|
||||||
pub fn decode(
|
pub fn decode(
|
||||||
operation_reader: &veilid_capnp::operation::Reader,
|
operation_reader: &veilid_capnp::operation::Reader,
|
||||||
sender_node_id: &DHTKey,
|
opt_sender_node_id: Option<&DHTKey>,
|
||||||
) -> Result<Self, RPCError> {
|
) -> Result<Self, RPCError> {
|
||||||
let op_id = operation_reader.get_op_id();
|
let op_id = operation_reader.get_op_id();
|
||||||
|
|
||||||
let sender_node_info = if operation_reader.has_sender_node_info() {
|
let sender_node_info = if operation_reader.has_sender_node_info() {
|
||||||
let sni_reader = operation_reader
|
if let Some(sender_node_id) = opt_sender_node_id {
|
||||||
.get_sender_node_info()
|
let sni_reader = operation_reader
|
||||||
.map_err(RPCError::protocol)?;
|
.get_sender_node_info()
|
||||||
let sni = decode_signed_node_info(&sni_reader, sender_node_id, true)?;
|
.map_err(RPCError::protocol)?;
|
||||||
Some(sni)
|
let sni = decode_signed_node_info(&sni_reader, sender_node_id)?;
|
||||||
|
Some(sni)
|
||||||
|
} else {
|
||||||
|
None
|
||||||
|
}
|
||||||
} else {
|
} else {
|
||||||
None
|
None
|
||||||
};
|
};
|
||||||
|
|
||||||
let kind_reader = operation_reader.get_kind();
|
let kind_reader = operation_reader.get_kind();
|
||||||
let kind = RPCOperationKind::decode(&kind_reader, sender_node_id)?;
|
let kind = RPCOperationKind::decode(&kind_reader, opt_sender_node_id)?;
|
||||||
|
|
||||||
Ok(RPCOperation {
|
Ok(RPCOperation {
|
||||||
op_id,
|
op_id,
|
||||||
|
@ -0,0 +1,44 @@
|
|||||||
|
use crate::*;
|
||||||
|
use rpc_processor::*;
|
||||||
|
|
||||||
|
#[derive(Debug, Clone)]
|
||||||
|
pub struct RPCOperationAppCallQ {
|
||||||
|
pub message: Vec<u8>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl RPCOperationAppCallQ {
|
||||||
|
pub fn decode(
|
||||||
|
reader: &veilid_capnp::operation_app_call_q::Reader,
|
||||||
|
) -> Result<RPCOperationAppCallQ, RPCError> {
|
||||||
|
let message = reader.get_message().map_err(RPCError::protocol)?.to_vec();
|
||||||
|
Ok(RPCOperationAppCallQ { message })
|
||||||
|
}
|
||||||
|
pub fn encode(
|
||||||
|
&self,
|
||||||
|
builder: &mut veilid_capnp::operation_app_call_q::Builder,
|
||||||
|
) -> Result<(), RPCError> {
|
||||||
|
builder.set_message(&self.message);
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Debug, Clone)]
|
||||||
|
pub struct RPCOperationAppCallA {
|
||||||
|
pub message: Vec<u8>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl RPCOperationAppCallA {
|
||||||
|
pub fn decode(
|
||||||
|
reader: &veilid_capnp::operation_app_call_a::Reader,
|
||||||
|
) -> Result<RPCOperationAppCallA, RPCError> {
|
||||||
|
let message = reader.get_message().map_err(RPCError::protocol)?.to_vec();
|
||||||
|
Ok(RPCOperationAppCallA { message })
|
||||||
|
}
|
||||||
|
pub fn encode(
|
||||||
|
&self,
|
||||||
|
builder: &mut veilid_capnp::operation_app_call_a::Builder,
|
||||||
|
) -> Result<(), RPCError> {
|
||||||
|
builder.set_message(&self.message);
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
}
|
@ -0,0 +1,23 @@
|
|||||||
|
use crate::*;
|
||||||
|
use rpc_processor::*;
|
||||||
|
|
||||||
|
#[derive(Debug, Clone)]
|
||||||
|
pub struct RPCOperationAppMessage {
|
||||||
|
pub message: Vec<u8>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl RPCOperationAppMessage {
|
||||||
|
pub fn decode(
|
||||||
|
reader: &veilid_capnp::operation_app_message::Reader,
|
||||||
|
) -> Result<RPCOperationAppMessage, RPCError> {
|
||||||
|
let message = reader.get_message().map_err(RPCError::protocol)?.to_vec();
|
||||||
|
Ok(RPCOperationAppMessage { message })
|
||||||
|
}
|
||||||
|
pub fn encode(
|
||||||
|
&self,
|
||||||
|
builder: &mut veilid_capnp::operation_app_message::Builder,
|
||||||
|
) -> Result<(), RPCError> {
|
||||||
|
builder.set_message(&self.message);
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
}
|
@ -11,7 +11,7 @@ impl RPCOperationFindBlockQ {
reader: &veilid_capnp::operation_find_block_q::Reader,
) -> Result<RPCOperationFindBlockQ, RPCError> {
let bi_reader = reader.get_block_id().map_err(RPCError::protocol)?;
let block_id = decode_block_id(&bi_reader);
let block_id = decode_dht_key(&bi_reader);

Ok(RPCOperationFindBlockQ { block_id })
}

@ -20,7 +20,7 @@ impl RPCOperationFindBlockQ {
builder: &mut veilid_capnp::operation_find_block_q::Builder,
) -> Result<(), RPCError> {
let mut bi_builder = builder.reborrow().init_block_id();
encode_block_id(&self.block_id, &mut bi_builder)?;
encode_dht_key(&self.block_id, &mut bi_builder)?;

Ok(())
}

@ -47,7 +47,7 @@ impl RPCOperationFindBlockA {
.map_err(RPCError::map_internal("too many suppliers"))?,
);
for s in suppliers_reader.iter() {
let peer_info = decode_peer_info(&s, true)?;
let peer_info = decode_peer_info(&s)?;
suppliers.push(peer_info);
}

@ -59,7 +59,7 @@ impl RPCOperationFindBlockA {
.map_err(RPCError::map_internal("too many peers"))?,
);
for p in peers_reader.iter() {
let peer_info = decode_peer_info(&p, true)?;
let peer_info = decode_peer_info(&p)?;
peers.push(peer_info);
}
@ -11,7 +11,7 @@ impl RPCOperationFindNodeQ {
reader: &veilid_capnp::operation_find_node_q::Reader,
) -> Result<RPCOperationFindNodeQ, RPCError> {
let ni_reader = reader.get_node_id().map_err(RPCError::protocol)?;
let node_id = decode_public_key(&ni_reader);
let node_id = decode_dht_key(&ni_reader);
Ok(RPCOperationFindNodeQ { node_id })
}
pub fn encode(

@ -19,7 +19,7 @@ impl RPCOperationFindNodeQ {
builder: &mut veilid_capnp::operation_find_node_q::Builder,
) -> Result<(), RPCError> {
let mut ni_builder = builder.reborrow().init_node_id();
encode_public_key(&self.node_id, &mut ni_builder)?;
encode_dht_key(&self.node_id, &mut ni_builder)?;
Ok(())
}
}

@ -41,7 +41,7 @@ impl RPCOperationFindNodeA {
.map_err(RPCError::map_internal("too many peers"))?,
);
for p in peers_reader.iter() {
let peer_info = decode_peer_info(&p, true)?;
let peer_info = decode_peer_info(&p)?;
peers.push(peer_info);
}
@ -48,7 +48,7 @@ impl RPCOperationGetValueA {
.map_err(RPCError::map_internal("too many peers"))?,
);
for p in peers_reader.iter() {
let peer_info = decode_peer_info(&p, true)?;
let peer_info = decode_peer_info(&p)?;
peers.push(peer_info);
}
@ -9,10 +9,16 @@ pub struct RPCOperationNodeInfoUpdate {
impl RPCOperationNodeInfoUpdate {
pub fn decode(
reader: &veilid_capnp::operation_node_info_update::Reader,
sender_node_id: &DHTKey,
opt_sender_node_id: Option<&DHTKey>,
) -> Result<RPCOperationNodeInfoUpdate, RPCError> {
if opt_sender_node_id.is_none() {
return Err(RPCError::protocol(
"can't decode node info update without sender node id",
));
}
let sender_node_id = opt_sender_node_id.unwrap();
let sni_reader = reader.get_signed_node_info().map_err(RPCError::protocol)?;
let signed_node_info = decode_signed_node_info(&sni_reader, sender_node_id, true)?;
let signed_node_info = decode_signed_node_info(&sni_reader, sender_node_id)?;

Ok(RPCOperationNodeInfoUpdate { signed_node_info })
}
@ -3,14 +3,16 @@ use rpc_processor::*;

#[derive(Debug, Clone)]
pub struct RoutedOperation {
pub version: u8,
pub signatures: Vec<DHTSignature>,
pub nonce: Nonce,
pub data: Vec<u8>,
}

impl RoutedOperation {
pub fn new(nonce: Nonce, data: Vec<u8>) -> Self {
pub fn new(version: u8, nonce: Nonce, data: Vec<u8>) -> Self {
Self {
version,
signatures: Vec::new(),
nonce,
data,

@ -32,11 +34,13 @@ impl RoutedOperation {
signatures.push(sig);
}

let version = reader.get_version();
let n_reader = reader.get_nonce().map_err(RPCError::protocol)?;
let nonce = decode_nonce(&n_reader);
let data = reader.get_data().map_err(RPCError::protocol)?.to_vec();

Ok(RoutedOperation {
version,
signatures,
nonce,
data,

@ -47,6 +51,7 @@ impl RoutedOperation {
&self,
builder: &mut veilid_capnp::routed_operation::Builder,
) -> Result<(), RPCError> {
builder.reborrow().set_version(self.version);
let mut sigs_builder = builder.reborrow().init_signatures(
self.signatures
.len()
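RoutedOperation now records the envelope version it was built for, next to the nonce and the encrypted payload. A minimal sketch of the new constructor in use, assuming the types above are in scope; the helper name and how the caller obtains its version and nonce are illustrative, not part of this merge:

    // Hypothetical helper: wrap an already-encrypted routed payload, keeping the
    // envelope version it was produced for so later hops can handle it consistently.
    fn make_routed_operation(envelope_version: u8, nonce: Nonce, encrypted_data: Vec<u8>) -> RoutedOperation {
        // The constructor now takes the version first; signatures start empty and
        // are pushed by each hop as the operation is forwarded.
        RoutedOperation::new(envelope_version, nonce, encrypted_data)
    }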
@ -53,7 +53,7 @@ impl RPCOperationSetValueA {
.map_err(RPCError::map_internal("too many peers"))?,
);
for p in peers_reader.iter() {
let peer_info = decode_peer_info(&p, true)?;
let peer_info = decode_peer_info(&p)?;
peers.push(peer_info);
}
@ -3,42 +3,59 @@ use rpc_processor::*;

#[derive(Debug, Clone)]
pub struct RPCOperationStatusQ {
pub node_status: NodeStatus,
pub node_status: Option<NodeStatus>,
}

impl RPCOperationStatusQ {
pub fn decode(
reader: &veilid_capnp::operation_status_q::Reader,
) -> Result<RPCOperationStatusQ, RPCError> {
let ns_reader = reader.get_node_status().map_err(RPCError::protocol)?;
let node_status = decode_node_status(&ns_reader)?;
let node_status = if reader.has_node_status() {
let ns_reader = reader.get_node_status().map_err(RPCError::protocol)?;
let node_status = decode_node_status(&ns_reader)?;
Some(node_status)
} else {
None
};
Ok(RPCOperationStatusQ { node_status })
}
pub fn encode(
&self,
builder: &mut veilid_capnp::operation_status_q::Builder,
) -> Result<(), RPCError> {
let mut ns_builder = builder.reborrow().init_node_status();
encode_node_status(&self.node_status, &mut ns_builder)?;
if let Some(ns) = &self.node_status {
let mut ns_builder = builder.reborrow().init_node_status();
encode_node_status(&ns, &mut ns_builder)?;
}
Ok(())
}
}

#[derive(Debug, Clone)]
pub struct RPCOperationStatusA {
pub node_status: NodeStatus,
pub node_status: Option<NodeStatus>,
pub sender_info: SenderInfo,
pub sender_info: Option<SenderInfo>,
}

impl RPCOperationStatusA {
pub fn decode(
reader: &veilid_capnp::operation_status_a::Reader,
) -> Result<RPCOperationStatusA, RPCError> {
let ns_reader = reader.get_node_status().map_err(RPCError::protocol)?;
let node_status = decode_node_status(&ns_reader)?;
let node_status = if reader.has_node_status() {
let ns_reader = reader.get_node_status().map_err(RPCError::protocol)?;
let node_status = decode_node_status(&ns_reader)?;
Some(node_status)
} else {
None
};

let si_reader = reader.get_sender_info().map_err(RPCError::protocol)?;
let sender_info = decode_sender_info(&si_reader)?;
let sender_info = if reader.has_sender_info() {
let si_reader = reader.get_sender_info().map_err(RPCError::protocol)?;
let sender_info = decode_sender_info(&si_reader)?;
Some(sender_info)
} else {
None
};

Ok(RPCOperationStatusA {
node_status,

@ -49,10 +66,14 @@ impl RPCOperationStatusA {
&self,
builder: &mut veilid_capnp::operation_status_a::Builder,
) -> Result<(), RPCError> {
let mut ns_builder = builder.reborrow().init_node_status();
encode_node_status(&self.node_status, &mut ns_builder)?;
let mut si_builder = builder.reborrow().init_sender_info();
encode_sender_info(&self.sender_info, &mut si_builder)?;
if let Some(ns) = &self.node_status {
let mut ns_builder = builder.reborrow().init_node_status();
encode_node_status(&ns, &mut ns_builder)?;
}
if let Some(si) = &self.sender_info {
let mut si_builder = builder.reborrow().init_sender_info();
encode_sender_info(&si, &mut si_builder)?;
}
Ok(())
}
}
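Both the status question and answer now treat their payload fields as optional: the encoder only initializes a field when it has a value, and the decoder checks has_node_status() / has_sender_info() before reading. A minimal sketch of encoding a status question that deliberately carries no node status, assuming the struct above; the helper name is illustrative:

    // Illustrative helper: encode a status question with no node status attached.
    fn encode_empty_status_q(
        builder: &mut veilid_capnp::operation_status_q::Builder,
    ) -> Result<(), RPCError> {
        let status_q = RPCOperationStatusQ { node_status: None };
        // encode() only calls init_node_status() for Some(..), so the field stays
        // unset, has_node_status() is false on the reader side, and decode()
        // yields node_status: None instead of an error.
        status_q.encode(builder)
    }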
@ -11,7 +11,7 @@ impl RPCOperationSupplyBlockQ {
reader: &veilid_capnp::operation_supply_block_q::Reader,
) -> Result<RPCOperationSupplyBlockQ, RPCError> {
let bi_reader = reader.get_block_id().map_err(RPCError::protocol)?;
let block_id = decode_block_id(&bi_reader);
let block_id = decode_dht_key(&bi_reader);

Ok(RPCOperationSupplyBlockQ { block_id })
}

@ -20,7 +20,7 @@ impl RPCOperationSupplyBlockQ {
builder: &mut veilid_capnp::operation_supply_block_q::Builder,
) -> Result<(), RPCError> {
let mut bi_builder = builder.reborrow().init_block_id();
encode_block_id(&self.block_id, &mut bi_builder)?;
encode_dht_key(&self.block_id, &mut bi_builder)?;

Ok(())
}

@ -49,7 +49,7 @@ impl RPCOperationSupplyBlockA {
.map_err(RPCError::map_internal("too many peers"))?,
);
for p in peers_reader.iter() {
let peer_info = decode_peer_info(&p, true)?;
let peer_info = decode_peer_info(&p)?;
peers.push(peer_info);
}

@ -43,7 +43,7 @@ impl RPCOperationWatchValueA {
.map_err(RPCError::map_internal("too many peers"))?,
);
for p in peers_reader.iter() {
let peer_info = decode_peer_info(&p, true)?;
let peer_info = decode_peer_info(&p)?;
peers.push(peer_info);
}
@ -40,6 +40,7 @@ impl RPCQuestion {
pub enum RPCQuestionDetail {
StatusQ(RPCOperationStatusQ),
FindNodeQ(RPCOperationFindNodeQ),
AppCallQ(RPCOperationAppCallQ),
GetValueQ(RPCOperationGetValueQ),
SetValueQ(RPCOperationSetValueQ),
WatchValueQ(RPCOperationWatchValueQ),

@ -55,6 +56,7 @@ impl RPCQuestionDetail {
match self {
RPCQuestionDetail::StatusQ(_) => "StatusQ",
RPCQuestionDetail::FindNodeQ(_) => "FindNodeQ",
RPCQuestionDetail::AppCallQ(_) => "AppCallQ",
RPCQuestionDetail::GetValueQ(_) => "GetValueQ",
RPCQuestionDetail::SetValueQ(_) => "SetValueQ",
RPCQuestionDetail::WatchValueQ(_) => "WatchValueQ",

@ -81,6 +83,11 @@ impl RPCQuestionDetail {
let out = RPCOperationFindNodeQ::decode(&op_reader)?;
RPCQuestionDetail::FindNodeQ(out)
}
veilid_capnp::question::detail::Which::AppCallQ(r) => {
let op_reader = r.map_err(RPCError::protocol)?;
let out = RPCOperationAppCallQ::decode(&op_reader)?;
RPCQuestionDetail::AppCallQ(out)
}
veilid_capnp::question::detail::GetValueQ(r) => {
let op_reader = r.map_err(RPCError::protocol)?;
let out = RPCOperationGetValueQ::decode(&op_reader)?;

@ -131,6 +138,7 @@ impl RPCQuestionDetail {
match self {
RPCQuestionDetail::StatusQ(d) => d.encode(&mut builder.reborrow().init_status_q()),
RPCQuestionDetail::FindNodeQ(d) => d.encode(&mut builder.reborrow().init_find_node_q()),
RPCQuestionDetail::AppCallQ(d) => d.encode(&mut builder.reborrow().init_app_call_q()),
RPCQuestionDetail::GetValueQ(d) => d.encode(&mut builder.reborrow().init_get_value_q()),
RPCQuestionDetail::SetValueQ(d) => d.encode(&mut builder.reborrow().init_set_value_q()),
RPCQuestionDetail::WatchValueQ(d) => {
@ -22,10 +22,10 @@ impl RPCStatement {
}
pub fn decode(
reader: &veilid_capnp::statement::Reader,
sender_node_id: &DHTKey,
opt_sender_node_id: Option<&DHTKey>,
) -> Result<RPCStatement, RPCError> {
let d_reader = reader.get_detail();
let detail = RPCStatementDetail::decode(&d_reader, sender_node_id)?;
let detail = RPCStatementDetail::decode(&d_reader, opt_sender_node_id)?;
Ok(RPCStatement { detail })
}
pub fn encode(&self, builder: &mut veilid_capnp::statement::Builder) -> Result<(), RPCError> {

@ -42,6 +42,7 @@ pub enum RPCStatementDetail {
ValueChanged(RPCOperationValueChanged),
Signal(RPCOperationSignal),
ReturnReceipt(RPCOperationReturnReceipt),
AppMessage(RPCOperationAppMessage),
}

impl RPCStatementDetail {

@ -53,11 +54,12 @@ impl RPCStatementDetail {
RPCStatementDetail::ValueChanged(_) => "ValueChanged",
RPCStatementDetail::Signal(_) => "Signal",
RPCStatementDetail::ReturnReceipt(_) => "ReturnReceipt",
RPCStatementDetail::AppMessage(_) => "AppMessage",
}
}
pub fn decode(
reader: &veilid_capnp::statement::detail::Reader,
sender_node_id: &DHTKey,
opt_sender_node_id: Option<&DHTKey>,
) -> Result<RPCStatementDetail, RPCError> {
let which_reader = reader.which().map_err(RPCError::protocol)?;
let out = match which_reader {

@ -73,7 +75,7 @@ impl RPCStatementDetail {
}
veilid_capnp::statement::detail::NodeInfoUpdate(r) => {
let op_reader = r.map_err(RPCError::protocol)?;
let out = RPCOperationNodeInfoUpdate::decode(&op_reader, sender_node_id)?;
let out = RPCOperationNodeInfoUpdate::decode(&op_reader, opt_sender_node_id)?;
RPCStatementDetail::NodeInfoUpdate(out)
}
veilid_capnp::statement::detail::ValueChanged(r) => {

@ -91,6 +93,11 @@ impl RPCStatementDetail {
let out = RPCOperationReturnReceipt::decode(&op_reader)?;
RPCStatementDetail::ReturnReceipt(out)
}
veilid_capnp::statement::detail::AppMessage(r) => {
let op_reader = r.map_err(RPCError::protocol)?;
let out = RPCOperationAppMessage::decode(&op_reader)?;
RPCStatementDetail::AppMessage(out)
}
};
Ok(out)
}

@ -113,6 +120,9 @@ impl RPCStatementDetail {
RPCStatementDetail::ReturnReceipt(d) => {
d.encode(&mut builder.reborrow().init_return_receipt())
}
RPCStatementDetail::AppMessage(d) => {
d.encode(&mut builder.reborrow().init_app_message())
}
}
}
}
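Statements gain an AppMessage arm, and statement decoding now takes an optional sender node id, presumably because a statement arriving over a private route may not identify its sender. A minimal sketch of wrapping an application payload as a statement detail, assuming the types above; the payload is illustrative:

    // Wrap a fire-and-forget application payload as a statement detail; the
    // name match above reports it as "AppMessage".
    let app_msg = RPCOperationAppMessage {
        message: b"fire-and-forget app payload".to_vec(),
    };
    let detail = RPCStatementDetail::AppMessage(app_msg);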
|
@ -7,17 +7,14 @@ pub fn encode_peer_info(
|
|||||||
) -> Result<(), RPCError> {
|
) -> Result<(), RPCError> {
|
||||||
//
|
//
|
||||||
let mut nid_builder = builder.reborrow().init_node_id();
|
let mut nid_builder = builder.reborrow().init_node_id();
|
||||||
encode_public_key(&peer_info.node_id.key, &mut nid_builder)?;
|
encode_dht_key(&peer_info.node_id.key, &mut nid_builder)?;
|
||||||
let mut sni_builder = builder.reborrow().init_signed_node_info();
|
let mut sni_builder = builder.reborrow().init_signed_node_info();
|
||||||
encode_signed_node_info(&peer_info.signed_node_info, &mut sni_builder)?;
|
encode_signed_node_info(&peer_info.signed_node_info, &mut sni_builder)?;
|
||||||
|
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn decode_peer_info(
|
pub fn decode_peer_info(reader: &veilid_capnp::peer_info::Reader) -> Result<PeerInfo, RPCError> {
|
||||||
reader: &veilid_capnp::peer_info::Reader,
|
|
||||||
allow_relay_peer_info: bool,
|
|
||||||
) -> Result<PeerInfo, RPCError> {
|
|
||||||
let nid_reader = reader
|
let nid_reader = reader
|
||||||
.reborrow()
|
.reborrow()
|
||||||
.get_node_id()
|
.get_node_id()
|
||||||
@ -26,9 +23,8 @@ pub fn decode_peer_info(
|
|||||||
.reborrow()
|
.reborrow()
|
||||||
.get_signed_node_info()
|
.get_signed_node_info()
|
||||||
.map_err(RPCError::protocol)?;
|
.map_err(RPCError::protocol)?;
|
||||||
let node_id = NodeId::new(decode_public_key(&nid_reader));
|
let node_id = NodeId::new(decode_dht_key(&nid_reader));
|
||||||
let signed_node_info =
|
let signed_node_info = decode_signed_node_info(&sni_reader, &node_id.key)?;
|
||||||
decode_signed_node_info(&sni_reader, &node_id.key, allow_relay_peer_info)?;
|
|
||||||
|
|
||||||
Ok(PeerInfo {
|
Ok(PeerInfo {
|
||||||
node_id,
|
node_id,
|
||||||
|
@ -2,80 +2,6 @@ use super::*;

////////////////////////////////////////////////////////////////////////////////////////////////////

#[derive(Clone, Debug)]
pub struct RouteHopData {
pub nonce: Nonce,
pub blob: Vec<u8>,
}

#[derive(Clone, Debug)]
pub struct RouteHop {
pub dial_info: NodeDialInfo,
pub next_hop: Option<RouteHopData>,
}

#[derive(Clone, Debug)]
pub struct PrivateRoute {
pub public_key: DHTKey,
pub hop_count: u8,
pub hops: Option<RouteHop>,
}

impl PrivateRoute {
pub fn new_stub(public_key: DHTKey) -> Self {
Self {
public_key,
hop_count: 0,
hops: None,
}
}
}

impl fmt::Display for PrivateRoute {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(
f,
"PR({:?}+{}{})",
self.public_key,
self.hop_count,
if let Some(hops) = &self.hops {
format!("->{}", hops.dial_info)
} else {
"".to_owned()
}
)
}
}

#[derive(Clone, Debug)]
pub enum SafetyRouteHops {
Data(RouteHopData),
Private(PrivateRoute),
}

#[derive(Clone, Debug)]
pub struct SafetyRoute {
pub public_key: DHTKey,
pub hop_count: u8,
pub hops: SafetyRouteHops,
}

impl fmt::Display for SafetyRoute {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(
f,
"SR({:?}+{}{})",
self.public_key,
self.hop_count,
match &self.hops {
SafetyRouteHops::Data(_) => "".to_owned(),
SafetyRouteHops::Private(p) => format!("->{}", p),
}
)
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////

pub fn encode_route_hop_data(
route_hop_data: &RouteHopData,
builder: &mut veilid_capnp::route_hop_data::Builder,

@ -98,62 +24,6 @@ pub fn encode_route_hop_data(
Ok(())
}

pub fn encode_route_hop(
route_hop: &RouteHop,
builder: &mut veilid_capnp::route_hop::Builder,
) -> Result<(), RPCError> {
encode_node_dial_info(
&route_hop.dial_info,
&mut builder.reborrow().init_dial_info(),
)?;
if let Some(rhd) = &route_hop.next_hop {
let mut rhd_builder = builder.reborrow().init_next_hop();
encode_route_hop_data(rhd, &mut rhd_builder)?;
}
Ok(())
}

pub fn encode_private_route(
private_route: &PrivateRoute,
builder: &mut veilid_capnp::private_route::Builder,
) -> Result<(), RPCError> {
encode_public_key(
&private_route.public_key,
&mut builder.reborrow().init_public_key(),
)?;
builder.set_hop_count(private_route.hop_count);
if let Some(rh) = &private_route.hops {
let mut rh_builder = builder.reborrow().init_first_hop();
encode_route_hop(rh, &mut rh_builder)?;
};

Ok(())
}

pub fn encode_safety_route(
safety_route: &SafetyRoute,
builder: &mut veilid_capnp::safety_route::Builder,
) -> Result<(), RPCError> {
encode_public_key(
&safety_route.public_key,
&mut builder.reborrow().init_public_key(),
)?;
builder.set_hop_count(safety_route.hop_count);
let h_builder = builder.reborrow().init_hops();
match &safety_route.hops {
SafetyRouteHops::Data(rhd) => {
let mut rhd_builder = h_builder.init_data();
encode_route_hop_data(rhd, &mut rhd_builder)?;
}
SafetyRouteHops::Private(pr) => {
let mut pr_builder = h_builder.init_private();
encode_private_route(pr, &mut pr_builder)?;
}
};

Ok(())
}

pub fn decode_route_hop_data(
reader: &veilid_capnp::route_hop_data::Reader,
) -> Result<RouteHopData, RPCError> {
@ -173,13 +43,45 @@ pub fn decode_route_hop_data(
Ok(RouteHopData { nonce, blob })
}

////////////////////////////////////////////////////////////////////////////////////////////////////

pub fn encode_route_hop(
route_hop: &RouteHop,
builder: &mut veilid_capnp::route_hop::Builder,
) -> Result<(), RPCError> {
let node_builder = builder.reborrow().init_node();
match &route_hop.node {
RouteNode::NodeId(ni) => {
let mut ni_builder = node_builder.init_node_id();
encode_dht_key(&ni.key, &mut ni_builder)?;
}
RouteNode::PeerInfo(pi) => {
let mut pi_builder = node_builder.init_peer_info();
encode_peer_info(&pi, &mut pi_builder)?;
}
}
if let Some(rhd) = &route_hop.next_hop {
let mut rhd_builder = builder.reborrow().init_next_hop();
encode_route_hop_data(rhd, &mut rhd_builder)?;
}
Ok(())
}

pub fn decode_route_hop(reader: &veilid_capnp::route_hop::Reader) -> Result<RouteHop, RPCError> {
let dial_info = decode_node_dial_info(
&reader
.reborrow()
.get_dial_info()
.map_err(RPCError::map_protocol("invalid dial info in route hop"))?,
)?;
let n_reader = reader.reborrow().get_node();
let node = match n_reader.which().map_err(RPCError::protocol)? {
veilid_capnp::route_hop::node::Which::NodeId(ni) => {
let ni_reader = ni.map_err(RPCError::protocol)?;
RouteNode::NodeId(NodeId::new(decode_dht_key(&ni_reader)))
}
veilid_capnp::route_hop::node::Which::PeerInfo(pi) => {
let pi_reader = pi.map_err(RPCError::protocol)?;
RouteNode::PeerInfo(
decode_peer_info(&pi_reader)
.map_err(RPCError::map_protocol("invalid peer info in route hop"))?,
)
}
};

let next_hop = if reader.has_next_hop() {
let rhd_reader = reader
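Route hops no longer carry dial info directly; each hop now names its node either by DHT key alone or by full peer info. The RouteNode type itself is defined elsewhere in this merge, but judging from the match arms above it presumably has a shape like the following sketch (field layout inferred, not copied from the source):

    // Inferred shape of the route hop node union used by encode_route_hop /
    // decode_route_hop above; treat as illustrative only.
    #[derive(Clone, Debug)]
    pub enum RouteNode {
        /// Hop identified only by its key; the forwarder must already know how to reach it.
        NodeId(NodeId),
        /// Hop carrying full peer info so the forwarder can dial it directly.
        PeerInfo(PeerInfo),
    }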
@ -190,26 +92,55 @@ pub fn decode_route_hop(reader: &veilid_capnp::route_hop::Reader) -> Result<Rout
None
};

Ok(RouteHop {
dial_info,
next_hop,
})
Ok(RouteHop { node, next_hop })
}

////////////////////////////////////////////////////////////////////////////////////////////////////

pub fn encode_private_route(
private_route: &PrivateRoute,
builder: &mut veilid_capnp::private_route::Builder,
) -> Result<(), RPCError> {
encode_dht_key(
&private_route.public_key,
&mut builder.reborrow().init_public_key(),
)?;
builder.set_hop_count(private_route.hop_count);
let mut h_builder = builder.reborrow().init_hops();
match &private_route.hops {
PrivateRouteHops::FirstHop(first_hop) => {
let mut rh_builder = h_builder.init_first_hop();
encode_route_hop(first_hop, &mut rh_builder)?;
}
PrivateRouteHops::Data(data) => {
let mut rhd_builder = h_builder.init_data();
encode_route_hop_data(data, &mut rhd_builder)?;
}
PrivateRouteHops::Empty => {
h_builder.set_empty(());
}
};
Ok(())
}

pub fn decode_private_route(
reader: &veilid_capnp::private_route::Reader,
) -> Result<PrivateRoute, RPCError> {
let public_key = decode_public_key(&reader.get_public_key().map_err(
RPCError::map_protocol("invalid public key in private route"),
)?);
let public_key = decode_dht_key(&reader.get_public_key().map_err(RPCError::map_protocol(
"invalid public key in private route",
))?);
let hop_count = reader.get_hop_count();
let hops = if reader.has_first_hop() {
let rh_reader = reader
.get_first_hop()
.map_err(RPCError::map_protocol("invalid first hop in private route"))?;
Some(decode_route_hop(&rh_reader)?)
} else {
None
let hops = match reader.get_hops().which().map_err(RPCError::protocol)? {
veilid_capnp::private_route::hops::Which::FirstHop(rh_reader) => {
let rh_reader = rh_reader.map_err(RPCError::protocol)?;
PrivateRouteHops::FirstHop(decode_route_hop(&rh_reader)?)
}
veilid_capnp::private_route::hops::Which::Data(rhd_reader) => {
let rhd_reader = rhd_reader.map_err(RPCError::protocol)?;
PrivateRouteHops::Data(decode_route_hop_data(&rhd_reader)?)
}
veilid_capnp::private_route::hops::Which::Empty(_) => PrivateRouteHops::Empty,
};

Ok(PrivateRoute {
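The hops of a private route are now a three-way union rather than an Option<RouteHop>. PrivateRouteHops is defined elsewhere in the merge; from the arms handled above it presumably looks like this sketch (illustrative only, not copied from the source):

    // Inferred from encode_private_route / decode_private_route above.
    #[derive(Clone, Debug)]
    pub enum PrivateRouteHops {
        /// The route still has its first hop attached in the clear.
        FirstHop(RouteHop),
        /// The remaining hops are an encrypted blob only the next node can open.
        Data(RouteHopData),
        /// Terminal marker: no further hops.
        Empty,
    }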
@ -219,10 +150,36 @@ pub fn decode_private_route(
})
}

////////////////////////////////////////////////////////////////////////////////////////////////////

pub fn encode_safety_route(
safety_route: &SafetyRoute,
builder: &mut veilid_capnp::safety_route::Builder,
) -> Result<(), RPCError> {
encode_dht_key(
&safety_route.public_key,
&mut builder.reborrow().init_public_key(),
)?;
builder.set_hop_count(safety_route.hop_count);
let h_builder = builder.reborrow().init_hops();
match &safety_route.hops {
SafetyRouteHops::Data(rhd) => {
let mut rhd_builder = h_builder.init_data();
encode_route_hop_data(rhd, &mut rhd_builder)?;
}
SafetyRouteHops::Private(pr) => {
let mut pr_builder = h_builder.init_private();
encode_private_route(pr, &mut pr_builder)?;
}
};

Ok(())
}

pub fn decode_safety_route(
reader: &veilid_capnp::safety_route::Reader,
) -> Result<SafetyRoute, RPCError> {
let public_key = decode_public_key(
let public_key = decode_dht_key(
&reader
.get_public_key()
.map_err(RPCError::map_protocol("invalid public key in safety route"))?,
@ -5,30 +5,21 @@ pub fn encode_sender_info(
sender_info: &SenderInfo,
builder: &mut veilid_capnp::sender_info::Builder,
) -> Result<(), RPCError> {
if let Some(socket_address) = &sender_info.socket_address {
let mut sab = builder.reborrow().init_socket_address();
encode_socket_address(socket_address, &mut sab)?;
}
let mut sab = builder.reborrow().init_socket_address();
encode_socket_address(&sender_info.socket_address, &mut sab)?;
Ok(())
}

pub fn decode_sender_info(
reader: &veilid_capnp::sender_info::Reader,
) -> Result<SenderInfo, RPCError> {
if !reader.has_socket_address() {
return Err(RPCError::internal("invalid socket address type"));
}
let socket_address = if reader.has_socket_address() {
Some(decode_socket_address(
&reader
.reborrow()
.get_socket_address()
.map_err(RPCError::map_internal(
"invalid socket address in sender_info",
))?,
)?)
} else {
None
};
let sa_reader = reader
.reborrow()
.get_socket_address()
.map_err(RPCError::map_internal(
"invalid socket address in sender_info",
))?;
let socket_address = decode_socket_address(&sa_reader)?;
Ok(SenderInfo { socket_address })
}
@ -53,7 +53,7 @@ pub fn decode_signal_info(
let pi_reader = r.get_peer_info().map_err(RPCError::map_protocol(
"invalid peer info in hole punch signal info",
))?;
let peer_info = decode_peer_info(&pi_reader, true)?;
let peer_info = decode_peer_info(&pi_reader)?;

SignalInfo::HolePunch { receipt, peer_info }
}

@ -69,7 +69,7 @@ pub fn decode_signal_info(
let pi_reader = r.get_peer_info().map_err(RPCError::map_protocol(
"invalid peer info in reverse connect signal info",
))?;
let peer_info = decode_peer_info(&pi_reader, true)?;
let peer_info = decode_peer_info(&pi_reader)?;

SignalInfo::ReverseConnect { receipt, peer_info }
}
@ -0,0 +1,46 @@
use crate::*;
use rpc_processor::*;

pub fn encode_signed_direct_node_info(
signed_direct_node_info: &SignedDirectNodeInfo,
builder: &mut veilid_capnp::signed_direct_node_info::Builder,
) -> Result<(), RPCError> {
//
let mut ni_builder = builder.reborrow().init_node_info();
encode_node_info(&signed_direct_node_info.node_info, &mut ni_builder)?;

builder
.reborrow()
.set_timestamp(signed_direct_node_info.timestamp);

let mut sig_builder = builder.reborrow().init_signature();
let Some(signature) = &signed_direct_node_info.signature else {
return Err(RPCError::internal("Should not encode SignedDirectNodeInfo without signature!"));
};
encode_signature(signature, &mut sig_builder);

Ok(())
}

pub fn decode_signed_direct_node_info(
reader: &veilid_capnp::signed_direct_node_info::Reader,
node_id: &DHTKey,
) -> Result<SignedDirectNodeInfo, RPCError> {
let ni_reader = reader
.reborrow()
.get_node_info()
.map_err(RPCError::protocol)?;
let node_info = decode_node_info(&ni_reader)?;

let sig_reader = reader
.reborrow()
.get_signature()
.map_err(RPCError::protocol)?;

let timestamp = reader.reborrow().get_timestamp();

let signature = decode_signature(&sig_reader);

SignedDirectNodeInfo::new(NodeId::new(*node_id), node_info, timestamp, signature)
.map_err(RPCError::protocol)
}
@ -5,14 +5,16 @@ pub fn encode_signed_node_info(
signed_node_info: &SignedNodeInfo,
builder: &mut veilid_capnp::signed_node_info::Builder,
) -> Result<(), RPCError> {
//
let mut ni_builder = builder.reborrow().init_node_info();
encode_node_info(&signed_node_info.node_info, &mut ni_builder)?;

let mut sig_builder = builder.reborrow().init_signature();
encode_signature(&signed_node_info.signature, &mut sig_builder);

builder.reborrow().set_timestamp(signed_node_info.timestamp);
match signed_node_info {
SignedNodeInfo::Direct(d) => {
let mut d_builder = builder.reborrow().init_direct();
encode_signed_direct_node_info(d, &mut d_builder)?;
}
SignedNodeInfo::Relayed(r) => {
let mut r_builder = builder.reborrow().init_relayed();
encode_signed_relayed_node_info(r, &mut r_builder)?;
}
}

Ok(())
}

@ -20,22 +22,20 @@ pub fn encode_signed_node_info(
pub fn decode_signed_node_info(
reader: &veilid_capnp::signed_node_info::Reader,
node_id: &DHTKey,
allow_relay_peer_info: bool,
) -> Result<SignedNodeInfo, RPCError> {
let ni_reader = reader
.reborrow()
.get_node_info()
.map_err(RPCError::protocol)?;
let node_info = decode_node_info(&ni_reader, allow_relay_peer_info)?;

let sig_reader = reader
.reborrow()
.get_signature()
.map_err(RPCError::protocol)?;
let signature = decode_signature(&sig_reader);

let timestamp = reader.reborrow().get_timestamp();

SignedNodeInfo::new(node_info, NodeId::new(*node_id), signature, timestamp)
.map_err(RPCError::protocol)
match reader
.which()
.map_err(RPCError::map_internal("invalid signal operation"))?
{
veilid_capnp::signed_node_info::Direct(d) => {
let d_reader = d.map_err(RPCError::protocol)?;
let sdni = decode_signed_direct_node_info(&d_reader, node_id)?;
Ok(SignedNodeInfo::Direct(sdni))
}
veilid_capnp::signed_node_info::Relayed(r) => {
let r_reader = r.map_err(RPCError::protocol)?;
let srni = decode_signed_relayed_node_info(&r_reader, node_id)?;
Ok(SignedNodeInfo::Relayed(srni))
}
}
}
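SignedNodeInfo is now split into direct and relayed variants, each with its own sub-coder (the relayed coder is added elsewhere in this merge). From the match arms above, the enum presumably has a shape like this sketch (illustrative only, not copied from the source):

    // Inferred from encode_signed_node_info / decode_signed_node_info above.
    #[derive(Clone, Debug)]
    pub enum SignedNodeInfo {
        /// Node info signed by the node itself and reachable directly.
        Direct(SignedDirectNodeInfo),
        /// Node info for a node that is only reachable through a relay.
        Relayed(SignedRelayedNodeInfo),
    }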
Some files were not shown because too many files have changed in this diff.