Mirror of https://gitlab.com/veilid/veilid.git (synced 2025-03-12 17:06:37 -04:00)
Merge branch 'flutter-work' into 'main'

Fixes for attach speed and futures optimizations

See merge request veilid/veilid!356

Commit 42d8c6a291
Cargo.toml (16 changed lines)
@@ -61,3 +61,19 @@ debug-assertions = false
[profile.dev.package.chacha20]
opt-level = 3
debug-assertions = false
+
+[workspace.lints.clippy]
+all = { level = "deny", priority = -1 }
+must_use_candidate = "deny"
+large_futures = "deny"
+large_stack_arrays = "deny"
+large_stack_frames = "deny"
+large_types_passed_by_value = "deny"
+unused_async = "deny"
+ptr_cast_constness = "deny"
+comparison_chain = "allow"
+upper_case_acronyms = "allow"
+
+[workspace.lints.rust]
+unused_must_use = "deny"
+unexpected_cfgs = { level = "warn", check-cfg = ['cfg(tokio_unstable)'] }
clippy.toml (new file, 3 lines)

@@ -0,0 +1,3 @@
+future-size-threshold = 8192
+array-size-threshold = 8192
+stack-size-threshold = 128000
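The workspace now denies `clippy::large_futures` and tunes its trigger point through `future-size-threshold` above. As a rough sketch of what that lint guards against (illustrative code, not from this repository), a local buffer held across an `.await` becomes part of every caller's future unless the large future is boxed:

// Illustrative only: the shape of a `clippy::large_futures` finding.
async fn build_report() -> usize {
    // 16 KiB held across an await point, well past an 8192-byte threshold.
    let scratch = [0u8; 16 * 1024];
    async {}.await; // stand-in for real asynchronous work
    scratch.len()
}

async fn caller() -> usize {
    // Boxing the large future moves its state to the heap; the lint stops
    // firing at this await site and `caller`'s own future stays small.
    Box::pin(build_report()).await
}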
@@ -77,3 +77,6 @@ console = "0.15.8"

[dev-dependencies]
serial_test = "^2"
+
+[lints]
+workspace = true
@@ -51,7 +51,7 @@ impl ClientApiConnection {
inner.reply_channels.clear();
}

- async fn process_veilid_state<'a>(&self, state: &json::JsonValue) {
+ fn process_veilid_state(&self, state: &json::JsonValue) {
let comproc = self.inner.lock().comproc.clone();
comproc.update_attachment(&state["attachment"]);
comproc.update_network_status(&state["network"]);
@@ -77,7 +77,7 @@ impl ClientApiConnection {
}
}

- async fn process_veilid_update(&self, update: json::JsonValue) {
+ fn process_veilid_update(&self, update: json::JsonValue) {
let comproc = self.inner.lock().comproc.clone();
let Some(kind) = update["kind"].as_str() else {
comproc.log_message(Level::Error, &format!("missing update kind: {}", update));
@@ -164,7 +164,7 @@ impl ClientApiConnection {
};

if j["type"] == "Update" {
- this.process_veilid_update(j).await;
+ this.process_veilid_update(j);
} else if j["type"] == "Response" {
this.process_response(j).await;
}
@@ -198,7 +198,7 @@ impl ClientApiConnection {
error!("failed to get state: {}", resp["error"]);
return;
}
- capi.process_veilid_state(&resp["value"]).await;
+ capi.process_veilid_state(&resp["value"]);
});

// Send and receive until we're done or a stop is requested
@@ -420,7 +420,7 @@ impl ClientApiConnection {
}

// End Client API connection
- pub async fn disconnect(&self) {
+ pub fn disconnect(&self) {
trace!("ClientApiConnection::disconnect");
let mut inner = self.inner.lock();
if inner.disconnector.is_some() {
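The `veilid-cli` changes above all follow from the new `clippy::unused_async` deny: methods that never `.await` anything lose the `async` keyword, and their call sites drop `.await`. A minimal sketch of the transformation (hypothetical type, not the real ClientApiConnection):

struct Comproc;

impl Comproc {
    // Before: `pub async fn update_status(&self)` with no `.await` in the body,
    // which made every caller carry a pointless future.
    // After: a plain method; call sites change from
    // `comproc.update_status().await` to `comproc.update_status()`.
    pub fn update_status(&self) -> bool {
        true
    }
}

fn main() {
    let comproc = Comproc;
    assert!(comproc.update_status());
}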
@@ -182,7 +182,7 @@ Core Debug Commands:
let capi = self.capi();
let ui = self.ui_sender();
spawn_detached_local("cmd disconnect", async move {
- capi.disconnect().await;
+ capi.disconnect();
ui.send_callback(callback);
});
Ok(())
@@ -195,7 +195,7 @@ Core Debug Commands:

let this = self.clone();
spawn_detached_local("cmd connect", async move {
- capi.disconnect().await;
+ capi.disconnect();

if let Some(rest) = rest {
if let Ok(subnode_index) = u16::from_str(&rest) {
@@ -690,7 +690,7 @@ Core Debug Commands:
////////////////////////////////////////////
pub fn start_connection(&self) {
self.inner_mut().reconnect = true;
- self.inner_mut().connection_waker.resolve();
+ drop(self.inner_mut().connection_waker.resolve());
}
// pub fn stop_connection(&self) {
// self.inner_mut().reconnect = false;
@@ -701,12 +701,12 @@ Core Debug Commands:
// }
pub fn cancel_reconnect(&self) {
self.inner_mut().reconnect = false;
- self.inner_mut().connection_waker.resolve();
+ drop(self.inner_mut().connection_waker.resolve());
}
pub fn quit(&self) {
self.inner_mut().finished = true;
self.inner_mut().reconnect = false;
- self.inner_mut().connection_waker.resolve();
+ drop(self.inner_mut().connection_waker.resolve());
}

// called by ui
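`connection_waker.resolve()` apparently returns a value, and with `unused_must_use` and `must_use_candidate` denied it can no longer be ignored silently, so the command processor wraps the call in `drop(...)`. A small illustration of that idiom with hypothetical types (not Veilid's actual waker):

#[must_use]
struct ResolveHandle;

struct Waker;

impl Waker {
    fn resolve(&self) -> ResolveHandle {
        ResolveHandle
    }
}

fn main() {
    let waker = Waker;
    // A bare `waker.resolve();` would trip the unused #[must_use] warning;
    // `drop(...)` discards the handle explicitly and documents the intent.
    drop(waker.resolve());
}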
@@ -1,6 +1,3 @@
- #![deny(clippy::all)]
- #![allow(clippy::comparison_chain, clippy::upper_case_acronyms)]
- #![deny(unused_must_use)]
#![recursion_limit = "256"]

use crate::{tools::*, ui::*};
@@ -290,7 +287,7 @@ fn main() -> Result<(), String> {

// When UI quits, close connection and command processor cleanly
comproc2.quit();
- capi.disconnect().await;
+ capi.disconnect();
};

cfg_if! {
@@ -286,3 +286,6 @@ reqwest = { version = "0.11", features = ["blocking"], optional = true }

[package.metadata.wasm-pack.profile.release]
wasm-opt = ["-O", "--enable-mutable-globals"]
+
+[lints]
+workspace = true
@@ -16,10 +16,10 @@ pub trait VeilidComponent:
AsAnyArcSendSync + VeilidComponentRegistryAccessor + core::fmt::Debug
{
fn name(&self) -> &'static str;
- fn init(&self) -> SendPinBoxFutureLifetime<'_, EyreResult<()>>;
- fn post_init(&self) -> SendPinBoxFutureLifetime<'_, EyreResult<()>>;
- fn pre_terminate(&self) -> SendPinBoxFutureLifetime<'_, ()>;
- fn terminate(&self) -> SendPinBoxFutureLifetime<'_, ()>;
+ fn init(&self) -> PinBoxFuture<'_, EyreResult<()>>;
+ fn post_init(&self) -> PinBoxFuture<'_, EyreResult<()>>;
+ fn pre_terminate(&self) -> PinBoxFuture<'_, ()>;
+ fn terminate(&self) -> PinBoxFuture<'_, ()>;
}

pub trait VeilidComponentRegistryAccessor {
@@ -171,7 +171,7 @@ impl VeilidComponentRegistry {
}

// Event bus starts up early
- self.event_bus.startup().await?;
+ self.event_bus.startup()?;

// Process components in initialization order
let init_order = self.get_init_order();
@@ -320,19 +320,19 @@ macro_rules! impl_veilid_component {
stringify!($component_name)
}

- fn init(&self) -> SendPinBoxFutureLifetime<'_, EyreResult<()>> {
+ fn init(&self) -> PinBoxFuture<'_, EyreResult<()>> {
Box::pin(async { self.init_async().await })
}

- fn post_init(&self) -> SendPinBoxFutureLifetime<'_, EyreResult<()>> {
+ fn post_init(&self) -> PinBoxFuture<'_, EyreResult<()>> {
Box::pin(async { self.post_init_async().await })
}

- fn pre_terminate(&self) -> SendPinBoxFutureLifetime<'_, ()> {
+ fn pre_terminate(&self) -> PinBoxFuture<'_, ()> {
Box::pin(async { self.pre_terminate_async().await })
}

- fn terminate(&self) -> SendPinBoxFutureLifetime<'_, ()> {
+ fn terminate(&self) -> PinBoxFuture<'_, ()> {
Box::pin(async { self.terminate_async().await })
}
}
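The component lifecycle trait keeps returning boxed futures so it remains object-safe; only the alias changes, from `SendPinBoxFutureLifetime` to `PinBoxFuture`. The alias definition is not part of this diff, so the sketch below writes out a plausible shape explicitly (the `Send` bound is an assumption):

use std::future::Future;
use std::pin::Pin;

// Assumed shape of the alias; the real definition lives elsewhere in veilid-tools.
type PinBoxFuture<'a, T> = Pin<Box<dyn Future<Output = T> + Send + 'a>>;

trait Component {
    // Trait objects cannot expose `async fn`, so lifecycle hooks return a
    // boxed future that implementations build with `Box::pin(async { ... })`.
    fn init(&self) -> PinBoxFuture<'_, ()>;
}

struct Example;

impl Component for Example {
    fn init(&self) -> PinBoxFuture<'_, ()> {
        Box::pin(async move {
            // real components would await their asynchronous setup here
        })
    }
}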
@@ -45,6 +45,7 @@ pub struct Envelope {
}

impl Envelope {
+ #[must_use]
pub fn new(
version: EnvelopeVersion,
crypto_kind: CryptoKind,
@@ -314,6 +315,7 @@ impl Envelope {
Ok(data)
}

+ #[must_use]
pub fn get_version(&self) -> u8 {
self.version
}
@@ -1,6 +1,7 @@
use super::*;

/// Guard to access a particular cryptosystem
+ #[must_use]
pub struct CryptoSystemGuard<'a> {
crypto_system: Arc<dyn CryptoSystem + Send + Sync>,
_phantom: core::marker::PhantomData<&'a (dyn CryptoSystem + Send + Sync)>,
@@ -27,6 +28,7 @@ impl<'a> core::ops::Deref for CryptoSystemGuard<'a> {
}

/// Async cryptosystem guard to help break up heavy blocking operations
+ #[must_use]
pub struct AsyncCryptoSystemGuard<'a> {
guard: CryptoSystemGuard<'a>,
}
@@ -42,6 +44,7 @@ impl<'a> AsyncCryptoSystemGuard<'a> {
pub fn kind(&self) -> CryptoKind {
self.guard.kind()
}
+ #[must_use]
pub fn crypto(&self) -> VeilidComponentGuard<'_, Crypto> {
self.guard.crypto()
}
@@ -59,6 +62,7 @@ impl<'a> AsyncCryptoSystemGuard<'a> {
pub async fn random_bytes(&self, len: u32) -> Vec<u8> {
yielding(|| self.guard.random_bytes(len)).await
}
+ #[must_use]
pub fn default_salt_length(&self) -> u32 {
self.guard.default_salt_length()
}
@@ -160,6 +164,7 @@ impl<'a> AsyncCryptoSystemGuard<'a> {
}

// AEAD Encrypt/Decrypt
+ #[must_use]
pub fn aead_overhead(&self) -> usize {
self.guard.aead_overhead()
}
@@ -67,13 +67,14 @@ pub const VALID_ENVELOPE_VERSIONS: [EnvelopeVersion; 1] = [0u8];
/// Number of envelope versions to keep on structures if many are present beyond the ones we consider valid
pub const MAX_ENVELOPE_VERSIONS: usize = 3;
/// Return the best envelope version we support
+ #[must_use]
pub fn best_envelope_version() -> EnvelopeVersion {
VALID_ENVELOPE_VERSIONS[0]
}

struct CryptoInner {
dh_cache: DHCache,
- flush_future: Option<SendPinBoxFuture<()>>,
+ flush_future: Option<PinBoxFutureStatic<()>>,
}

impl fmt::Debug for CryptoInner {
@@ -88,6 +89,7 @@ impl fmt::Debug for CryptoInner {
}

/// Crypto factory implementation
+ #[must_use]
pub struct Crypto {
registry: VeilidComponentRegistry,
inner: Mutex<CryptoInner>,
@@ -221,6 +223,7 @@ impl Crypto {
};
}

+ #[expect(clippy::unused_async)]
async fn terminate_async(&self) {
// Nothing to terminate at this time
}
@@ -196,6 +196,7 @@ impl Receipt {
Ok(data)
}

+ #[must_use]
pub fn get_version(&self) -> u8 {
self.version
}
@@ -216,6 +217,7 @@ impl Receipt {
TypedKey::new(self.crypto_kind, self.sender_id)
}

+ #[must_use]
pub fn get_extra_data(&self) -> &[u8] {
&self.extra_data
}
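Most of the crypto hunks above simply add `#[must_use]` to guard types, constructors, and getters so that, with `clippy::must_use_candidate` forcing the annotation and `unused_must_use` denied, a computed value cannot be dropped by accident. A compact illustration with a hypothetical type:

#[must_use]
struct Envelope {
    version: u8,
}

impl Envelope {
    #[must_use]
    fn get_version(&self) -> u8 {
        self.version
    }
}

fn main() {
    let envelope = Envelope { version: 0 };
    // `envelope.get_version();` as a bare statement would warn; binding the
    // value (or passing it on) is what #[must_use] asks for.
    let version = envelope.get_version();
    assert_eq!(version, 0);
}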
@@ -221,11 +221,11 @@ pub async fn test_generation(vcrypto: &AsyncCryptoSystemGuard<'_>) {
let pstr5 = vcrypto.hash_password(b"abc124", b"qwerasdg").await.unwrap();
assert_ne!(pstr3, pstr5);

- vcrypto
+ let _ = vcrypto
    .hash_password(b"abc123", b"qwe")
    .await
    .expect_err("should reject short salt");
- vcrypto
+ let _ = vcrypto
    .hash_password(
        b"abc123",
        b"qwerqwerqwerqwerqwerqwerqwerqwerqwerqwerqwerqwerqwerqwerqwerqwerz",
@@ -249,11 +249,11 @@ pub async fn test_generation(vcrypto: &AsyncCryptoSystemGuard<'_>) {
let ss5 = vcrypto.derive_shared_secret(b"abc124", b"qwerasdg").await;
assert_ne!(ss3, ss5);

- vcrypto
+ let _ = vcrypto
    .derive_shared_secret(b"abc123", b"qwe")
    .await
    .expect_err("should reject short salt");
- vcrypto
+ let _ = vcrypto
    .derive_shared_secret(
        b"abc123",
        b"qwerqwerqwerqwerqwerqwerqwerqwerqwerqwerqwerqwerqwerqwerqwerqwerz",
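In these failure-path tests only the fact of failure matters, yet the value returned by `expect_err` would still trip the unused-result lints, so `let _ =` is prepended to discard it deliberately. A small sketch of the same idiom, with the error type assumed to be `#[must_use]` for illustration:

#[derive(Debug)]
#[must_use]
struct ApiError(String);

fn hash_password(salt: &[u8]) -> Result<String, ApiError> {
    if salt.len() < 8 {
        return Err(ApiError("salt too short".to_string()));
    }
    Ok("hashed".to_string())
}

fn main() {
    // The test only asserts that the call fails; `let _ =` discards the
    // returned error without tripping the unused #[must_use] lint.
    let _ = hash_password(b"qwe").expect_err("should reject short salt");
}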
@@ -91,7 +91,7 @@ pub async fn test_receipt_round_trip(

// Should not validate even when a single bit is changed
enc_data[5] = 0x01;
- Receipt::from_signed_data(&crypto, &enc_data)
+ let _ = Receipt::from_signed_data(&crypto, &enc_data)
    .expect_err("should have failed to decrypt using wrong secret");

// Compare receipts
@@ -253,7 +253,7 @@ pub async fn test_encode_decode(vcrypto: &AsyncCryptoSystemGuard<'_>) {
assert!(f2.is_err());
}

- pub async fn test_typed_convert(vcrypto: &AsyncCryptoSystemGuard<'_>) {
+ pub fn test_typed_convert(vcrypto: &AsyncCryptoSystemGuard<'_>) {
let tks1 = format!(
"{}:7lxDEabK_qgjbe38RtBa3IZLrud84P6NhGP-pRTZzdQ",
vcrypto.kind()
@@ -381,7 +381,7 @@ async fn test_operations(vcrypto: &AsyncCryptoSystemGuard<'_>) {
assert_eq!(d4.first_nonzero_bit(), Some(0));
}

- pub async fn test_crypto_key_ordering() {
+ pub fn test_crypto_key_ordering() {
let k1 = CryptoKey::new([
128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0,
@@ -413,7 +413,7 @@ pub async fn test_all() {
let api = crypto_tests_startup().await;
let crypto = api.crypto().unwrap();

- test_crypto_key_ordering().await;
+ test_crypto_key_ordering();

// Test versions
for v in VALID_CRYPTO_KINDS {
@@ -423,7 +423,7 @@ pub async fn test_all() {
test_sign_and_verify(&vcrypto).await;
test_key_conversions(&vcrypto).await;
test_encode_decode(&vcrypto).await;
- test_typed_convert(&vcrypto).await;
+ test_typed_convert(&vcrypto);
test_hash(&vcrypto).await;
test_operations(&vcrypto).await;
}
@ -83,6 +83,7 @@ macro_rules! byte_array_type {
|
||||
derive(Tsify),
|
||||
tsify(into_wasm_abi)
|
||||
)]
|
||||
#[must_use]
|
||||
pub struct $name {
|
||||
pub bytes: [u8; $size],
|
||||
}
|
||||
@ -124,6 +125,7 @@ macro_rules! byte_array_type {
|
||||
}
|
||||
|
||||
// Big endian bit ordering
|
||||
#[must_use]
|
||||
pub fn bit(&self, index: usize) -> bool {
|
||||
assert!(index < ($size * 8));
|
||||
let bi = index / 8;
|
||||
@ -131,6 +133,7 @@ macro_rules! byte_array_type {
|
||||
((self.bytes[bi] >> ti) & 1) != 0
|
||||
}
|
||||
|
||||
#[must_use]
|
||||
pub fn first_nonzero_bit(&self) -> Option<usize> {
|
||||
for i in 0..$size {
|
||||
let b = self.bytes[i];
|
||||
@ -147,6 +150,7 @@ macro_rules! byte_array_type {
|
||||
}
|
||||
|
||||
// Big endian nibble ordering
|
||||
#[must_use]
|
||||
pub fn nibble(&self, index: usize) -> u8 {
|
||||
assert!(index < ($size * 2));
|
||||
let bi = index / 2;
|
||||
@ -157,6 +161,7 @@ macro_rules! byte_array_type {
|
||||
}
|
||||
}
|
||||
|
||||
#[must_use]
|
||||
pub fn first_nonzero_nibble(&self) -> Option<(usize, u8)> {
|
||||
for i in 0..($size * 2) {
|
||||
let n = self.nibble(i);
|
||||
|
@ -1,6 +1,7 @@
|
||||
use super::*;
|
||||
|
||||
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
|
||||
#[must_use]
|
||||
pub struct CryptoTyped<K>
|
||||
where
|
||||
K: Clone
|
||||
|
@ -33,9 +33,11 @@ where
|
||||
+ Hash
|
||||
+ Encodable,
|
||||
{
|
||||
#[must_use]
|
||||
pub fn new() -> Self {
|
||||
Self { items: Vec::new() }
|
||||
}
|
||||
#[must_use]
|
||||
pub fn with_capacity(cap: usize) -> Self {
|
||||
Self {
|
||||
items: Vec::with_capacity(cap),
|
||||
@ -49,6 +51,7 @@ where
|
||||
out.sort_by(compare_crypto_kind);
|
||||
out
|
||||
}
|
||||
#[must_use]
|
||||
pub fn keys(&self) -> Vec<K> {
|
||||
let mut out = Vec::new();
|
||||
for tk in &self.items {
|
||||
@ -56,6 +59,7 @@ where
|
||||
}
|
||||
out
|
||||
}
|
||||
#[must_use]
|
||||
pub fn get(&self, kind: CryptoKind) -> Option<CryptoTyped<K>> {
|
||||
self.items.iter().find(|x| x.kind == kind).copied()
|
||||
}
|
||||
@ -93,15 +97,18 @@ where
|
||||
}
|
||||
}
|
||||
/// Return preferred typed key of our supported crypto kinds
|
||||
#[must_use]
|
||||
pub fn best(&self) -> Option<CryptoTyped<K>> {
|
||||
self.items
|
||||
.first()
|
||||
.copied()
|
||||
.filter(|k| VALID_CRYPTO_KINDS.contains(&k.kind))
|
||||
}
|
||||
#[must_use]
|
||||
pub fn is_empty(&self) -> bool {
|
||||
self.items.is_empty()
|
||||
}
|
||||
#[must_use]
|
||||
pub fn len(&self) -> usize {
|
||||
self.items.len()
|
||||
}
|
||||
|
@ -6,6 +6,7 @@ use super::*;
|
||||
derive(Tsify),
|
||||
tsify(from_wasm_abi, into_wasm_abi)
|
||||
)]
|
||||
#[must_use]
|
||||
pub struct KeyPair {
|
||||
pub key: PublicKey,
|
||||
pub secret: SecretKey,
|
||||
|
@ -11,6 +11,7 @@ pub type CryptoKind = FourCC;
|
||||
|
||||
/// Sort best crypto kinds first
|
||||
/// Better crypto kinds are 'less', ordered toward the front of a list
|
||||
#[must_use]
|
||||
pub fn compare_crypto_kind(a: &CryptoKind, b: &CryptoKind) -> cmp::Ordering {
|
||||
let a_idx = VALID_CRYPTO_KINDS.iter().position(|k| k == a);
|
||||
let b_idx = VALID_CRYPTO_KINDS.iter().position(|k| k == b);
|
||||
@ -32,6 +33,7 @@ pub fn compare_crypto_kind(a: &CryptoKind, b: &CryptoKind) -> cmp::Ordering {
|
||||
}
|
||||
|
||||
/// Intersection of crypto kind vectors
|
||||
#[must_use]
|
||||
pub fn common_crypto_kinds(a: &[CryptoKind], b: &[CryptoKind]) -> Vec<CryptoKind> {
|
||||
let mut out = Vec::new();
|
||||
for ack in a {
|
||||
|
@ -52,6 +52,7 @@ pub struct CryptoSystemVLD0 {
|
||||
}
|
||||
|
||||
impl CryptoSystemVLD0 {
|
||||
#[must_use]
|
||||
pub fn new(registry: VeilidComponentRegistry) -> Self {
|
||||
Self { registry }
|
||||
}
|
||||
|
@ -13,6 +13,7 @@ impl fmt::Debug for BlockStoreInner {
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
#[must_use]
|
||||
pub struct BlockStore {
|
||||
registry: VeilidComponentRegistry,
|
||||
inner: Mutex<BlockStoreInner>,
|
||||
@ -31,12 +32,19 @@ impl BlockStore {
|
||||
}
|
||||
}
|
||||
|
||||
#[instrument(level = "debug", skip(self))]
|
||||
async fn init_async(&self) -> EyreResult<()> {
|
||||
// Ensure permissions are correct
|
||||
// ensure_file_private_owner(&dbpath)?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[instrument(level = "debug", skip(self), err)]
|
||||
async fn post_init_async(&self) -> EyreResult<()> {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[instrument(level = "debug", skip(self))]
|
||||
async fn pre_terminate_async(&self) {}
|
||||
|
||||
#[instrument(level = "debug", skip(self))]
|
||||
async fn terminate_async(&self) {}
|
||||
}
|
||||
|
@ -15,6 +15,7 @@ impl fmt::Debug for ProtectedStoreInner {
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
#[must_use]
|
||||
pub struct ProtectedStore {
|
||||
registry: VeilidComponentRegistry,
|
||||
inner: Mutex<ProtectedStoreInner>,
|
||||
@ -37,9 +38,9 @@ impl ProtectedStore {
|
||||
}
|
||||
|
||||
#[instrument(level = "trace", skip(self), err)]
|
||||
pub async fn delete_all(&self) -> EyreResult<()> {
|
||||
pub fn delete_all(&self) -> EyreResult<()> {
|
||||
for kpsk in &KNOWN_PROTECTED_STORE_KEYS {
|
||||
if let Err(e) = self.remove_user_secret(kpsk).await {
|
||||
if let Err(e) = self.remove_user_secret(kpsk) {
|
||||
error!("failed to delete '{}': {}", kpsk, e);
|
||||
} else {
|
||||
veilid_log!(self debug "deleted table '{}'", kpsk);
|
||||
@ -103,7 +104,7 @@ impl ProtectedStore {
|
||||
};
|
||||
|
||||
if delete {
|
||||
self.delete_all().await?;
|
||||
self.delete_all()?;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
@ -133,7 +134,7 @@ impl ProtectedStore {
|
||||
}
|
||||
|
||||
#[instrument(level = "trace", skip(self, value), ret, err)]
|
||||
pub async fn save_user_secret_string<K: AsRef<str> + fmt::Debug, V: AsRef<str> + fmt::Debug>(
|
||||
pub fn save_user_secret_string<K: AsRef<str> + fmt::Debug, V: AsRef<str> + fmt::Debug>(
|
||||
&self,
|
||||
key: K,
|
||||
value: V,
|
||||
@ -152,7 +153,7 @@ impl ProtectedStore {
|
||||
}
|
||||
|
||||
#[instrument(level = "trace", skip(self), err)]
|
||||
pub async fn load_user_secret_string<K: AsRef<str> + fmt::Debug>(
|
||||
pub fn load_user_secret_string<K: AsRef<str> + fmt::Debug>(
|
||||
&self,
|
||||
key: K,
|
||||
) -> EyreResult<Option<String>> {
|
||||
@ -170,22 +171,22 @@ impl ProtectedStore {
|
||||
}
|
||||
|
||||
#[instrument(level = "trace", skip(self, value))]
|
||||
pub async fn save_user_secret_json<K, T>(&self, key: K, value: &T) -> EyreResult<bool>
|
||||
pub fn save_user_secret_json<K, T>(&self, key: K, value: &T) -> EyreResult<bool>
|
||||
where
|
||||
K: AsRef<str> + fmt::Debug,
|
||||
T: serde::Serialize,
|
||||
{
|
||||
let v = serde_json::to_vec(value)?;
|
||||
self.save_user_secret(&key, &v).await
|
||||
self.save_user_secret(&key, &v)
|
||||
}
|
||||
|
||||
#[instrument(level = "trace", skip(self))]
|
||||
pub async fn load_user_secret_json<K, T>(&self, key: K) -> EyreResult<Option<T>>
|
||||
pub fn load_user_secret_json<K, T>(&self, key: K) -> EyreResult<Option<T>>
|
||||
where
|
||||
K: AsRef<str> + fmt::Debug,
|
||||
T: for<'de> serde::de::Deserialize<'de>,
|
||||
{
|
||||
let out = self.load_user_secret(key).await?;
|
||||
let out = self.load_user_secret(key)?;
|
||||
let b = match out {
|
||||
Some(v) => v,
|
||||
None => {
|
||||
@ -198,7 +199,7 @@ impl ProtectedStore {
|
||||
}
|
||||
|
||||
#[instrument(level = "trace", skip(self, value), ret, err)]
|
||||
pub async fn save_user_secret<K: AsRef<str> + fmt::Debug>(
|
||||
pub fn save_user_secret<K: AsRef<str> + fmt::Debug>(
|
||||
&self,
|
||||
key: K,
|
||||
value: &[u8],
|
||||
@ -206,15 +207,15 @@ impl ProtectedStore {
|
||||
let mut s = BASE64URL_NOPAD.encode(value);
|
||||
s.push('!');
|
||||
|
||||
self.save_user_secret_string(key, s.as_str()).await
|
||||
self.save_user_secret_string(key, s.as_str())
|
||||
}
|
||||
|
||||
#[instrument(level = "trace", skip(self), err)]
|
||||
pub async fn load_user_secret<K: AsRef<str> + fmt::Debug>(
|
||||
pub fn load_user_secret<K: AsRef<str> + fmt::Debug>(
|
||||
&self,
|
||||
key: K,
|
||||
) -> EyreResult<Option<Vec<u8>>> {
|
||||
let mut s = match self.load_user_secret_string(key).await? {
|
||||
let mut s = match self.load_user_secret_string(key)? {
|
||||
Some(s) => s,
|
||||
None => {
|
||||
return Ok(None);
|
||||
@ -244,7 +245,7 @@ impl ProtectedStore {
|
||||
}
|
||||
|
||||
#[instrument(level = "trace", skip(self), ret, err)]
|
||||
pub async fn remove_user_secret<K: AsRef<str> + fmt::Debug>(&self, key: K) -> EyreResult<bool> {
|
||||
pub fn remove_user_secret<K: AsRef<str> + fmt::Debug>(&self, key: K) -> EyreResult<bool> {
|
||||
let inner = self.inner.lock();
|
||||
match inner
|
||||
.keyring_manager
|
||||
|
@ -1,5 +1,6 @@
|
||||
use super::*;
|
||||
|
||||
#[expect(clippy::unused_async)]
|
||||
pub async fn get_outbound_relay_peer(
|
||||
_routing_domain: routing_table::RoutingDomain,
|
||||
) -> Option<Arc<routing_table::PeerInfo>> {
|
||||
@ -22,6 +23,7 @@ cfg_if! {
|
||||
} else if #[cfg(feature="rt-tokio")] {
|
||||
use hickory_resolver::{config, TokioAsyncResolver as AsyncResolver, system_conf::read_system_conf};
|
||||
|
||||
#[expect(clippy::unused_async)]
|
||||
async fn resolver(
|
||||
config: config::ResolverConfig,
|
||||
options: config::ResolverOpts,
|
||||
@ -48,7 +50,7 @@ cfg_if! {
|
||||
cfg_if! {
|
||||
if #[cfg(not(target_os = "windows"))] {
|
||||
|
||||
async fn with_resolvers<R, F: FnOnce(Arc<Resolvers>) -> SendPinBoxFuture<R>>(closure: F) -> R {
|
||||
async fn with_resolvers<R, F: FnOnce(Arc<Resolvers>) -> PinBoxFutureStatic<R>>(closure: F) -> R {
|
||||
let mut resolvers_lock = RESOLVERS.lock().await;
|
||||
if let Some(r) = &*resolvers_lock {
|
||||
return closure(r.clone()).await;
|
||||
@ -232,6 +234,7 @@ pub async fn ptr_lookup(ip_addr: IpAddr) -> EyreResult<String> {
|
||||
}
|
||||
}
|
||||
|
||||
#[must_use]
|
||||
pub fn env_variable_is_defined<S: AsRef<str>>(s: S) -> bool {
|
||||
match std::env::var(s.as_ref()) {
|
||||
Ok(v) => !v.is_empty(),
|
||||
|
@ -13,6 +13,7 @@ impl fmt::Debug for BlockStoreInner {
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
#[must_use]
|
||||
pub struct BlockStore {
|
||||
registry: VeilidComponentRegistry,
|
||||
inner: Mutex<BlockStoreInner>,
|
||||
@ -31,9 +32,19 @@ impl BlockStore {
|
||||
}
|
||||
}
|
||||
|
||||
#[instrument(level = "debug", skip(self))]
|
||||
async fn init_async(&self) -> EyreResult<()> {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[instrument(level = "debug", skip(self), err)]
|
||||
async fn post_init_async(&self) -> EyreResult<()> {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[instrument(level = "debug", skip(self))]
|
||||
async fn pre_terminate_async(&self) {}
|
||||
|
||||
#[instrument(level = "debug", skip(self))]
|
||||
async fn terminate_async(&self) {}
|
||||
}
|
||||
|
@ -6,6 +6,7 @@ use web_sys::*;
|
||||
impl_veilid_log_facility!("pstore");
|
||||
|
||||
#[derive(Debug)]
|
||||
#[must_use]
|
||||
pub struct ProtectedStore {
|
||||
registry: VeilidComponentRegistry,
|
||||
}
|
||||
@ -18,9 +19,9 @@ impl ProtectedStore {
|
||||
}
|
||||
|
||||
#[instrument(level = "trace", skip(self), err)]
|
||||
pub async fn delete_all(&self) -> EyreResult<()> {
|
||||
pub fn delete_all(&self) -> EyreResult<()> {
|
||||
for kpsk in &KNOWN_PROTECTED_STORE_KEYS {
|
||||
if let Err(e) = self.remove_user_secret(kpsk).await {
|
||||
if let Err(e) = self.remove_user_secret(kpsk) {
|
||||
error!("failed to delete '{}': {}", kpsk, e);
|
||||
} else {
|
||||
veilid_log!(self debug "deleted table '{}'", kpsk);
|
||||
@ -30,20 +31,20 @@ impl ProtectedStore {
|
||||
}
|
||||
|
||||
#[instrument(level = "debug", skip(self), err)]
|
||||
pub(crate) async fn init_async(&self) -> EyreResult<()> {
|
||||
async fn init_async(&self) -> EyreResult<()> {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[instrument(level = "debug", skip(self), err)]
|
||||
pub(crate) async fn post_init_async(&self) -> EyreResult<()> {
|
||||
async fn post_init_async(&self) -> EyreResult<()> {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[instrument(level = "debug", skip(self))]
|
||||
pub(crate) async fn pre_terminate_async(&self) {}
|
||||
async fn pre_terminate_async(&self) {}
|
||||
|
||||
#[instrument(level = "debug", skip(self))]
|
||||
pub(crate) async fn terminate_async(&self) {}
|
||||
async fn terminate_async(&self) {}
|
||||
|
||||
fn browser_key_name(&self, key: &str) -> String {
|
||||
let config = self.config();
|
||||
@ -55,8 +56,8 @@ impl ProtectedStore {
|
||||
}
|
||||
}
|
||||
|
||||
//#[instrument(level = "trace", skip(self, value), ret, err)]
|
||||
pub async fn save_user_secret_string<K: AsRef<str> + fmt::Debug, V: AsRef<str> + fmt::Debug>(
|
||||
#[instrument(level = "trace", skip(self, key, value))]
|
||||
pub fn save_user_secret_string<K: AsRef<str> + fmt::Debug, V: AsRef<str> + fmt::Debug>(
|
||||
&self,
|
||||
key: K,
|
||||
value: V,
|
||||
@ -98,8 +99,8 @@ impl ProtectedStore {
|
||||
}
|
||||
}
|
||||
|
||||
#[instrument(level = "trace", skip(self), err)]
|
||||
pub async fn load_user_secret_string<K: AsRef<str> + fmt::Debug>(
|
||||
#[instrument(level = "trace", skip(self, key))]
|
||||
pub fn load_user_secret_string<K: AsRef<str> + fmt::Debug>(
|
||||
&self,
|
||||
key: K,
|
||||
) -> EyreResult<Option<String>> {
|
||||
@ -133,22 +134,22 @@ impl ProtectedStore {
|
||||
}
|
||||
|
||||
#[instrument(level = "trace", skip(self, value))]
|
||||
pub async fn save_user_secret_json<K, T>(&self, key: K, value: &T) -> EyreResult<bool>
|
||||
pub fn save_user_secret_json<K, T>(&self, key: K, value: &T) -> EyreResult<bool>
|
||||
where
|
||||
K: AsRef<str> + fmt::Debug,
|
||||
T: serde::Serialize,
|
||||
{
|
||||
let v = serde_json::to_vec(value)?;
|
||||
self.save_user_secret(key, &v).await
|
||||
self.save_user_secret(key, &v)
|
||||
}
|
||||
|
||||
#[instrument(level = "trace", skip(self))]
|
||||
pub async fn load_user_secret_json<K, T>(&self, key: K) -> EyreResult<Option<T>>
|
||||
pub fn load_user_secret_json<K, T>(&self, key: K) -> EyreResult<Option<T>>
|
||||
where
|
||||
K: AsRef<str> + fmt::Debug,
|
||||
T: for<'de> serde::de::Deserialize<'de>,
|
||||
{
|
||||
let out = self.load_user_secret(key).await?;
|
||||
let out = self.load_user_secret(key)?;
|
||||
let b = match out {
|
||||
Some(v) => v,
|
||||
None => {
|
||||
@ -161,7 +162,7 @@ impl ProtectedStore {
|
||||
}
|
||||
|
||||
#[instrument(level = "trace", skip(self, value), ret, err)]
|
||||
pub async fn save_user_secret<K: AsRef<str> + fmt::Debug>(
|
||||
pub fn save_user_secret<K: AsRef<str> + fmt::Debug>(
|
||||
&self,
|
||||
key: K,
|
||||
value: &[u8],
|
||||
@ -169,15 +170,15 @@ impl ProtectedStore {
|
||||
let mut s = BASE64URL_NOPAD.encode(value);
|
||||
s.push('!');
|
||||
|
||||
self.save_user_secret_string(key, s.as_str()).await
|
||||
self.save_user_secret_string(key, s.as_str())
|
||||
}
|
||||
|
||||
#[instrument(level = "trace", skip(self), err)]
|
||||
pub async fn load_user_secret<K: AsRef<str> + fmt::Debug>(
|
||||
pub fn load_user_secret<K: AsRef<str> + fmt::Debug>(
|
||||
&self,
|
||||
key: K,
|
||||
) -> EyreResult<Option<Vec<u8>>> {
|
||||
let mut s = match self.load_user_secret_string(key).await? {
|
||||
let mut s = match self.load_user_secret_string(key)? {
|
||||
Some(s) => s,
|
||||
None => {
|
||||
return Ok(None);
|
||||
@ -207,7 +208,7 @@ impl ProtectedStore {
|
||||
}
|
||||
|
||||
#[instrument(level = "trace", skip(self), ret, err)]
|
||||
pub async fn remove_user_secret<K: AsRef<str> + fmt::Debug>(&self, key: K) -> EyreResult<bool> {
|
||||
pub fn remove_user_secret<K: AsRef<str> + fmt::Debug>(&self, key: K) -> EyreResult<bool> {
|
||||
if is_browser() {
|
||||
let win = match window() {
|
||||
Some(w) => w,
|
||||
|
@ -2,6 +2,7 @@ use super::*;
|
||||
|
||||
//use js_sys::*;
|
||||
|
||||
#[expect(clippy::unused_async)]
|
||||
pub async fn get_outbound_relay_peer(
|
||||
_routing_domain: routing_table::RoutingDomain,
|
||||
) -> Option<Arc<routing_table::PeerInfo>> {
|
||||
@ -34,14 +35,17 @@ pub async fn get_outbound_relay_peer(
|
||||
// }
|
||||
// }
|
||||
|
||||
#[expect(clippy::unused_async)]
|
||||
pub async fn txt_lookup<S: AsRef<str>>(_host: S) -> EyreResult<Vec<String>> {
|
||||
bail!("wasm does not support txt lookup")
|
||||
}
|
||||
|
||||
#[expect(clippy::unused_async)]
|
||||
pub async fn ptr_lookup(_ip_addr: IpAddr) -> EyreResult<String> {
|
||||
bail!("wasm does not support ptr lookup")
|
||||
}
|
||||
|
||||
#[must_use]
|
||||
pub fn env_variable_is_defined<S: AsRef<str>>(_s: S) -> bool {
|
||||
false
|
||||
}
|
||||
|
@@ -22,9 +22,6 @@
//! * `default-wasm` - When building for the `wasm32` architecture, use this to enable `wasm-bindgen-futures` as the async runtime.
//!

- #![deny(clippy::all)]
- #![allow(clippy::comparison_chain, clippy::upper_case_acronyms)]
- #![deny(unused_must_use)]
#![recursion_limit = "256"]

cfg_if::cfg_if! {
@@ -73,6 +70,16 @@ pub use veilid_tools as tools;

/// The on-the-wire serialization format for Veilid RPC.
pub mod veilid_capnp {
+ #![allow(
+     clippy::all,
+     clippy::must_use_candidate,
+     clippy::large_futures,
+     clippy::large_stack_arrays,
+     clippy::large_stack_frames,
+     clippy::large_types_passed_by_value,
+     clippy::unused_async,
+     clippy::ptr_cast_constness
+ )]
include!("../proto/veilid_capnp.rs");
}

@@ -80,11 +87,13 @@ pub mod veilid_capnp {
pub mod tests;

/// Return the cargo package version of veilid-core in string format.
+ #[must_use]
pub fn veilid_version_string() -> String {
env!("CARGO_PKG_VERSION").to_owned()
}

/// Return the cargo package version of veilid-core in tuple format.
+ #[must_use]
pub fn veilid_version() -> (u32, u32, u32) {
(
u32::from_str(env!("CARGO_PKG_VERSION_MAJOR")).unwrap(),
@ -20,6 +20,7 @@ struct ApiTracingLayerInner {
|
||||
/// with many copies of Veilid running.
|
||||
|
||||
#[derive(Clone)]
|
||||
#[must_use]
|
||||
pub struct ApiTracingLayer {}
|
||||
|
||||
static API_LOGGER_INNER: Mutex<Option<ApiTracingLayerInner>> = Mutex::new(None);
|
||||
|
@ -7,6 +7,7 @@ use tracing_subscriber::{
|
||||
},
|
||||
};
|
||||
|
||||
#[must_use]
|
||||
pub struct FmtStripFields {
|
||||
/// The inner formatter that will be used to format fields
|
||||
fmt: DefaultFields,
|
||||
|
@ -11,6 +11,7 @@ struct VeilidLayerFilterInner {
|
||||
}
|
||||
|
||||
#[derive(Clone)]
|
||||
#[must_use]
|
||||
pub struct VeilidLayerFilter {
|
||||
inner: Arc<RwLock<VeilidLayerFilterInner>>,
|
||||
}
|
||||
@ -19,6 +20,7 @@ pub const VEILID_LOG_KEY_FIELD: &str = "__VEILID_LOG_KEY";
|
||||
pub type VeilidLayerLogKeyFilter = Arc<dyn Fn(&str) -> bool + Send + Sync>;
|
||||
|
||||
impl VeilidLayerFilter {
|
||||
#[must_use]
|
||||
pub fn make_veilid_log_key(program_name: &str, namespace: &str) -> &'static str {
|
||||
if namespace.is_empty() {
|
||||
program_name.to_static_str()
|
||||
@ -66,6 +68,7 @@ impl VeilidLayerFilter {
|
||||
VeilidConfigLogLevel::from_tracing_level_filter(inner.max_level)
|
||||
}
|
||||
|
||||
#[must_use]
|
||||
pub fn ignore_list(&self) -> Vec<String> {
|
||||
let inner = self.inner.read();
|
||||
inner.ignore_list.clone()
|
||||
@ -113,6 +116,7 @@ impl VeilidLayerFilter {
|
||||
true
|
||||
}
|
||||
|
||||
#[must_use]
|
||||
pub fn apply_ignore_change(ignore_list: &[String], target_change: String) -> Vec<String> {
|
||||
let mut ignore_list = ignore_list.to_vec();
|
||||
let target_change = target_change
|
||||
|
@@ -131,7 +131,7 @@ impl ConnectionManager {
self.arc.connection_inactivity_timeout_ms
}

- pub async fn startup(&self) -> EyreResult<()> {
+ pub fn startup(&self) -> EyreResult<()> {
let guard = self.arc.startup_lock.startup()?;

veilid_log!(self debug "startup connection manager");
@@ -158,7 +158,7 @@ impl ConnectionManager {
}

// Spawn the reconnection processor
- self.arc.reconnection_processor.init().await;
+ self.arc.reconnection_processor.init();

guard.success();

@@ -696,7 +696,7 @@ impl ConnectionManager {
);
}

- pub async fn debug_print(&self) -> String {
+ pub fn debug_print(&self) -> String {
//let inner = self.arc.inner.lock();
format!(
"Connection Table:\n\n{}",
@@ -455,13 +455,16 @@ impl ConnectionTable {
let conn = inner.conn_by_id[protocol_index].remove(&id).unwrap();
// id_by_flow
let flow = conn.flow();
- inner.id_by_flow.remove(&flow).unwrap();
+ let _ = inner
+     .id_by_flow
+     .remove(&flow)
+     .expect("must have removed something here");
// ids_by_remote
let remote = flow.remote();
let ids = inner.ids_by_remote.get_mut(&remote).unwrap();
for (n, elem) in ids.iter().enumerate() {
if *elem == id {
- ids.remove(n);
+ let _ = ids.remove(n);
if ids.is_empty() {
inner.ids_by_remote.remove(&remote).unwrap();
}
@@ -17,11 +17,8 @@ impl NetworkManager {
let json_bytes = serialize_json(bootstrap_peerinfo).as_bytes().to_vec();

// Reply with a chunk of signed routing table
- match self
-     .net()
-     .send_data_to_existing_flow(flow, json_bytes)
-     .await?
- {
+ let net = self.net();
+ match pin_future_closure!(net.send_data_to_existing_flow(flow, json_bytes)).await? {
SendDataToExistingFlowResult::Sent(_) => {
// Bootstrap reply was sent
Ok(NetworkResult::value(()))
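`pin_future_closure!` here (and `pin_future!` later in this merge) are Veilid helper macros; judging only from the diff, their purpose is to box a large awaited future so the enclosing handler stays under the new `large_futures` threshold. The sketch below uses plain `Box::pin` to show the underlying idea; the macros' exact definitions are not part of this diff:

async fn send_data_to_existing_flow(payload: Vec<u8>) -> Result<usize, String> {
    // imagine a large state machine here (buffers, nested awaits, ...)
    Ok(payload.len())
}

async fn handle_boot_request(payload: Vec<u8>) -> Result<usize, String> {
    // Awaiting through Box::pin heap-allocates the callee's state, keeping
    // handle_boot_request's own future small.
    Box::pin(send_data_to_existing_flow(payload)).await
}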
@ -12,6 +12,7 @@ mod connection_manager;
|
||||
mod connection_table;
|
||||
mod direct_boot;
|
||||
mod network_connection;
|
||||
mod node_contact_method_cache;
|
||||
mod receipt_manager;
|
||||
mod send_data;
|
||||
mod stats;
|
||||
@ -25,6 +26,7 @@ pub mod tests;
|
||||
|
||||
pub use connection_manager::*;
|
||||
pub use network_connection::*;
|
||||
pub(crate) use node_contact_method_cache::*;
|
||||
pub use receipt_manager::*;
|
||||
pub use stats::*;
|
||||
pub(crate) use types::*;
|
||||
@ -55,7 +57,6 @@ pub const MAX_MESSAGE_SIZE: usize = MAX_ENVELOPE_SIZE;
|
||||
pub const IPADDR_TABLE_SIZE: usize = 1024;
|
||||
pub const IPADDR_MAX_INACTIVE_DURATION_US: TimestampDuration =
|
||||
TimestampDuration::new(300_000_000u64); // 5 minutes
|
||||
pub const NODE_CONTACT_METHOD_CACHE_SIZE: usize = 1024;
|
||||
pub const ADDRESS_FILTER_TASK_INTERVAL_SECS: u32 = 60;
|
||||
pub const BOOT_MAGIC: &[u8; 4] = b"BOOT";
|
||||
pub const HOLE_PUNCH_DELAY_MS: u32 = 100;
|
||||
@ -75,20 +76,32 @@ struct ClientAllowlistEntry {
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct SendDataMethod {
|
||||
pub struct SendDataResult {
|
||||
/// How the data was sent, possibly to a relay
|
||||
pub contact_method: NodeContactMethod,
|
||||
/// Pre-relayed contact method
|
||||
pub opt_relayed_contact_method: Option<NodeContactMethod>,
|
||||
opt_contact_method: Option<NodeContactMethod>,
|
||||
/// Original contact method for the destination if it was relayed
|
||||
opt_relayed_contact_method: Option<NodeContactMethod>,
|
||||
/// The specific flow used to send the data
|
||||
pub unique_flow: UniqueFlow,
|
||||
unique_flow: UniqueFlow,
|
||||
}
|
||||
|
||||
impl SendDataResult {
|
||||
pub fn is_direct(&self) -> bool {
|
||||
self.opt_relayed_contact_method.is_none()
|
||||
&& matches!(
|
||||
&self.opt_contact_method,
|
||||
Some(ncm) if ncm.is_direct()
|
||||
)
|
||||
}
|
||||
|
||||
pub fn unique_flow(&self) -> UniqueFlow {
|
||||
self.unique_flow
|
||||
}
|
||||
}
|
||||
|
||||
/// Mechanism required to contact another node
|
||||
#[derive(Clone, Debug)]
|
||||
pub enum NodeContactMethod {
|
||||
/// Node is not reachable by any means
|
||||
Unreachable,
|
||||
pub enum NodeContactMethodKind {
|
||||
/// Connection should have already existed
|
||||
Existing,
|
||||
/// Contact the node directly
|
||||
@ -102,14 +115,29 @@ pub enum NodeContactMethod {
|
||||
/// Must use outbound relay to reach the node
|
||||
OutboundRelay(FilteredNodeRef),
|
||||
}
|
||||
#[derive(Clone, Debug, PartialEq, Eq, Ord, PartialOrd, Hash)]
|
||||
struct NodeContactMethodCacheKey {
|
||||
node_ids: TypedKeyGroup,
|
||||
own_node_info_ts: Timestamp,
|
||||
target_node_info_ts: Timestamp,
|
||||
target_node_ref_filter: NodeRefFilter,
|
||||
target_node_ref_sequencing: Sequencing,
|
||||
dial_info_failures_map: BTreeMap<DialInfo, Timestamp>,
|
||||
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct NodeContactMethod {
|
||||
ncm_key: NodeContactMethodCacheKey,
|
||||
ncm_kind: NodeContactMethodKind,
|
||||
}
|
||||
|
||||
impl NodeContactMethod {
|
||||
pub fn is_direct(&self) -> bool {
|
||||
matches!(self.ncm_kind, NodeContactMethodKind::Direct(_))
|
||||
}
|
||||
pub fn direct_dial_info(&self) -> Option<DialInfo> {
|
||||
match &self.ncm_kind {
|
||||
NodeContactMethodKind::Direct(v) => Some(v.clone()),
|
||||
_ => None,
|
||||
}
|
||||
}
|
||||
// pub fn kind(&self) -> &NodeContactMethodKind {
|
||||
// &self.ncm_kind
|
||||
// }
|
||||
// pub fn into_kind(self) -> NodeContactMethodKind {
|
||||
// self.ncm_kind
|
||||
// }
|
||||
}
|
||||
|
||||
enum SendDataToExistingFlowResult {
|
||||
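The hunk above replaces the public-field `SendDataMethod` with a `SendDataResult` whose fields are private behind small accessors (`is_direct()`, `unique_flow()`), and splits the old `NodeContactMethod` enum into a `NodeContactMethodKind` enum wrapped by a struct that also carries its cache key. A reduced sketch of that encapsulation pattern (type and field names simplified, not the real definitions):

#[derive(Clone, Debug)]
enum ContactKind {
    Existing,
    Direct(String), // dial info, simplified to a String here
}

#[derive(Clone, Debug)]
struct ContactMethod {
    // private field: callers go through methods instead of matching directly
    kind: ContactKind,
}

impl ContactMethod {
    fn is_direct(&self) -> bool {
        matches!(self.kind, ContactKind::Direct(_))
    }

    fn direct_dial_info(&self) -> Option<String> {
        match &self.kind {
            ContactKind::Direct(v) => Some(v.clone()),
            ContactKind::Existing => None,
        }
    }
}

fn main() {
    let cm = ContactMethod { kind: ContactKind::Direct("udp:203.0.113.7:5150".to_string()) };
    assert!(cm.is_direct());
    assert_eq!(cm.direct_dial_info().as_deref(), Some("udp:203.0.113.7:5150"));
}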
@ -146,7 +174,7 @@ impl Default for NetworkManagerStartupContext {
|
||||
struct NetworkManagerInner {
|
||||
stats: NetworkManagerStats,
|
||||
client_allowlist: LruCache<TypedKey, ClientAllowlistEntry>,
|
||||
node_contact_method_cache: LruCache<NodeContactMethodCacheKey, NodeContactMethod>,
|
||||
node_contact_method_cache: NodeContactMethodCache,
|
||||
address_check: Option<AddressCheck>,
|
||||
peer_info_change_subscription: Option<EventBusSubscription>,
|
||||
socket_address_change_subscription: Option<EventBusSubscription>,
|
||||
@ -181,9 +209,6 @@ impl fmt::Debug for NetworkManager {
|
||||
//.field("registry", &self.registry)
|
||||
.field("inner", &self.inner)
|
||||
.field("address_filter", &self.address_filter)
|
||||
// .field("components", &self.components)
|
||||
// .field("rolling_transfers_task", &self.rolling_transfers_task)
|
||||
// .field("address_filter_task", &self.address_filter_task)
|
||||
.field("network_key", &self.network_key)
|
||||
.field("startup_context", &self.startup_context)
|
||||
.finish()
|
||||
@ -195,7 +220,7 @@ impl NetworkManager {
|
||||
NetworkManagerInner {
|
||||
stats: NetworkManagerStats::default(),
|
||||
client_allowlist: LruCache::new_unbounded(),
|
||||
node_contact_method_cache: LruCache::new(NODE_CONTACT_METHOD_CACHE_SIZE),
|
||||
node_contact_method_cache: NodeContactMethodCache::new(),
|
||||
address_check: None,
|
||||
peer_info_change_subscription: None,
|
||||
socket_address_change_subscription: None,
|
||||
@ -300,10 +325,12 @@ impl NetworkManager {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[expect(clippy::unused_async)]
|
||||
async fn post_init_async(&self) -> EyreResult<()> {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[expect(clippy::unused_async)]
|
||||
async fn pre_terminate_async(&self) {}
|
||||
|
||||
#[instrument(level = "debug", skip_all)]
|
||||
@ -347,7 +374,7 @@ impl NetworkManager {
|
||||
}
|
||||
|
||||
// Start network components
|
||||
connection_manager.startup().await?;
|
||||
connection_manager.startup()?;
|
||||
match net.startup().await? {
|
||||
StartupDisposition::Success => {}
|
||||
StartupDisposition::BindRetry => {
|
||||
@ -355,7 +382,7 @@ impl NetworkManager {
|
||||
}
|
||||
}
|
||||
|
||||
receipt_manager.startup().await?;
|
||||
receipt_manager.startup()?;
|
||||
|
||||
veilid_log!(self trace "NetworkManager::internal_startup end");
|
||||
|
||||
@ -823,7 +850,7 @@ impl NetworkManager {
|
||||
node_ref: FilteredNodeRef,
|
||||
destination_node_ref: Option<NodeRef>,
|
||||
body: B,
|
||||
) -> EyreResult<NetworkResult<SendDataMethod>> {
|
||||
) -> EyreResult<NetworkResult<SendDataResult>> {
|
||||
let Ok(_guard) = self.startup_context.startup_lock.enter() else {
|
||||
return Ok(NetworkResult::no_connection_other("network is not started"));
|
||||
};
|
||||
@ -890,7 +917,7 @@ impl NetworkManager {
|
||||
// Called when a packet potentially containing an RPC envelope is received by a low-level
|
||||
// network protocol handler. Processes the envelope, authenticates and decrypts the RPC message
|
||||
// and passes it to the RPC handler
|
||||
#[instrument(level = "trace", target = "net", skip_all)]
|
||||
//#[instrument(level = "trace", target = "net", skip_all)]
|
||||
async fn on_recv_envelope(&self, data: &mut [u8], flow: Flow) -> EyreResult<bool> {
|
||||
let Ok(_guard) = self.startup_context.startup_lock.enter() else {
|
||||
return Ok(false);
|
||||
@ -931,13 +958,13 @@ impl NetworkManager {
|
||||
|
||||
// Is this a direct bootstrap request instead of an envelope?
|
||||
if data[0..4] == *BOOT_MAGIC {
|
||||
network_result_value_or_log!(self self.handle_boot_request(flow).await? => [ format!(": flow={:?}", flow) ] {});
|
||||
network_result_value_or_log!(self pin_future!(self.handle_boot_request(flow)).await? => [ format!(": flow={:?}", flow) ] {});
|
||||
return Ok(true);
|
||||
}
|
||||
|
||||
// Is this an out-of-band receipt instead of an envelope?
|
||||
if data[0..3] == *RECEIPT_MAGIC {
|
||||
network_result_value_or_log!(self self.handle_out_of_band_receipt(data).await => [ format!(": data.len={}", data.len()) ] {});
|
||||
network_result_value_or_log!(self pin_future!(self.handle_out_of_band_receipt(data)).await => [ format!(": data.len={}", data.len()) ] {});
|
||||
return Ok(true);
|
||||
}
|
||||
|
||||
@ -1071,18 +1098,9 @@ impl NetworkManager {
|
||||
|
||||
// Relay the packet to the desired destination
|
||||
veilid_log!(self trace "relaying {} bytes to {}", data.len(), relay_nr);
|
||||
|
||||
network_result_value_or_log!(self match self.send_data(relay_nr, data.to_vec())
|
||||
.await {
|
||||
Ok(v) => v,
|
||||
Err(e) => {
|
||||
veilid_log!(self debug "failed to forward envelope: {}" ,e);
|
||||
return Ok(false);
|
||||
}
|
||||
} => [ format!(": relay_nr={}, data.len={}", relay_nr, data.len()) ] {
|
||||
return Ok(false);
|
||||
}
|
||||
);
|
||||
if let Err(e) = pin_future!(self.send_data(relay_nr, data.to_vec())).await {
|
||||
veilid_log!(self debug "failed to relay envelope: {}" ,e);
|
||||
}
|
||||
}
|
||||
// Inform caller that we dealt with the envelope, but did not process it locally
|
||||
return Ok(false);
|
||||
|
@ -3,12 +3,15 @@
|
||||
use super::*;
|
||||
use futures_util::stream::FuturesUnordered;
|
||||
use igd_manager::{IGDAddressType, IGDProtocolType};
|
||||
use stop_token::future::FutureExt as _;
|
||||
|
||||
impl_veilid_log_facility!("net");
|
||||
|
||||
const PORT_MAP_VALIDATE_TRY_COUNT: usize = 3;
|
||||
const PORT_MAP_VALIDATE_DELAY_MS: u32 = 500;
|
||||
const PORT_MAP_TRY_COUNT: usize = 3;
|
||||
const EXTERNAL_INFO_NODE_COUNT: usize = 20;
|
||||
const EXTERNAL_INFO_CONCURRENCY: usize = 20;
|
||||
const EXTERNAL_INFO_VALIDATIONS: usize = 5;
|
||||
|
||||
// Detection result of dial info detection futures
|
||||
@ -26,6 +29,82 @@ pub struct DetectionResult {
|
||||
pub external_address_types: AddressTypeSet,
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug)]
|
||||
enum DetectionResultKind {
|
||||
Result {
|
||||
result: DetectionResult,
|
||||
possibilities: Vec<DialInfoClassPossibility>,
|
||||
},
|
||||
Failure {
|
||||
possibilities: Vec<DialInfoClassPossibility>,
|
||||
},
|
||||
}
|
||||
////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
type DialInfoClassPossibility = (DialInfoClass, usize);
|
||||
|
||||
#[derive(Debug)]
|
||||
struct DialInfoClassAllPossibilities {
|
||||
remaining: BTreeMap<DialInfoClass, usize>,
|
||||
}
|
||||
|
||||
impl DialInfoClassAllPossibilities {
|
||||
pub fn new() -> Self {
|
||||
Self {
|
||||
remaining: BTreeMap::new(),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn add(&mut self, possibilities: &[DialInfoClassPossibility]) {
|
||||
for (k, v) in possibilities {
|
||||
*self.remaining.entry(*k).or_default() += v;
|
||||
}
|
||||
}
|
||||
pub fn remove(&mut self, possibilities: &[DialInfoClassPossibility]) {
|
||||
for (k, v) in possibilities {
|
||||
*self.remaining.entry(*k).or_default() -= v;
|
||||
}
|
||||
}
|
||||
pub fn any_better(&mut self, dial_info_class: DialInfoClass) -> bool {
|
||||
let best_available_order: [DialInfoClassSet; 4] = [
|
||||
DialInfoClass::Mapped.into(),
|
||||
DialInfoClass::Direct | DialInfoClass::Blocked,
|
||||
DialInfoClass::FullConeNAT.into(),
|
||||
DialInfoClass::AddressRestrictedNAT | DialInfoClass::PortRestrictedNAT,
|
||||
];
|
||||
|
||||
for bestdicset in best_available_order {
|
||||
// Already got the best we've checked so far?
|
||||
if bestdicset.contains(dial_info_class) {
|
||||
// We can just stop here since nothing else is going to be better
|
||||
return false;
|
||||
}
|
||||
|
||||
// Get the total remaining possibilities left at this level
|
||||
let mut remaining = 0usize;
|
||||
for bestdic in bestdicset {
|
||||
remaining += self.remaining.get(&bestdic).copied().unwrap_or_default()
|
||||
}
|
||||
|
||||
if remaining > 0 {
|
||||
// There's some things worth waiting for that could be better than dial_info_class
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
||||
// Nothing left to wait for
|
||||
false
|
||||
}
|
||||
}
|
||||
|
||||
impl Default for DialInfoClassAllPossibilities {
|
||||
fn default() -> Self {
|
||||
Self::new()
|
||||
}
|
||||
}
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
|
||||
pub struct DiscoveryContextConfig {
|
||||
pub protocol_type: ProtocolType,
|
||||
@ -57,6 +136,7 @@ pub(super) struct DiscoveryContext {
|
||||
registry: VeilidComponentRegistry,
|
||||
unlocked_inner: Arc<DiscoveryContextUnlockedInner>,
|
||||
inner: Arc<Mutex<DiscoveryContextInner>>,
|
||||
stop_token: StopToken,
|
||||
}
|
||||
|
||||
impl_veilid_component_registry_accessor!(DiscoveryContext);
|
||||
@ -70,7 +150,11 @@ impl core::ops::Deref for DiscoveryContext {
|
||||
}
|
||||
|
||||
impl DiscoveryContext {
|
||||
pub fn new(registry: VeilidComponentRegistry, config: DiscoveryContextConfig) -> Self {
|
||||
pub fn new(
|
||||
registry: VeilidComponentRegistry,
|
||||
config: DiscoveryContextConfig,
|
||||
stop_token: StopToken,
|
||||
) -> Self {
|
||||
let routing_table = registry.routing_table();
|
||||
let intf_addrs =
|
||||
Self::get_local_addresses(&routing_table, config.protocol_type, config.address_type);
|
||||
@ -81,6 +165,7 @@ impl DiscoveryContext {
|
||||
inner: Arc::new(Mutex::new(DiscoveryContextInner {
|
||||
external_info: Vec::new(),
|
||||
})),
|
||||
stop_token,
|
||||
}
|
||||
}
|
||||
|
||||
@ -147,9 +232,7 @@ impl DiscoveryContext {
|
||||
// This is done over the normal port using RPC
|
||||
#[instrument(level = "trace", skip(self), ret)]
|
||||
async fn discover_external_addresses(&self) -> bool {
|
||||
let node_count = self
|
||||
.config()
|
||||
.with(|c| c.network.dht.max_find_node_count as usize);
|
||||
let node_count = EXTERNAL_INFO_NODE_COUNT;
|
||||
let routing_domain = RoutingDomain::PublicInternet;
|
||||
|
||||
let protocol_type = self.config.protocol_type;
|
||||
@ -211,7 +294,6 @@ impl DiscoveryContext {
|
||||
}
|
||||
|
||||
// For each peer, ask them for our public address, filtering on desired dial info
|
||||
|
||||
let get_public_address_func = |node: NodeRef| {
|
||||
let this = self.clone();
|
||||
let node = node.custom_filtered(
|
||||
@ -242,24 +324,55 @@ impl DiscoveryContext {
|
||||
unord.push(gpa_future);
|
||||
|
||||
// Always process N at a time so we get all addresses in parallel if possible
|
||||
if unord.len() == EXTERNAL_INFO_VALIDATIONS {
|
||||
if unord.len() == EXTERNAL_INFO_CONCURRENCY {
|
||||
// Process one
|
||||
if let Some(Some(ei)) = unord.next().in_current_span().await {
|
||||
external_address_infos.push(ei);
|
||||
if external_address_infos.len() == EXTERNAL_INFO_VALIDATIONS {
|
||||
break;
|
||||
match unord
|
||||
.next()
|
||||
.timeout_at(self.stop_token.clone())
|
||||
.in_current_span()
|
||||
.await
|
||||
{
|
||||
Ok(Some(Some(ei))) => {
|
||||
external_address_infos.push(ei);
|
||||
if external_address_infos.len() == EXTERNAL_INFO_VALIDATIONS {
|
||||
break;
|
||||
}
|
||||
}
|
||||
Ok(Some(None)) => {
|
||||
// Found no public address from this node
|
||||
}
|
||||
Ok(None) => {
|
||||
// Should never happen in this loop
|
||||
unreachable!();
|
||||
}
|
||||
Err(_) => {
|
||||
// stop requested
|
||||
return false;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
// Finish whatever is left if we need to
|
||||
if external_address_infos.len() < EXTERNAL_INFO_VALIDATIONS {
|
||||
while let Some(res) = unord.next().in_current_span().await {
|
||||
if let Some(ei) = res {
|
||||
while external_address_infos.len() < EXTERNAL_INFO_VALIDATIONS {
|
||||
match unord
|
||||
.next()
|
||||
.timeout_at(self.stop_token.clone())
|
||||
.in_current_span()
|
||||
.await
|
||||
{
|
||||
Ok(Some(Some(ei))) => {
|
||||
external_address_infos.push(ei);
|
||||
if external_address_infos.len() == EXTERNAL_INFO_VALIDATIONS {
|
||||
break;
|
||||
}
|
||||
}
|
||||
Ok(Some(None)) => {
|
||||
// Found no public address from this node
|
||||
}
|
||||
Ok(None) => {
|
||||
// No nodes left to wait for
|
||||
break;
|
||||
}
|
||||
Err(_) => {
|
||||
// stop requested
|
||||
return false;
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -414,16 +527,21 @@ impl DiscoveryContext {
|
||||
|
||||
// If we know we are not behind NAT, check our firewall status
|
||||
#[instrument(level = "trace", skip(self), ret)]
|
||||
async fn protocol_process_no_nat(
|
||||
fn protocol_process_no_nat(
|
||||
&self,
|
||||
unord: &mut FuturesUnordered<SendPinBoxFuture<Option<DetectionResult>>>,
|
||||
all_possibilities: &mut DialInfoClassAllPossibilities,
|
||||
unord: &mut FuturesUnordered<PinBoxFutureStatic<DetectionResultKind>>,
|
||||
) {
|
||||
let external_infos = self.inner.lock().external_info.clone();
|
||||
|
||||
// Have all the external validator nodes check us
|
||||
for external_info in external_infos {
|
||||
let this = self.clone();
|
||||
let do_no_nat_fut: SendPinBoxFuture<Option<DetectionResult>> = Box::pin(async move {
|
||||
|
||||
let possibilities = vec![(DialInfoClass::Direct, 1), (DialInfoClass::Blocked, 1)];
|
||||
all_possibilities.add(&possibilities);
|
||||
|
||||
let do_no_nat_fut: PinBoxFutureStatic<DetectionResultKind> = Box::pin(async move {
|
||||
// Do a validate_dial_info on the external address from a redirected node
|
||||
if this
|
||||
.validate_dial_info(
|
||||
@ -434,47 +552,55 @@ impl DiscoveryContext {
|
||||
.await
|
||||
{
|
||||
// Add public dial info with Direct dialinfo class
|
||||
Some(DetectionResult {
|
||||
config: this.config,
|
||||
ddi: DetectedDialInfo::Detected(DialInfoDetail {
|
||||
dial_info: external_info.dial_info.clone(),
|
||||
class: DialInfoClass::Direct,
|
||||
}),
|
||||
external_address_types: AddressTypeSet::only(
|
||||
external_info.address.address_type(),
|
||||
),
|
||||
})
|
||||
DetectionResultKind::Result {
|
||||
possibilities,
|
||||
result: DetectionResult {
|
||||
config: this.config,
|
||||
ddi: DetectedDialInfo::Detected(DialInfoDetail {
|
||||
dial_info: external_info.dial_info.clone(),
|
||||
class: DialInfoClass::Direct,
|
||||
}),
|
||||
external_address_types: AddressTypeSet::only(
|
||||
external_info.address.address_type(),
|
||||
),
|
||||
},
|
||||
}
|
||||
} else {
|
||||
// Add public dial info with Blocked dialinfo class
|
||||
Some(DetectionResult {
|
||||
config: this.config,
|
||||
ddi: DetectedDialInfo::Detected(DialInfoDetail {
|
||||
dial_info: external_info.dial_info.clone(),
|
||||
class: DialInfoClass::Blocked,
|
||||
}),
|
||||
external_address_types: AddressTypeSet::only(
|
||||
external_info.address.address_type(),
|
||||
),
|
||||
})
|
||||
DetectionResultKind::Result {
|
||||
possibilities,
|
||||
result: DetectionResult {
|
||||
config: this.config,
|
||||
ddi: DetectedDialInfo::Detected(DialInfoDetail {
|
||||
dial_info: external_info.dial_info.clone(),
|
||||
class: DialInfoClass::Blocked,
|
||||
}),
|
||||
external_address_types: AddressTypeSet::only(
|
||||
external_info.address.address_type(),
|
||||
),
|
||||
},
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
unord.push(do_no_nat_fut);
|
||||
}
|
||||
}
|
||||
|
||||
// If we know we are behind NAT check what kind
|
||||
#[instrument(level = "trace", skip(self), ret)]
|
||||
async fn protocol_process_nat(
|
||||
fn protocol_process_nat(
|
||||
&self,
|
||||
unord: &mut FuturesUnordered<SendPinBoxFuture<Option<DetectionResult>>>,
|
||||
all_possibilities: &mut DialInfoClassAllPossibilities,
|
||||
unord: &mut FuturesUnordered<PinBoxFutureStatic<DetectionResultKind>>,
|
||||
) {
|
||||
// Get the external dial info histogram for our use here
|
||||
let external_info = {
|
||||
let inner = self.inner.lock();
|
||||
inner.external_info.clone()
|
||||
};
|
||||
let local_port = self.config.port;
|
||||
|
||||
// Get the external dial info histogram for our use here
|
||||
let mut external_info_addr_port_hist = HashMap::<SocketAddress, usize>::new();
|
||||
let mut external_info_addr_hist = HashMap::<Address, usize>::new();
|
||||
for ei in &external_info {
|
||||
@ -525,15 +651,21 @@ impl DiscoveryContext {
|
||||
// then we consider this a symmetric NAT
|
||||
if different_addresses || !same_address_has_popular_port {
|
||||
let this = self.clone();
|
||||
let do_symmetric_nat_fut: SendPinBoxFuture<Option<DetectionResult>> =
|
||||
let do_symmetric_nat_fut: PinBoxFutureStatic<DetectionResultKind> =
|
||||
Box::pin(async move {
|
||||
Some(DetectionResult {
|
||||
config: this.config,
|
||||
ddi: DetectedDialInfo::SymmetricNAT,
|
||||
external_address_types,
|
||||
})
|
||||
DetectionResultKind::Result {
|
||||
// Don't bother tracking possibilities for SymmetricNAT
|
||||
// it's never going to be 'better than' anything else
|
||||
possibilities: vec![],
|
||||
result: DetectionResult {
|
||||
config: this.config,
|
||||
ddi: DetectedDialInfo::SymmetricNAT,
|
||||
external_address_types,
|
||||
},
|
||||
}
|
||||
});
|
||||
unord.push(do_symmetric_nat_fut);
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
@ -545,23 +677,28 @@ impl DiscoveryContext {
|
||||
if local_port_matching_external_info.is_none() && best_external_info.is_some() {
|
||||
let c_external_1 = best_external_info.as_ref().unwrap().clone();
|
||||
let c_this = this.clone();
|
||||
let do_manual_map_fut: SendPinBoxFuture<Option<DetectionResult>> =
|
||||
Box::pin(async move {
|
||||
// Do a validate_dial_info on the external address, but with the same port as the local port of local interface, from a redirected node
|
||||
// This test is to see if a node had manual port forwarding done with the same port number as the local listener
|
||||
let mut external_1_dial_info_with_local_port = c_external_1.dial_info.clone();
|
||||
external_1_dial_info_with_local_port.set_port(local_port);
|
||||
|
||||
if this
|
||||
.validate_dial_info(
|
||||
c_external_1.node.clone(),
|
||||
external_1_dial_info_with_local_port.clone(),
|
||||
true,
|
||||
)
|
||||
.await
|
||||
{
|
||||
// Add public dial info with Direct dialinfo class
|
||||
return Some(DetectionResult {
|
||||
let possibilities = vec![(DialInfoClass::Direct, 1)];
|
||||
all_possibilities.add(&possibilities);
|
||||
|
||||
let do_manual_map_fut: PinBoxFutureStatic<DetectionResultKind> = Box::pin(async move {
|
||||
// Do a validate_dial_info on the external address, but with the same port as the local port of local interface, from a redirected node
|
||||
// This test is to see if a node had manual port forwarding done with the same port number as the local listener
|
||||
let mut external_1_dial_info_with_local_port = c_external_1.dial_info.clone();
|
||||
external_1_dial_info_with_local_port.set_port(local_port);
|
||||
|
||||
if this
|
||||
.validate_dial_info(
|
||||
c_external_1.node.clone(),
|
||||
external_1_dial_info_with_local_port.clone(),
|
||||
true,
|
||||
)
|
||||
.await
|
||||
{
|
||||
// Add public dial info with Direct dialinfo class
|
||||
return DetectionResultKind::Result {
|
||||
possibilities,
|
||||
result: DetectionResult {
|
||||
config: c_this.config,
|
||||
ddi: DetectedDialInfo::Detected(DialInfoDetail {
|
||||
dial_info: external_1_dial_info_with_local_port,
|
||||
@ -570,80 +707,99 @@ impl DiscoveryContext {
|
||||
external_address_types: AddressTypeSet::only(
|
||||
c_external_1.address.address_type(),
|
||||
),
|
||||
});
|
||||
}
|
||||
},
|
||||
};
|
||||
}
|
||||
|
||||
None
|
||||
});
|
||||
DetectionResultKind::Failure { possibilities }
|
||||
});
|
||||
unord.push(do_manual_map_fut);
|
||||
}
|
||||
|
||||
// NAT Detection
|
||||
///////////
|
||||
|
||||
let retry_count = self.config().with(|c| c.network.restricted_nat_retries);
|
||||
|
||||
// Full Cone NAT Detection
|
||||
///////////
|
||||
let this = self.clone();
|
||||
let do_nat_detect_fut: SendPinBoxFuture<Option<DetectionResult>> = Box::pin(async move {
|
||||
let mut retry_count = this.config().with(|c| c.network.restricted_nat_retries);
|
||||
|
||||
// Loop for restricted NAT retries
|
||||
let c_this = self.clone();
|
||||
let c_external_1 = external_info.first().cloned().unwrap();
|
||||
let possibilities = vec![(DialInfoClass::FullConeNAT, 1)];
|
||||
all_possibilities.add(&possibilities);
|
||||
let do_full_cone_fut: PinBoxFutureStatic<DetectionResultKind> = Box::pin(async move {
|
||||
let mut retry_count = retry_count;
|
||||
|
||||
// Let's see what kind of NAT we have
|
||||
// Does a redirected dial info validation from a different address and a random port find us?
|
||||
loop {
|
||||
let mut ord = FuturesOrdered::new();
|
||||
if c_this
|
||||
.validate_dial_info(
|
||||
c_external_1.node.clone(),
|
||||
c_external_1.dial_info.clone(),
|
||||
true,
|
||||
)
|
||||
.await
|
||||
{
|
||||
// Yes, another machine can use the dial info directly, so Full Cone
|
||||
// Add public dial info with full cone NAT network class
|
||||
|
||||
let c_this = this.clone();
|
||||
let c_external_1 = external_info.first().cloned().unwrap();
|
||||
let do_full_cone_fut: SendPinBoxFuture<Option<DetectionResult>> =
|
||||
Box::pin(async move {
|
||||
// Let's see what kind of NAT we have
|
||||
// Does a redirected dial info validation from a different address and a random port find us?
|
||||
if c_this
|
||||
.validate_dial_info(
|
||||
c_external_1.node.clone(),
|
||||
c_external_1.dial_info.clone(),
|
||||
true,
|
||||
)
|
||||
.await
|
||||
{
|
||||
// Yes, another machine can use the dial info directly, so Full Cone
|
||||
// Add public dial info with full cone NAT network class
|
||||
return DetectionResultKind::Result {
|
||||
possibilities,
|
||||
result: DetectionResult {
|
||||
config: c_this.config,
|
||||
ddi: DetectedDialInfo::Detected(DialInfoDetail {
|
||||
dial_info: c_external_1.dial_info,
|
||||
class: DialInfoClass::FullConeNAT,
|
||||
}),
|
||||
external_address_types: AddressTypeSet::only(
|
||||
c_external_1.address.address_type(),
|
||||
),
|
||||
},
|
||||
};
|
||||
}
|
||||
if retry_count == 0 {
|
||||
break;
|
||||
}
|
||||
retry_count -= 1;
|
||||
}
|
||||
|
||||
return Some(DetectionResult {
|
||||
config: c_this.config,
|
||||
ddi: DetectedDialInfo::Detected(DialInfoDetail {
|
||||
dial_info: c_external_1.dial_info,
|
||||
class: DialInfoClass::FullConeNAT,
|
||||
}),
|
||||
external_address_types: AddressTypeSet::only(
|
||||
c_external_1.address.address_type(),
|
||||
),
|
||||
});
|
||||
}
|
||||
None
|
||||
});
|
||||
ord.push_back(do_full_cone_fut);
|
||||
DetectionResultKind::Failure { possibilities }
|
||||
});
|
||||
unord.push(do_full_cone_fut);
|
||||
|
||||
let c_this = this.clone();
|
||||
let c_external_1 = external_info.first().cloned().unwrap();
|
||||
let c_external_2 = external_info.get(1).cloned().unwrap();
|
||||
let do_restricted_cone_fut: SendPinBoxFuture<Option<DetectionResult>> =
|
||||
Box::pin(async move {
|
||||
// We are restricted, determine what kind of restriction
|
||||
let c_this = self.clone();
|
||||
let c_external_1 = external_info.first().cloned().unwrap();
|
||||
let c_external_2 = external_info.get(1).cloned().unwrap();
|
||||
let possibilities = vec![
|
||||
(DialInfoClass::AddressRestrictedNAT, 1),
|
||||
(DialInfoClass::PortRestrictedNAT, 1),
|
||||
];
|
||||
all_possibilities.add(&possibilities);
|
||||
let do_restricted_cone_fut: PinBoxFutureStatic<DetectionResultKind> =
|
||||
Box::pin(async move {
|
||||
let mut retry_count = retry_count;
|
||||
|
||||
// If we're going to end up as a restricted NAT of some sort
|
||||
// Address is the same, so it's address or port restricted
|
||||
// We are restricted, determine what kind of restriction
|
||||
|
||||
// Do a validate_dial_info on the external address from a random port
|
||||
if c_this
|
||||
.validate_dial_info(
|
||||
c_external_2.node.clone(),
|
||||
c_external_1.dial_info.clone(),
|
||||
false,
|
||||
)
|
||||
.await
|
||||
{
|
||||
// Got a reply from a non-default port, which means we're only address restricted
|
||||
return Some(DetectionResult {
|
||||
// If we're going to end up as a restricted NAT of some sort
|
||||
// Address is the same, so it's address or port restricted
|
||||
|
||||
loop {
|
||||
// Do a validate_dial_info on the external address from a random port
|
||||
if c_this
|
||||
.validate_dial_info(
|
||||
c_external_2.node.clone(),
|
||||
c_external_1.dial_info.clone(),
|
||||
false,
|
||||
)
|
||||
.await
|
||||
{
|
||||
// Got a reply from a non-default port, which means we're only address restricted
|
||||
return DetectionResultKind::Result {
|
||||
possibilities,
|
||||
result: DetectionResult {
|
||||
config: c_this.config,
|
||||
ddi: DetectedDialInfo::Detected(DialInfoDetail {
|
||||
dial_info: c_external_1.dial_info.clone(),
|
||||
@ -652,87 +808,85 @@ impl DiscoveryContext {
|
||||
external_address_types: AddressTypeSet::only(
|
||||
c_external_1.address.address_type(),
|
||||
),
|
||||
});
|
||||
}
|
||||
// Didn't get a reply from a non-default port, which means we are also port restricted
|
||||
Some(DetectionResult {
|
||||
config: c_this.config,
|
||||
ddi: DetectedDialInfo::Detected(DialInfoDetail {
|
||||
dial_info: c_external_1.dial_info.clone(),
|
||||
class: DialInfoClass::PortRestrictedNAT,
|
||||
}),
|
||||
external_address_types: AddressTypeSet::only(
|
||||
c_external_1.address.address_type(),
|
||||
),
|
||||
})
|
||||
});
|
||||
ord.push_back(do_restricted_cone_fut);
|
||||
},
|
||||
};
|
||||
}
|
||||
|
||||
// Return the first result we get
|
||||
let mut some_dr = None;
|
||||
while let Some(res) = ord.next().await {
|
||||
if let Some(dr) = res {
|
||||
some_dr = Some(dr);
|
||||
if retry_count == 0 {
|
||||
break;
|
||||
}
|
||||
retry_count -= 1;
|
||||
}
|
||||
|
||||
if let Some(dr) = some_dr {
|
||||
if let DetectedDialInfo::Detected(did) = &dr.ddi {
|
||||
// If we got something better than restricted NAT or we're done retrying
|
||||
if did.class < DialInfoClass::AddressRestrictedNAT || retry_count == 0 {
|
||||
return Some(dr);
|
||||
}
|
||||
}
|
||||
// Didn't get a reply from a non-default port, which means we are also port restricted
|
||||
DetectionResultKind::Result {
|
||||
possibilities,
|
||||
result: DetectionResult {
|
||||
config: c_this.config,
|
||||
ddi: DetectedDialInfo::Detected(DialInfoDetail {
|
||||
dial_info: c_external_1.dial_info.clone(),
|
||||
class: DialInfoClass::PortRestrictedNAT,
|
||||
}),
|
||||
external_address_types: AddressTypeSet::only(
|
||||
c_external_1.address.address_type(),
|
||||
),
|
||||
},
|
||||
}
|
||||
if retry_count == 0 {
|
||||
break;
|
||||
}
|
||||
retry_count -= 1;
|
||||
}
|
||||
|
||||
None
|
||||
});
|
||||
unord.push(do_nat_detect_fut);
|
||||
});
|
||||
unord.push(do_restricted_cone_fut);
|
||||
}
|
||||
|
||||
/// Add discovery futures to an unordered set that may detect dialinfo when they complete
|
||||
/// Run a discovery for a particular context
|
||||
/// Returns None if no detection was possible
|
||||
/// Returns Some(DetectionResult) with the best detection result for this context
|
||||
#[instrument(level = "trace", skip(self))]
|
||||
pub async fn discover(
|
||||
&self,
|
||||
unord: &mut FuturesUnordered<SendPinBoxFuture<Option<DetectionResult>>>,
|
||||
) {
|
||||
let enable_upnp = self.config().with(|c| c.network.upnp);
|
||||
|
||||
pub async fn discover(self) -> Option<DetectionResult> {
|
||||
// Do this right away because it's fast and every detection is going to need it
|
||||
// Get our external addresses from two fast nodes
|
||||
// Get our external addresses from a bunch of fast nodes
|
||||
if !self.discover_external_addresses().await {
|
||||
// If we couldn't get an external address, then we should just try the whole network class detection again later
|
||||
return;
|
||||
return None;
|
||||
}
|
||||
|
||||
// The set of futures we're going to wait on to determine dial info class for this context
|
||||
let mut unord = FuturesUnordered::<PinBoxFutureStatic<DetectionResultKind>>::new();
|
||||
|
||||
// Used to determine what is still worth waiting for since we always want to return the
|
||||
// best available dial info class. Once there are no better options in our waiting set
|
||||
// we can just return what we've got.
|
||||
let mut all_possibilities = DialInfoClassAllPossibilities::new();
|
||||
|
||||
// UPNP Automatic Mapping
|
||||
///////////
|
||||
|
||||
let enable_upnp = self.config().with(|c| c.network.upnp);
|
||||
if enable_upnp {
|
||||
let this = self.clone();
|
||||
let do_mapped_fut: SendPinBoxFuture<Option<DetectionResult>> = Box::pin(async move {
|
||||
|
||||
let possibilities = vec![(DialInfoClass::Mapped, 1)];
|
||||
all_possibilities.add(&possibilities);
|
||||
|
||||
let do_mapped_fut: PinBoxFutureStatic<DetectionResultKind> = Box::pin(async move {
|
||||
// Attempt a port mapping via all available and enabled mechanisms
|
||||
// Try this before the direct mapping in the event that we are restarting
|
||||
// and may not have recorded a mapping created the last time
|
||||
if let Some(external_mapped_dial_info) = this.try_upnp_port_mapping().await {
|
||||
// Got a port mapping, let's use it
|
||||
return Some(DetectionResult {
|
||||
config: this.config,
|
||||
ddi: DetectedDialInfo::Detected(DialInfoDetail {
|
||||
dial_info: external_mapped_dial_info.clone(),
|
||||
class: DialInfoClass::Mapped,
|
||||
}),
|
||||
external_address_types: AddressTypeSet::only(
|
||||
external_mapped_dial_info.address_type(),
|
||||
),
|
||||
});
|
||||
return DetectionResultKind::Result {
|
||||
possibilities,
|
||||
result: DetectionResult {
|
||||
config: this.config,
|
||||
ddi: DetectedDialInfo::Detected(DialInfoDetail {
|
||||
dial_info: external_mapped_dial_info.clone(),
|
||||
class: DialInfoClass::Mapped,
|
||||
}),
|
||||
external_address_types: AddressTypeSet::only(
|
||||
external_mapped_dial_info.address_type(),
|
||||
),
|
||||
},
|
||||
};
|
||||
}
|
||||
None
|
||||
DetectionResultKind::Failure { possibilities }
|
||||
});
|
||||
unord.push(do_mapped_fut);
|
||||
}
|
||||
@ -750,9 +904,84 @@ impl DiscoveryContext {
|
||||
.unwrap_or_default();
|
||||
|
||||
if local_address_in_external_info {
|
||||
self.protocol_process_no_nat(unord).await;
|
||||
self.protocol_process_no_nat(&mut all_possibilities, &mut unord);
|
||||
} else {
|
||||
self.protocol_process_nat(unord).await;
|
||||
self.protocol_process_nat(&mut all_possibilities, &mut unord);
|
||||
}
|
||||
|
||||
// Wait for the best detection result to roll in
|
||||
let mut opt_best_detection_result: Option<DetectionResult> = None;
|
||||
loop {
|
||||
match unord
|
||||
.next()
|
||||
.timeout_at(self.stop_token.clone())
|
||||
.in_current_span()
|
||||
.await
|
||||
{
|
||||
Ok(Some(DetectionResultKind::Result {
|
||||
result,
|
||||
possibilities,
|
||||
})) => {
|
||||
// Remove possible dial info classes from our available set
|
||||
all_possibilities.remove(&possibilities);
|
||||
|
||||
// Get best detection result for each discovery context config
|
||||
if let Some(best_detection_result) = &mut opt_best_detection_result {
|
||||
let ddi = &mut best_detection_result.ddi;
|
||||
// Upgrade existing dialinfo
|
||||
match ddi {
|
||||
DetectedDialInfo::SymmetricNAT => {
|
||||
// Whatever we got is better than or equal to symmetric
|
||||
*ddi = result.ddi;
|
||||
}
|
||||
DetectedDialInfo::Detected(cur_did) => match result.ddi {
|
||||
DetectedDialInfo::SymmetricNAT => {
|
||||
// Nothing is worse than this
|
||||
}
|
||||
DetectedDialInfo::Detected(did) => {
|
||||
// Pick the best dial info class we detected
|
||||
// because some nodes could be degenerate and if any node can validate a
|
||||
// better dial info class we should go with it and leave the
|
||||
// degenerate nodes in the dust to fade into obscurity
|
||||
if did.class < cur_did.class {
|
||||
cur_did.class = did.class;
|
||||
}
|
||||
}
|
||||
},
|
||||
}
|
||||
best_detection_result.external_address_types |=
|
||||
result.external_address_types;
|
||||
} else {
|
||||
opt_best_detection_result = Some(result);
|
||||
}
|
||||
}
|
||||
Ok(Some(DetectionResultKind::Failure { possibilities })) => {
|
||||
// Found no dial info for this protocol/address combination
|
||||
|
||||
// Remove possible dial info classes from our available set
|
||||
all_possibilities.remove(&possibilities);
|
||||
}
|
||||
Ok(None) => {
|
||||
// All done, normally
|
||||
break;
|
||||
}
|
||||
Err(_) => {
|
||||
// Stop token, exit early without error propagation
|
||||
return None;
|
||||
}
|
||||
}
|
||||
|
||||
// See if there's any better results worth waiting for
|
||||
if let Some(best_detection_result) = &opt_best_detection_result {
|
||||
if let DetectedDialInfo::Detected(did) = &best_detection_result.ddi {
|
||||
// If nothing else is going to be a better result, just stop here
|
||||
if !all_possibilities.any_better(did.class) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
opt_best_detection_result
|
||||
}
|
||||
}
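For illustration, the early exit in discover() relies on the possibilities bookkeeping: add what a spawned detection future might yield, remove it when that future resolves, and stop waiting once nothing outstanding could beat the best class in hand. A standalone sketch with an integer rank (lower is better) standing in for DialInfoClass, since DialInfoClassAllPossibilities itself is not shown in this diff:

use std::collections::BTreeMap;

// Rank of a dial info class; lower is better (mirrors the `class <` comparisons above).
type ClassRank = u8;

#[derive(Default)]
struct AllPossibilities {
    outstanding: BTreeMap<ClassRank, usize>,
}

impl AllPossibilities {
    // Called when a detection future is spawned, with the classes it might yield.
    fn add(&mut self, possibilities: &[(ClassRank, usize)]) {
        for (rank, count) in possibilities {
            *self.outstanding.entry(*rank).or_default() += *count;
        }
    }
    // Called when a detection future resolves, successfully or not.
    fn remove(&mut self, possibilities: &[(ClassRank, usize)]) {
        for (rank, count) in possibilities {
            if let Some(c) = self.outstanding.get_mut(rank) {
                *c = c.saturating_sub(*count);
                if *c == 0 {
                    self.outstanding.remove(rank);
                }
            }
        }
    }
    // True if some still-pending future could produce a better class than `current`.
    fn any_better(&self, current: ClassRank) -> bool {
        self.outstanding.keys().next().is_some_and(|best| *best < current)
    }
}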
@ -318,7 +318,7 @@ impl Network {
|
||||
.routing_table()
|
||||
.routing_domain_for_address(dial_info.address());
|
||||
|
||||
let network_result = fut.await?;
|
||||
let network_result = pin_future_closure!(fut).await?;
|
||||
if matches!(network_result, NetworkResult::NoConnection(_)) {
|
||||
// Dial info failure
|
||||
self.network_manager()
|
||||
|
@ -211,7 +211,7 @@ impl ProtocolAcceptHandler for RawTcpProtocolHandler {
|
||||
stream: AsyncPeekStream,
|
||||
peer_addr: SocketAddr,
|
||||
local_addr: SocketAddr,
|
||||
) -> SendPinBoxFuture<io::Result<Option<ProtocolNetworkConnection>>> {
|
||||
) -> PinBoxFutureStatic<io::Result<Option<ProtocolNetworkConnection>>> {
|
||||
Box::pin(self.clone().on_accept_async(stream, peer_addr, local_addr))
|
||||
}
|
||||
}
|
||||
|
@ -410,7 +410,7 @@ impl ProtocolAcceptHandler for WebsocketProtocolHandler {
|
||||
stream: AsyncPeekStream,
|
||||
peer_addr: SocketAddr,
|
||||
local_addr: SocketAddr,
|
||||
) -> SendPinBoxFuture<io::Result<Option<ProtocolNetworkConnection>>> {
|
||||
) -> PinBoxFutureStatic<io::Result<Option<ProtocolNetworkConnection>>> {
|
||||
Box::pin(self.clone().on_accept_async(stream, peer_addr, local_addr))
|
||||
}
|
||||
}
|
||||
|
@ -11,9 +11,9 @@ impl Network {
|
||||
// Network lock ensures only one task operating on the low level network state
|
||||
// can happen at the same time. Try lock is here to give preference to other longer
|
||||
// running processes like update_network_class_task.
|
||||
let _guard = match self.network_task_lock.try_lock() {
|
||||
Ok(v) => v,
|
||||
Err(_) => {
|
||||
let _guard = match asyncmutex_try_lock!(self.network_task_lock) {
|
||||
Some(v) => v,
|
||||
None => {
|
||||
// If we can't get the lock right now, then
|
||||
return Ok(());
|
||||
}
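For illustration, the asyncmutex_try_lock! guard above is a try-lock-or-skip tick: if a longer-running task already holds the network lock, this pass simply returns. A minimal sketch using a tokio mutex (an assumption here, since the crate abstracts over async runtimes):

use tokio::sync::Mutex;

// Skip this tick entirely if another task already holds the network lock.
async fn network_tick(network_task_lock: &Mutex<()>) {
    let Ok(_guard) = network_task_lock.try_lock() else {
        // Give preference to the longer-running task that owns the lock.
        return;
    };
    // ... exclusive low-level network work happens while _guard is held ...
}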
@ -88,7 +88,7 @@ impl Network {
|
||||
&self,
|
||||
stop_token: StopToken,
|
||||
_l: Timestamp,
|
||||
_t: Timestamp,
|
||||
cur_ts: Timestamp,
|
||||
) -> EyreResult<bool> {
|
||||
// Figure out if we can optimize TCP/WS checking since they are often on the same port
|
||||
let (protocol_config, inbound_protocol_map) = {
|
||||
@ -153,8 +153,8 @@ impl Network {
|
||||
port,
|
||||
};
|
||||
context_configs.insert(dcc);
|
||||
let discovery_context = DiscoveryContext::new(self.registry(), dcc);
|
||||
discovery_context.discover(&mut unord).await;
|
||||
let discovery_context = DiscoveryContext::new(self.registry(), dcc, stop_token.clone());
|
||||
unord.push(discovery_context.discover());
|
||||
}
|
||||
|
||||
// Wait for all discovery futures to complete and apply discovery contexts
|
||||
@ -174,34 +174,8 @@ impl Network {
|
||||
// Add the external address kinds to the set we've seen
|
||||
external_address_types |= dr.external_address_types;
|
||||
|
||||
// Get best detection result for each discovery context config
|
||||
if let Some(cur_dr) = detection_results.get_mut(&dr.config) {
|
||||
let ddi = &mut cur_dr.ddi;
|
||||
// Upgrade existing dialinfo
|
||||
match ddi {
|
||||
DetectedDialInfo::SymmetricNAT => {
|
||||
// Whatever we got is better than or equal to symmetric
|
||||
*ddi = dr.ddi;
|
||||
}
|
||||
DetectedDialInfo::Detected(cur_did) => match dr.ddi {
|
||||
DetectedDialInfo::SymmetricNAT => {
|
||||
// Nothing is worse than this
|
||||
}
|
||||
DetectedDialInfo::Detected(did) => {
|
||||
// Pick the best dial info class we detected
|
||||
// because some nodes could be degenerate and if any node can validate a
|
||||
// better dial info class we should go with it and leave the
|
||||
// degenerate nodes in the dust to fade into obscurity
|
||||
if did.class < cur_did.class {
|
||||
cur_did.class = did.class;
|
||||
}
|
||||
}
|
||||
},
|
||||
}
|
||||
cur_dr.external_address_types |= dr.external_address_types;
|
||||
} else {
|
||||
detection_results.insert(dr.config, dr);
|
||||
}
|
||||
// Save best detection result for each discovery context config
|
||||
detection_results.insert(dr.config, dr);
|
||||
}
|
||||
Ok(Some(None)) => {
|
||||
// Found no dial info for this protocol/address combination
|
||||
@ -223,9 +197,11 @@ impl Network {
|
||||
self.update_with_detection_result(&mut editor, &inbound_protocol_map, dr);
|
||||
}
|
||||
|
||||
let end_ts = Timestamp::now();
|
||||
|
||||
// If we got no external address types, try again
|
||||
if external_address_types.is_empty() {
|
||||
veilid_log!(self debug "Network class discovery failed, trying again, got no external address types");
|
||||
veilid_log!(self debug "Network class discovery failed in {}, trying again, got no external address types", end_ts - cur_ts);
|
||||
return Ok(false);
|
||||
}
|
||||
|
||||
@ -240,12 +216,12 @@ impl Network {
|
||||
}
|
||||
|
||||
if !success {
|
||||
veilid_log!(self debug "Network class discovery failed, trying again, needed {:?}", context_configs);
|
||||
veilid_log!(self debug "Network class discovery failed in {}, trying again, needed {:?}", end_ts - cur_ts, context_configs);
|
||||
return Ok(false);
|
||||
}
|
||||
|
||||
// All done
|
||||
veilid_log!(self debug "Network class discovery finished with address_types {:?}", external_address_types);
|
||||
veilid_log!(self debug "Network class discovery finished in {} with address_types {:?}", end_ts - cur_ts, external_address_types);
|
||||
|
||||
// Set the address types we've seen and confirm the network class
|
||||
editor.setup_network(
|
||||
|
@ -17,7 +17,7 @@ cfg_if::cfg_if! {
|
||||
stream: AsyncPeekStream,
|
||||
peer_addr: SocketAddr,
|
||||
local_addr: SocketAddr,
|
||||
) -> SendPinBoxFuture<io::Result<Option<ProtocolNetworkConnection>>>;
|
||||
) -> PinBoxFutureStatic<io::Result<Option<ProtocolNetworkConnection>>>;
|
||||
}
|
||||
|
||||
pub(crate) trait ProtocolAcceptHandlerClone {
|
||||
@ -328,7 +328,7 @@ impl NetworkConnection {
|
||||
receiver: flume::Receiver<(Option<Id>, Vec<u8>)>,
|
||||
protocol_connection: ProtocolNetworkConnection,
|
||||
stats: Arc<Mutex<NetworkConnectionStats>>,
|
||||
) -> SendPinBoxFuture<()> {
|
||||
) -> PinBoxFutureStatic<()> {
|
||||
Box::pin(async move {
|
||||
let registry = connection_manager.registry();
|
||||
|
||||
|
132
veilid-core/src/network_manager/node_contact_method_cache.rs
Normal file
@ -0,0 +1,132 @@
|
||||
use super::*;
|
||||
|
||||
pub const NODE_CONTACT_METHOD_CACHE_SIZE: usize = 1024;
|
||||
|
||||
#[derive(Clone, Debug, PartialEq, Eq, Ord, PartialOrd, Hash)]
|
||||
pub struct NodeContactMethodCacheKey {
|
||||
pub node_ids: TypedKeyGroup,
|
||||
pub own_node_info_ts: Timestamp,
|
||||
pub target_node_info_ts: Timestamp,
|
||||
pub target_node_ref_filter: NodeRefFilter,
|
||||
pub target_node_ref_sequencing: Sequencing,
|
||||
pub dial_info_failures_map: BTreeMap<DialInfo, Timestamp>,
|
||||
}
|
||||
|
||||
#[derive(Copy, Clone, Default, Debug)]
|
||||
pub struct HitMissStats {
|
||||
pub hit: usize,
|
||||
pub miss: usize,
|
||||
}
|
||||
|
||||
#[derive(Debug, Hash, Eq, PartialEq, Ord, PartialOrd)]
|
||||
enum ContactMethodKind {
|
||||
Unreachable,
|
||||
Existing,
|
||||
Direct,
|
||||
SignalReverse,
|
||||
SignalHolePunch,
|
||||
InboundRelay,
|
||||
OutboundRelay,
|
||||
}
|
||||
impl From<Option<&NodeContactMethodKind>> for ContactMethodKind {
|
||||
fn from(value: Option<&NodeContactMethodKind>) -> Self {
|
||||
match value {
|
||||
None => ContactMethodKind::Unreachable,
|
||||
Some(NodeContactMethodKind::Existing) => ContactMethodKind::Existing,
|
||||
Some(NodeContactMethodKind::Direct(_)) => ContactMethodKind::Direct,
|
||||
Some(NodeContactMethodKind::SignalReverse(_, _)) => ContactMethodKind::SignalReverse,
|
||||
Some(NodeContactMethodKind::SignalHolePunch(_, _)) => {
|
||||
ContactMethodKind::SignalHolePunch
|
||||
}
|
||||
Some(NodeContactMethodKind::InboundRelay(_)) => ContactMethodKind::InboundRelay,
|
||||
Some(NodeContactMethodKind::OutboundRelay(_)) => ContactMethodKind::OutboundRelay,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl HitMissStats {
    pub fn percentage(&self) -> f32 {
        (self.hit as f32 * 100.0f32) / ((self.hit + self.miss) as f32)
    }
}

impl fmt::Display for HitMissStats {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(
            f,
            "{}/{} {:.2}%",
            self.hit,
            self.hit + self.miss,
            self.percentage()
        )
    }
}
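A hypothetical unit test (not part of this commit, but placeable alongside the type) showing how the counters and the Display impl combine; note that percentage() is NaN until at least one lookup has been recorded.

#[test]
fn hit_miss_stats_display() {
    let mut stats = HitMissStats::default();
    stats.hit = 3;
    stats.miss = 1;
    assert_eq!(stats.percentage(), 75.0);
    assert_eq!(stats.to_string(), "3/4 75.00%");
}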
pub struct NodeContactMethodCache {
|
||||
cache: LruCache<NodeContactMethodCacheKey, NodeContactMethodKind>,
|
||||
|
||||
// Statistics for cache hits/misses
|
||||
cache_stats: HitMissStats,
|
||||
|
||||
// Recorded stats for contact method success
|
||||
contact_method_kind_stats: HashMap<ContactMethodKind, HitMissStats>,
|
||||
}
|
||||
|
||||
impl fmt::Debug for NodeContactMethodCache {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
f.debug_struct("NodeContactMethodCache")
|
||||
//.field("cache", &self.cache)
|
||||
.field("cache_stats", &self.cache_stats)
|
||||
.field("contact_method_kind_stats", &self.contact_method_kind_stats)
|
||||
.finish()
|
||||
}
|
||||
}
|
||||
|
||||
impl NodeContactMethodCache {
|
||||
pub fn new() -> Self {
|
||||
Self {
|
||||
cache: LruCache::new(NODE_CONTACT_METHOD_CACHE_SIZE),
|
||||
cache_stats: HitMissStats::default(),
|
||||
contact_method_kind_stats: HashMap::new(),
|
||||
}
|
||||
}
|
||||
pub fn insert(&mut self, ncm_key: NodeContactMethodCacheKey, ncm_kind: NodeContactMethodKind) {
|
||||
// Cache this
|
||||
self.cache.insert(ncm_key, ncm_kind);
|
||||
}
|
||||
|
||||
pub fn get(&mut self, ncm_key: &NodeContactMethodCacheKey) -> Option<NodeContactMethodKind> {
|
||||
if let Some(ncm_kind) = self.cache.get(ncm_key) {
|
||||
self.cache_stats.hit += 1;
|
||||
|
||||
return Some(ncm_kind.clone());
|
||||
}
|
||||
// Record miss
|
||||
self.cache_stats.miss += 1;
|
||||
None
|
||||
}
|
||||
|
||||
pub fn record_contact_method_success(&mut self, ncm_kind: Option<&NodeContactMethodKind>) {
|
||||
let cmk = ContactMethodKind::from(ncm_kind);
|
||||
self.contact_method_kind_stats.entry(cmk).or_default().hit += 1;
|
||||
}
|
||||
pub fn record_contact_method_failure(&mut self, ncm_kind: Option<&NodeContactMethodKind>) {
|
||||
let cmk = ContactMethodKind::from(ncm_kind);
|
||||
self.contact_method_kind_stats.entry(cmk).or_default().miss += 1;
|
||||
}
|
||||
|
||||
pub fn debug(&self) -> String {
|
||||
let mut out = format!(
|
||||
"Cache size: {}\nCache hits: {}\nContact methods:\n",
|
||||
self.cache.len(),
|
||||
self.cache_stats
|
||||
);
|
||||
let mut sorted_kinds: Vec<_> = self.contact_method_kind_stats.keys().collect();
|
||||
sorted_kinds.sort();
|
||||
for kind in sorted_kinds {
|
||||
let kindstats = self.contact_method_kind_stats.get(kind).unwrap();
|
||||
out += &format!(" {:?}: {}\n", kind, kindstats);
|
||||
}
|
||||
out
|
||||
}
|
||||
}
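A usage sketch of the cache API above, assuming the surrounding module's types are in scope; key and kind construction are elided placeholders built elsewhere.

fn cache_example(
    cache: &mut NodeContactMethodCache,
    key: NodeContactMethodCacheKey,
    kind: NodeContactMethodKind,
) {
    // Cheap lookup first; this increments the hit or miss counter.
    if cache.get(&key).is_none() {
        // In send_data() this write-back only happens once the contact
        // method has actually delivered data, so timeouts never get cached.
        cache.insert(key, kind.clone());
    }
    // Success or failure of the eventual send is recorded separately, per kind.
    cache.record_contact_method_success(Some(&kind));
    // debug() reports cache size, hit rate, and per-kind success rates.
    println!("{}", cache.debug());
}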
@ -38,7 +38,7 @@ pub trait ReceiptCallback: Send + 'static {
|
||||
receipt: Receipt,
|
||||
returns_so_far: u32,
|
||||
expected_returns: u32,
|
||||
) -> SendPinBoxFuture<()>;
|
||||
) -> PinBoxFutureStatic<()>;
|
||||
}
|
||||
impl<F, T> ReceiptCallback for T
|
||||
where
|
||||
@ -51,7 +51,7 @@ where
|
||||
receipt: Receipt,
|
||||
returns_so_far: u32,
|
||||
expected_returns: u32,
|
||||
) -> SendPinBoxFuture<()> {
|
||||
) -> PinBoxFutureStatic<()> {
|
||||
Box::pin(self(event, receipt, returns_so_far, expected_returns))
|
||||
}
|
||||
}
|
||||
@ -184,17 +184,12 @@ impl ReceiptManager {
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn startup(&self) -> EyreResult<()> {
|
||||
pub fn startup(&self) -> EyreResult<()> {
|
||||
let guard = self.unlocked_inner.startup_lock.startup()?;
|
||||
veilid_log!(self debug "startup receipt manager");
|
||||
|
||||
// Retrieve config
|
||||
{
|
||||
// let config = self.core().config();
|
||||
// let c = config.get();
|
||||
let mut inner = self.inner.lock();
|
||||
inner.stop_source = Some(StopSource::new());
|
||||
}
|
||||
let mut inner = self.inner.lock();
|
||||
inner.stop_source = Some(StopSource::new());
|
||||
|
||||
guard.success();
|
||||
Ok(())
|
||||
@ -204,7 +199,7 @@ impl ReceiptManager {
|
||||
fn perform_callback(
|
||||
evt: ReceiptEvent,
|
||||
record_mut: &mut ReceiptRecord,
|
||||
) -> Option<SendPinBoxFuture<()>> {
|
||||
) -> Option<PinBoxFutureStatic<()>> {
|
||||
match &mut record_mut.receipt_callback {
|
||||
ReceiptRecordCallbackType::Normal(callback) => Some(callback.call(
|
||||
evt,
|
||||
|
@ -1,12 +1,6 @@
|
||||
use super::*;
|
||||
use stop_token::future::FutureExt as _;
|
||||
|
||||
// global debugging statistics for hole punch success
|
||||
static HOLE_PUNCH_SUCCESS: AtomicUsize = AtomicUsize::new(0);
|
||||
static HOLE_PUNCH_FAILURE: AtomicUsize = AtomicUsize::new(0);
|
||||
static REVERSE_CONNECT_SUCCESS: AtomicUsize = AtomicUsize::new(0);
|
||||
static REVERSE_CONNECT_FAILURE: AtomicUsize = AtomicUsize::new(0);
|
||||
|
||||
impl NetworkManager {
|
||||
/// Send raw data to a node
|
||||
///
|
||||
@ -20,141 +14,227 @@ impl NetworkManager {
|
||||
&self,
|
||||
destination_node_ref: FilteredNodeRef,
|
||||
data: Vec<u8>,
|
||||
) -> EyreResult<NetworkResult<SendDataMethod>> {
|
||||
) -> EyreResult<NetworkResult<SendDataResult>> {
|
||||
// Get the best way to contact this node
|
||||
let possibly_relayed_contact_method =
|
||||
let mut opt_node_contact_method =
|
||||
self.get_node_contact_method(destination_node_ref.clone())?;
|
||||
|
||||
self.try_possibly_relayed_contact_method(
|
||||
possibly_relayed_contact_method,
|
||||
destination_node_ref,
|
||||
data,
|
||||
)
|
||||
.await
|
||||
// Retry loop
|
||||
loop {
|
||||
// Boxed because calling rpc_call_signal() is recursive to send_data()
|
||||
let nres = pin_future_closure!(self.try_node_contact_method(
|
||||
opt_node_contact_method.clone(),
|
||||
destination_node_ref.clone(),
|
||||
data.clone(),
|
||||
))
|
||||
.await?;
|
||||
|
||||
match &nres {
|
||||
NetworkResult::Timeout => {
|
||||
// Record contact method failure statistics
|
||||
self.inner
|
||||
.lock()
|
||||
.node_contact_method_cache
|
||||
.record_contact_method_failure(
|
||||
opt_node_contact_method.as_ref().map(|x| &x.ncm_kind),
|
||||
);
|
||||
|
||||
// Timeouts may retry with a different method
|
||||
match opt_node_contact_method {
|
||||
Some(NodeContactMethod {
|
||||
ncm_key,
|
||||
ncm_kind:
|
||||
NodeContactMethodKind::SignalReverse(relay_nr, _target_node_ref),
|
||||
}) => {
|
||||
// Try again with a different method
|
||||
opt_node_contact_method = Some(NodeContactMethod {
|
||||
ncm_key,
|
||||
ncm_kind: NodeContactMethodKind::InboundRelay(relay_nr),
|
||||
});
|
||||
continue;
|
||||
}
|
||||
Some(NodeContactMethod {
|
||||
ncm_key,
|
||||
ncm_kind:
|
||||
NodeContactMethodKind::SignalHolePunch(relay_nr, _target_node_ref),
|
||||
}) => {
|
||||
// Try again with a different method
|
||||
opt_node_contact_method = Some(NodeContactMethod {
|
||||
ncm_key,
|
||||
ncm_kind: NodeContactMethodKind::InboundRelay(relay_nr),
|
||||
});
|
||||
continue;
|
||||
}
|
||||
_ => {
|
||||
// Don't retry any other contact methods, and don't cache a timeout
|
||||
break Ok(nres);
|
||||
}
|
||||
}
|
||||
}
|
||||
NetworkResult::ServiceUnavailable(_)
|
||||
| NetworkResult::NoConnection(_)
|
||||
| NetworkResult::AlreadyExists(_)
|
||||
| NetworkResult::InvalidMessage(_) => {
|
||||
// Record contact method failure statistics
|
||||
self.inner
|
||||
.lock()
|
||||
.node_contact_method_cache
|
||||
.record_contact_method_failure(
|
||||
opt_node_contact_method.as_ref().map(|x| &x.ncm_kind),
|
||||
);
|
||||
|
||||
// Other network results don't cache, just directly return the result
|
||||
break Ok(nres);
|
||||
}
|
||||
NetworkResult::Value(v) => {
|
||||
// Successful network result gets to cache the node contact method
|
||||
if let Some(ncm) = &v.opt_contact_method {
|
||||
// Cache the contact method
|
||||
self.cache_node_contact_method(ncm.clone());
|
||||
}
|
||||
if let Some(ncm) = &v.opt_relayed_contact_method {
|
||||
// Cache the relayed contact method
|
||||
self.cache_node_contact_method(ncm.clone());
|
||||
}
|
||||
|
||||
// Record cache insertion as a success
|
||||
self.inner
|
||||
.lock()
|
||||
.node_contact_method_cache
|
||||
.record_contact_method_success(
|
||||
v.opt_contact_method.as_ref().map(|x| &x.ncm_kind),
|
||||
);
|
||||
// If relayed contact method was specified, then it wasn't unreachable
|
||||
// (must have been relay type or it wouldn't be here, and if this is None
|
||||
// then the contact method was not relayed)
|
||||
if v.opt_relayed_contact_method.is_some() {
|
||||
self.inner
|
||||
.lock()
|
||||
.node_contact_method_cache
|
||||
.record_contact_method_success(
|
||||
v.opt_relayed_contact_method.as_ref().map(|x| &x.ncm_kind),
|
||||
);
|
||||
}
|
||||
|
||||
break Ok(nres);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
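For illustration, the retry loop above only ever falls back in one direction: a timed out SignalReverse or SignalHolePunch attempt is retried once through the target's inbound relay, and anything else is returned as-is (with successes, and only successes, being written back to the contact-method cache). A compact restatement with placeholder types:

#[derive(Clone, Debug, PartialEq)]
enum Kind {
    Direct,
    SignalReverse,
    SignalHolePunch,
    InboundRelay,
}

enum Outcome {
    Value,
    Timeout,
}

// Returns the method to retry with, or None if the current result is returned as-is.
fn fallback(kind: &Kind, outcome: &Outcome) -> Option<Kind> {
    match (outcome, kind) {
        (Outcome::Timeout, Kind::SignalReverse) | (Outcome::Timeout, Kind::SignalHolePunch) => {
            Some(Kind::InboundRelay)
        }
        _ => None,
    }
}

fn main() {
    assert_eq!(
        fallback(&Kind::SignalReverse, &Outcome::Timeout),
        Some(Kind::InboundRelay)
    );
    assert_eq!(fallback(&Kind::Direct, &Outcome::Timeout), None);
    assert_eq!(fallback(&Kind::SignalHolePunch, &Outcome::Value), None);
}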
#[instrument(level = "trace", target = "net", skip_all)]
|
||||
pub fn try_possibly_relayed_contact_method(
|
||||
async fn try_node_contact_method(
|
||||
&self,
|
||||
possibly_relayed_contact_method: NodeContactMethod,
|
||||
opt_node_contact_method: Option<NodeContactMethod>,
|
||||
destination_node_ref: FilteredNodeRef,
|
||||
data: Vec<u8>,
|
||||
) -> SendPinBoxFuture<EyreResult<NetworkResult<SendDataMethod>>> {
|
||||
let registry = self.registry();
|
||||
Box::pin(
|
||||
async move {
|
||||
let this = registry.network_manager();
|
||||
) -> EyreResult<NetworkResult<SendDataResult>> {
|
||||
// If we need to relay, do it
|
||||
let (opt_contact_method, target_node_ref, opt_relayed_contact_method) =
|
||||
match opt_node_contact_method.clone().map(|x| x.ncm_kind) {
|
||||
Some(NodeContactMethodKind::OutboundRelay(relay_nr))
|
||||
| Some(NodeContactMethodKind::InboundRelay(relay_nr)) => {
|
||||
let opt_contact_method = self.get_node_contact_method(relay_nr.clone())?;
|
||||
(opt_contact_method, relay_nr, opt_node_contact_method)
|
||||
}
|
||||
_ => (opt_node_contact_method, destination_node_ref.clone(), None),
|
||||
};
|
||||
|
||||
// If we need to relay, do it
|
||||
let (contact_method, target_node_ref, opt_relayed_contact_method) = match possibly_relayed_contact_method.clone() {
|
||||
NodeContactMethod::OutboundRelay(relay_nr)
|
||||
| NodeContactMethod::InboundRelay(relay_nr) => {
|
||||
let cm = this.get_node_contact_method(relay_nr.clone())?;
|
||||
(cm, relay_nr, Some(possibly_relayed_contact_method))
|
||||
}
|
||||
cm => (cm, destination_node_ref.clone(), None),
|
||||
};
|
||||
#[cfg(feature = "verbose-tracing")]
|
||||
veilid_log!(self debug
|
||||
"ContactMethod: {:?} for {:?}",
|
||||
opt_contact_method, destination_node_ref
|
||||
);
|
||||
|
||||
#[cfg(feature = "verbose-tracing")]
|
||||
veilid_log!(self debug
|
||||
"ContactMethod: {:?} for {:?}",
|
||||
contact_method, destination_node_ref
|
||||
);
|
||||
|
||||
// Try the contact method
|
||||
let mut send_data_method = match contact_method {
|
||||
NodeContactMethod::OutboundRelay(relay_nr) => {
|
||||
// Relay loop or multiple relays
|
||||
bail!(
|
||||
// Try the contact method
|
||||
let unique_flow = match &opt_contact_method {
|
||||
None => {
|
||||
// If a node is unreachable it may still have an existing inbound connection
|
||||
// Try that, but don't cache anything
|
||||
network_result_try!(
|
||||
pin_future_closure!(self.send_data_ncm_existing(target_node_ref, data)).await?
|
||||
)
|
||||
}
|
||||
Some(NodeContactMethod {
|
||||
ncm_key: _,
|
||||
ncm_kind: NodeContactMethodKind::Existing,
|
||||
}) => {
|
||||
// The node must have an existing connection, for example connecting to your own
|
||||
// relay is something that must always have a connection already
|
||||
network_result_try!(
|
||||
pin_future_closure!(self.send_data_ncm_existing(target_node_ref, data)).await?
|
||||
)
|
||||
}
|
||||
Some(NodeContactMethod {
|
||||
ncm_key: _,
|
||||
ncm_kind: NodeContactMethodKind::OutboundRelay(relay_nr),
|
||||
}) => {
|
||||
// Relay loop or multiple relays
|
||||
bail!(
|
||||
"Outbound relay loop or multiple relays detected: destination {} resolved to target {} via extraneous relay {}",
|
||||
destination_node_ref,
|
||||
target_node_ref,
|
||||
relay_nr,
|
||||
);
|
||||
}
|
||||
| NodeContactMethod::InboundRelay(relay_nr) => {
|
||||
// Relay loop or multiple relays
|
||||
bail!(
|
||||
}
|
||||
Some(NodeContactMethod {
|
||||
ncm_key: _,
|
||||
ncm_kind: NodeContactMethodKind::InboundRelay(relay_nr),
|
||||
}) => {
|
||||
// Relay loop or multiple relays
|
||||
bail!(
|
||||
"Inbound relay loop or multiple relays detected: destination {} resolved to target {} via extraneous relay {}",
|
||||
destination_node_ref,
|
||||
target_node_ref,
|
||||
relay_nr,
|
||||
);
|
||||
}
|
||||
NodeContactMethod::Direct(dial_info) => {
|
||||
network_result_try!(
|
||||
this.send_data_ncm_direct(target_node_ref, dial_info, data).await?
|
||||
)
|
||||
}
|
||||
NodeContactMethod::SignalReverse(relay_nr, target_node_ref) => {
|
||||
let nres =
|
||||
this.send_data_ncm_signal_reverse(relay_nr.clone(), target_node_ref.clone(), data.clone())
|
||||
.await?;
|
||||
if matches!(nres, NetworkResult::Timeout) {
|
||||
// Failed to reverse-connect, fallback to inbound relay
|
||||
let success = REVERSE_CONNECT_SUCCESS.load(Ordering::Acquire);
|
||||
let failure = REVERSE_CONNECT_FAILURE.fetch_add(1, Ordering::AcqRel) + 1;
|
||||
let rate = (success as f64 * 100.0) / ((success + failure) as f64);
|
||||
|
||||
veilid_log!(this debug target:"network_result", "Reverse connection failed ({:.2}% success) to {}, falling back to inbound relay via {}", rate, target_node_ref, relay_nr);
|
||||
network_result_try!(this.try_possibly_relayed_contact_method(NodeContactMethod::InboundRelay(relay_nr), destination_node_ref, data).await?)
|
||||
} else {
|
||||
if let NetworkResult::Value(sdm) = &nres {
|
||||
if matches!(sdm.contact_method, NodeContactMethod::SignalReverse(_,_)) {
|
||||
|
||||
let success = REVERSE_CONNECT_SUCCESS.fetch_add(1, Ordering::AcqRel) + 1;
|
||||
let failure = REVERSE_CONNECT_FAILURE.load(Ordering::Acquire);
|
||||
let rate = (success as f64 * 100.0) / ((success + failure) as f64);
|
||||
|
||||
veilid_log!(this debug target:"network_result", "Reverse connection successful ({:.2}% success) to {} via {}", rate, target_node_ref, relay_nr);
|
||||
}
|
||||
}
|
||||
network_result_try!(nres)
|
||||
}
|
||||
}
|
||||
NodeContactMethod::SignalHolePunch(relay_nr, target_node_ref) => {
|
||||
let nres =
|
||||
this.send_data_ncm_signal_hole_punch(relay_nr.clone(), target_node_ref.clone(), data.clone())
|
||||
.await?;
|
||||
if matches!(nres, NetworkResult::Timeout) {
|
||||
// Failed to holepunch, fallback to inbound relay
|
||||
let success = HOLE_PUNCH_SUCCESS.load(Ordering::Acquire);
|
||||
let failure = HOLE_PUNCH_FAILURE.fetch_add(1, Ordering::AcqRel) + 1;
|
||||
let rate = (success as f64 * 100.0) / ((success + failure) as f64);
|
||||
|
||||
veilid_log!(this debug target:"network_result", "Hole punch failed ({:.2}% success) to {} , falling back to inbound relay via {}", rate, target_node_ref , relay_nr);
|
||||
network_result_try!(this.try_possibly_relayed_contact_method(NodeContactMethod::InboundRelay(relay_nr), destination_node_ref, data).await?)
|
||||
} else {
|
||||
if let NetworkResult::Value(sdm) = &nres {
|
||||
if matches!(sdm.contact_method, NodeContactMethod::SignalHolePunch(_,_)) {
|
||||
let success = HOLE_PUNCH_SUCCESS.fetch_add(1, Ordering::AcqRel) + 1;
|
||||
let failure = HOLE_PUNCH_FAILURE.load(Ordering::Acquire);
|
||||
let rate = (success as f64 * 100.0) / ((success + failure) as f64);
|
||||
|
||||
veilid_log!(this debug target:"network_result", "Hole punch successful ({:.2}% success) to {} via {}", rate, target_node_ref, relay_nr);
|
||||
}
|
||||
}
|
||||
network_result_try!(nres)
|
||||
}
|
||||
}
|
||||
NodeContactMethod::Existing => {
|
||||
network_result_try!(
|
||||
this.send_data_ncm_existing(target_node_ref, data).await?
|
||||
)
|
||||
}
|
||||
NodeContactMethod::Unreachable => {
|
||||
network_result_try!(
|
||||
this.send_data_ncm_unreachable(target_node_ref, data)
|
||||
.await?
|
||||
)
|
||||
}
|
||||
};
|
||||
send_data_method.opt_relayed_contact_method = opt_relayed_contact_method;
|
||||
|
||||
Ok(NetworkResult::value(send_data_method))
|
||||
}
|
||||
.in_current_span()
|
||||
)
|
||||
Some(NodeContactMethod {
|
||||
ncm_key: _,
|
||||
ncm_kind: NodeContactMethodKind::Direct(dial_info),
|
||||
}) => {
|
||||
network_result_try!(
|
||||
pin_future_closure!(self.send_data_ncm_direct(
|
||||
target_node_ref,
|
||||
dial_info.clone(),
|
||||
data
|
||||
))
|
||||
.await?
|
||||
)
|
||||
}
|
||||
Some(NodeContactMethod {
|
||||
ncm_key: _,
|
||||
ncm_kind: NodeContactMethodKind::SignalReverse(relay_nr, target_node_ref),
|
||||
}) => {
|
||||
network_result_try!(
|
||||
pin_future_closure!(self.send_data_ncm_signal_reverse(
|
||||
relay_nr.clone(),
|
||||
target_node_ref.clone(),
|
||||
data.clone()
|
||||
))
|
||||
.await?
|
||||
)
|
||||
}
|
||||
Some(NodeContactMethod {
|
||||
ncm_key: _,
|
||||
ncm_kind: NodeContactMethodKind::SignalHolePunch(relay_nr, target_node_ref),
|
||||
}) => {
|
||||
network_result_try!(
|
||||
pin_future_closure!(self.send_data_ncm_signal_hole_punch(
|
||||
relay_nr.clone(),
|
||||
target_node_ref.clone(),
|
||||
data.clone()
|
||||
))
|
||||
.await?
|
||||
)
|
||||
}
|
||||
};
|
||||
|
||||
Ok(NetworkResult::value(SendDataResult {
|
||||
opt_contact_method,
|
||||
opt_relayed_contact_method,
|
||||
unique_flow,
|
||||
}))
|
||||
}
|
||||
|
||||
/// Send data using NodeContactMethod::Existing
|
||||
@ -163,7 +243,7 @@ impl NetworkManager {
|
||||
&self,
|
||||
target_node_ref: FilteredNodeRef,
|
||||
data: Vec<u8>,
|
||||
) -> EyreResult<NetworkResult<SendDataMethod>> {
|
||||
) -> EyreResult<NetworkResult<UniqueFlow>> {
|
||||
// First try to send data to the last connection we've seen this peer on
|
||||
let Some(flow) = target_node_ref.last_flow() else {
|
||||
return Ok(NetworkResult::no_connection_other(format!(
|
||||
@ -172,7 +252,8 @@ impl NetworkManager {
|
||||
)));
|
||||
};
|
||||
|
||||
let unique_flow = match self.net().send_data_to_existing_flow(flow, data).await? {
|
||||
let net = self.net();
|
||||
let unique_flow = match pin_future!(net.send_data_to_existing_flow(flow, data)).await? {
|
||||
SendDataToExistingFlowResult::Sent(unique_flow) => unique_flow,
|
||||
SendDataToExistingFlowResult::NotSent(_) => {
|
||||
return Ok(NetworkResult::no_connection_other(
|
||||
@ -184,46 +265,7 @@ impl NetworkManager {
|
||||
// Update timestamp for this last connection since we just sent to it
|
||||
self.set_last_flow(target_node_ref.unfiltered(), flow, Timestamp::now());
|
||||
|
||||
Ok(NetworkResult::value(SendDataMethod {
|
||||
contact_method: NodeContactMethod::Existing,
|
||||
opt_relayed_contact_method: None,
|
||||
unique_flow,
|
||||
}))
|
||||
}
|
||||
|
||||
/// Send data using NodeContactMethod::Unreachable
|
||||
#[instrument(level = "trace", target = "net", skip_all, err)]
|
||||
async fn send_data_ncm_unreachable(
|
||||
&self,
|
||||
target_node_ref: FilteredNodeRef,
|
||||
data: Vec<u8>,
|
||||
) -> EyreResult<NetworkResult<SendDataMethod>> {
|
||||
// Try to send data to the last flow we've seen this peer on
|
||||
let Some(flow) = target_node_ref.last_flow() else {
|
||||
return Ok(NetworkResult::no_connection_other(format!(
|
||||
"Node is not reachable and has no existing connection: {}",
|
||||
target_node_ref
|
||||
)));
|
||||
};
|
||||
|
||||
let unique_flow = match self.net().send_data_to_existing_flow(flow, data).await? {
|
||||
SendDataToExistingFlowResult::Sent(unique_flow) => unique_flow,
|
||||
SendDataToExistingFlowResult::NotSent(_) => {
|
||||
return Ok(NetworkResult::no_connection_other(format!(
|
||||
"failed to send to unreachable node over existing connection: {:?}",
|
||||
flow
|
||||
)));
|
||||
}
|
||||
};
|
||||
|
||||
// Update timestamp for this last connection since we just sent to it
|
||||
self.set_last_flow(target_node_ref.unfiltered(), flow, Timestamp::now());
|
||||
|
||||
Ok(NetworkResult::value(SendDataMethod {
|
||||
contact_method: NodeContactMethod::Existing,
|
||||
opt_relayed_contact_method: None,
|
||||
unique_flow,
|
||||
}))
|
||||
Ok(NetworkResult::value(unique_flow))
|
||||
}
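For illustration, the pin_future!/pin_future_closure! wrappers used throughout this change box the awaited future. The macro bodies are not part of this diff, but for the owned case the effect amounts to the function below: the caller's own async state machine then stores a pointer instead of inlining the callee future's entire state. The real macros presumably also cover borrowed, non-'static futures; this sketch shows the owned case only.

use std::future::Future;
use std::pin::Pin;

// Boxing the future moves its state to the heap, keeping the enclosing
// async fn's own state small.
fn pin_boxed<F>(fut: F) -> Pin<Box<dyn Future<Output = F::Output> + Send>>
where
    F: Future + Send + 'static,
{
    Box::pin(fut)
}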
/// Send data using NodeContactMethod::SignalReverse
|
||||
@ -233,7 +275,7 @@ impl NetworkManager {
|
||||
relay_nr: FilteredNodeRef,
|
||||
target_node_ref: FilteredNodeRef,
|
||||
data: Vec<u8>,
|
||||
) -> EyreResult<NetworkResult<SendDataMethod>> {
|
||||
) -> EyreResult<NetworkResult<UniqueFlow>> {
|
||||
// Make a noderef that meets the sequencing requirements
|
||||
// But is not protocol-specific, or address-family-specific
|
||||
// as a signalled node gets to choose its own dial info for the reverse connection.
|
||||
@ -252,16 +294,13 @@ impl NetworkManager {
|
||||
|
||||
// First try to send data to the last flow we've seen this peer on
|
||||
let data = if let Some(flow) = seq_target_node_ref.last_flow() {
|
||||
match self.net().send_data_to_existing_flow(flow, data).await? {
|
||||
let net = self.net();
|
||||
match pin_future!(net.send_data_to_existing_flow(flow, data)).await? {
|
||||
SendDataToExistingFlowResult::Sent(unique_flow) => {
|
||||
// Update timestamp for this last connection since we just sent to it
|
||||
self.set_last_flow(target_node_ref.unfiltered(), flow, Timestamp::now());
|
||||
|
||||
return Ok(NetworkResult::value(SendDataMethod {
|
||||
contact_method: NodeContactMethod::Existing,
|
||||
opt_relayed_contact_method: None,
|
||||
unique_flow,
|
||||
}));
|
||||
return Ok(NetworkResult::value(unique_flow));
|
||||
}
|
||||
SendDataToExistingFlowResult::NotSent(data) => {
|
||||
// Couldn't send data to existing connection
|
||||
@ -281,14 +320,10 @@ impl NetworkManager {
|
||||
};
|
||||
|
||||
let unique_flow = network_result_try!(
|
||||
self.do_reverse_connect(relay_nr.clone(), target_node_ref.clone(), data)
|
||||
pin_future!(self.do_reverse_connect(relay_nr.clone(), target_node_ref.clone(), data))
|
||||
.await?
|
||||
);
|
||||
Ok(NetworkResult::value(SendDataMethod {
|
||||
contact_method: NodeContactMethod::SignalReverse(relay_nr, target_node_ref),
|
||||
opt_relayed_contact_method: None,
|
||||
unique_flow,
|
||||
}))
|
||||
Ok(NetworkResult::value(unique_flow))
|
||||
}
|
||||
|
||||
/// Send data using NodeContactMethod::SignalHolePunch
|
||||
@ -298,19 +333,16 @@ impl NetworkManager {
|
||||
relay_nr: FilteredNodeRef,
|
||||
target_node_ref: FilteredNodeRef,
|
||||
data: Vec<u8>,
|
||||
) -> EyreResult<NetworkResult<SendDataMethod>> {
|
||||
) -> EyreResult<NetworkResult<UniqueFlow>> {
|
||||
// First try to send data to the last flow we've seen this peer on
|
||||
let data = if let Some(flow) = target_node_ref.last_flow() {
|
||||
match self.net().send_data_to_existing_flow(flow, data).await? {
|
||||
let net = self.net();
|
||||
match pin_future!(net.send_data_to_existing_flow(flow, data)).await? {
|
||||
SendDataToExistingFlowResult::Sent(unique_flow) => {
|
||||
// Update timestamp for this last connection since we just sent to it
|
||||
self.set_last_flow(target_node_ref.unfiltered(), flow, Timestamp::now());
|
||||
|
||||
return Ok(NetworkResult::value(SendDataMethod {
|
||||
contact_method: NodeContactMethod::Existing,
|
||||
opt_relayed_contact_method: None,
|
||||
unique_flow,
|
||||
}));
|
||||
return Ok(NetworkResult::value(unique_flow));
|
||||
}
|
||||
SendDataToExistingFlowResult::NotSent(data) => {
|
||||
// Couldn't send data to existing connection
|
||||
@ -330,14 +362,11 @@ impl NetworkManager {
|
||||
};
|
||||
|
||||
let unique_flow = network_result_try!(
|
||||
self.do_hole_punch(relay_nr.clone(), target_node_ref.clone(), data)
|
||||
pin_future!(self.do_hole_punch(relay_nr.clone(), target_node_ref.clone(), data))
|
||||
.await?
|
||||
);
|
||||
Ok(NetworkResult::value(SendDataMethod {
|
||||
contact_method: NodeContactMethod::SignalHolePunch(relay_nr, target_node_ref),
|
||||
opt_relayed_contact_method: None,
|
||||
unique_flow,
|
||||
}))
|
||||
|
||||
Ok(NetworkResult::value(unique_flow))
|
||||
}
|
||||
|
||||
/// Send data using NodeContactMethod::Direct
|
||||
@ -347,7 +376,7 @@ impl NetworkManager {
|
||||
node_ref: FilteredNodeRef,
|
||||
dial_info: DialInfo,
|
||||
data: Vec<u8>,
|
||||
) -> EyreResult<NetworkResult<SendDataMethod>> {
|
||||
) -> EyreResult<NetworkResult<UniqueFlow>> {
|
||||
// Since we have the best dial info already, we can find a connection to use by protocol type
|
||||
let node_ref = node_ref.filtered_clone(NodeRefFilter::from(dial_info.make_filter()));
|
||||
|
||||
@ -359,16 +388,13 @@ impl NetworkManager {
|
||||
flow, node_ref
|
||||
);
|
||||
|
||||
match self.net().send_data_to_existing_flow(flow, data).await? {
|
||||
let net = self.net();
|
||||
match pin_future!(net.send_data_to_existing_flow(flow, data)).await? {
|
||||
SendDataToExistingFlowResult::Sent(unique_flow) => {
|
||||
// Update timestamp for this last connection since we just sent to it
|
||||
self.set_last_flow(node_ref.unfiltered(), flow, Timestamp::now());
|
||||
|
||||
return Ok(NetworkResult::value(SendDataMethod {
|
||||
contact_method: NodeContactMethod::Existing,
|
||||
opt_relayed_contact_method: None,
|
||||
unique_flow,
|
||||
}));
|
||||
return Ok(NetworkResult::value(unique_flow));
|
||||
}
|
||||
SendDataToExistingFlowResult::NotSent(d) => {
|
||||
// Connection couldn't send, kill it
|
||||
@ -381,27 +407,22 @@ impl NetworkManager {
|
||||
};
|
||||
|
||||
// New direct connection was necessary for this dial info
|
||||
let net = self.net();
|
||||
let unique_flow = network_result_try!(
|
||||
self.net()
|
||||
.send_data_to_dial_info(dial_info.clone(), data)
|
||||
.await?
|
||||
pin_future!(net.send_data_to_dial_info(dial_info.clone(), data)).await?
|
||||
);
|
||||
|
||||
// If we connected to this node directly, save off the last connection so we can use it again
|
||||
self.set_last_flow(node_ref.unfiltered(), unique_flow.flow, Timestamp::now());
|
||||
|
||||
Ok(NetworkResult::value(SendDataMethod {
|
||||
contact_method: NodeContactMethod::Direct(dial_info),
|
||||
opt_relayed_contact_method: None,
|
||||
unique_flow,
|
||||
}))
|
||||
Ok(NetworkResult::value(unique_flow))
|
||||
}
|
||||
|
||||
#[instrument(level = "trace", target = "net", skip_all, err)]
|
||||
#[instrument(level = "trace", target = "net", skip(self), err)]
|
||||
pub fn get_node_contact_method(
|
||||
&self,
|
||||
target_node_ref: FilteredNodeRef,
|
||||
) -> EyreResult<NodeContactMethod> {
|
||||
) -> EyreResult<Option<NodeContactMethod>> {
|
||||
let routing_table = self.routing_table();
|
||||
|
||||
// If a node is punished, then don't try to contact it
|
||||
@ -410,7 +431,7 @@ impl NetworkManager {
|
||||
.iter()
|
||||
.any(|nid| self.address_filter().is_node_id_punished(*nid))
|
||||
{
|
||||
return Ok(NodeContactMethod::Unreachable);
|
||||
return Ok(None);
|
||||
}
|
||||
|
||||
// Figure out the best routing domain to get the contact method over
|
||||
@ -418,7 +439,7 @@ impl NetworkManager {
|
||||
Some(rd) => rd,
|
||||
None => {
|
||||
veilid_log!(self trace "no routing domain for node {:?}", target_node_ref);
|
||||
return Ok(NodeContactMethod::Unreachable);
|
||||
return Ok(None);
|
||||
}
|
||||
};
|
||||
|
||||
@ -430,7 +451,7 @@ impl NetworkManager {
|
||||
// Peer B is the target node, get the whole peer info now
|
||||
let Some(peer_b) = target_node_ref.get_peer_info(routing_domain) else {
|
||||
veilid_log!(self trace "no node info for node {:?}", target_node_ref);
|
||||
return Ok(NodeContactMethod::Unreachable);
|
||||
return Ok(None);
|
||||
};
|
||||
|
||||
// Calculate the dial info failures map
|
||||
@ -458,42 +479,47 @@ impl NetworkManager {
|
||||
target_node_ref_sequencing: target_node_ref.sequencing(),
|
||||
dial_info_failures_map,
|
||||
};
|
||||
if let Some(ncm) = self.inner.lock().node_contact_method_cache.get(&ncm_key) {
|
||||
return Ok(ncm.clone());
|
||||
if let Some(ncm_kind) = self.inner.lock().node_contact_method_cache.get(&ncm_key) {
|
||||
return Ok(Some(NodeContactMethod { ncm_key, ncm_kind }));
|
||||
}
|
||||
|
||||
// Calculate the node contact method
|
||||
let routing_table = self.routing_table();
|
||||
let ncm = Self::get_node_contact_method_uncached(
|
||||
let Some(ncm_kind) = Self::get_node_contact_method_kind(
|
||||
&routing_table,
|
||||
routing_domain,
|
||||
target_node_ref,
|
||||
peer_a,
|
||||
peer_b,
|
||||
&ncm_key,
|
||||
)?;
|
||||
)?
|
||||
else {
|
||||
return Ok(None);
|
||||
};
|
||||
|
||||
Ok(Some(NodeContactMethod { ncm_key, ncm_kind }))
|
||||
}
|
||||
|
||||
fn cache_node_contact_method(&self, ncm: NodeContactMethod) {
|
||||
// Cache this
|
||||
self.inner
|
||||
.lock()
|
||||
.node_contact_method_cache
|
||||
.insert(ncm_key, ncm.clone());
|
||||
|
||||
Ok(ncm)
|
||||
.insert(ncm.ncm_key, ncm.ncm_kind);
|
||||
}
|
||||
|
||||
/// Figure out how to reach a node from our own node over the best routing domain and reference the nodes we want to access
|
||||
/// Uses NodeRefs to ensure nodes are referenced, this is not a part of 'RoutingTable' because RoutingTable is not
|
||||
/// allowed to use NodeRefs due to recursive locking
|
||||
#[instrument(level = "trace", target = "net", skip_all, err)]
|
||||
fn get_node_contact_method_uncached(
|
||||
fn get_node_contact_method_kind(
|
||||
routing_table: &RoutingTable,
|
||||
routing_domain: RoutingDomain,
|
||||
target_node_ref: FilteredNodeRef,
|
||||
peer_a: Arc<PeerInfo>,
|
||||
peer_b: Arc<PeerInfo>,
|
||||
ncm_key: &NodeContactMethodCacheKey,
|
||||
) -> EyreResult<NodeContactMethod> {
|
||||
) -> EyreResult<Option<NodeContactMethodKind>> {
|
||||
// Dial info filter comes from the target node ref but must be filtered by this node's outbound capabilities
|
||||
let dial_info_filter = target_node_ref.dial_info_filter().filtered(
|
||||
DialInfoFilter::all()
|
||||
@ -542,9 +568,9 @@ impl NetworkManager {
|
||||
|
||||
// Translate the raw contact method to a referenced contact method
|
||||
let ncm = match cm {
|
||||
ContactMethod::Unreachable => NodeContactMethod::Unreachable,
|
||||
ContactMethod::Existing => NodeContactMethod::Existing,
|
||||
ContactMethod::Direct(di) => NodeContactMethod::Direct(di),
|
||||
ContactMethod::Unreachable => None,
|
||||
ContactMethod::Existing => Some(NodeContactMethodKind::Existing),
|
||||
ContactMethod::Direct(di) => Some(NodeContactMethodKind::Direct(di)),
|
||||
ContactMethod::SignalReverse(relay_key, target_key) => {
|
||||
let mut relay_nr = routing_table
|
||||
.lookup_and_filter_noderef(relay_key, routing_domain.into(), dial_info_filter)?
|
||||
@ -595,7 +621,10 @@ impl NetworkManager {
|
||||
if tighten {
|
||||
target_node_ref.set_sequencing(Sequencing::EnsureOrdered);
|
||||
}
|
||||
NodeContactMethod::SignalReverse(relay_nr, target_node_ref)
|
||||
Some(NodeContactMethodKind::SignalReverse(
|
||||
relay_nr,
|
||||
target_node_ref,
|
||||
))
|
||||
}
|
||||
ContactMethod::SignalHolePunch(relay_key, target_key) => {
|
||||
let mut relay_nr = routing_table
|
||||
@ -622,7 +651,10 @@ impl NetworkManager {
|
||||
.with_protocol_type(ProtocolType::UDP),
|
||||
);
|
||||
|
||||
NodeContactMethod::SignalHolePunch(relay_nr, udp_target_node_ref)
|
||||
Some(NodeContactMethodKind::SignalHolePunch(
|
||||
relay_nr,
|
||||
udp_target_node_ref,
|
||||
))
|
||||
}
|
||||
ContactMethod::InboundRelay(relay_key) => {
|
||||
let mut relay_nr = routing_table
|
||||
@ -635,7 +667,7 @@ impl NetworkManager {
|
||||
)
|
||||
})?;
|
||||
relay_nr.set_sequencing(sequencing);
|
||||
NodeContactMethod::InboundRelay(relay_nr)
|
||||
Some(NodeContactMethodKind::InboundRelay(relay_nr))
|
||||
}
|
||||
ContactMethod::OutboundRelay(relay_key) => {
|
||||
let mut relay_nr = routing_table
|
||||
@ -648,7 +680,7 @@ impl NetworkManager {
|
||||
)
|
||||
})?;
|
||||
relay_nr.set_sequencing(sequencing);
|
||||
NodeContactMethod::OutboundRelay(relay_nr)
|
||||
Some(NodeContactMethodKind::OutboundRelay(relay_nr))
|
||||
}
|
||||
};
|
||||
|
||||
@ -695,16 +727,15 @@ impl NetworkManager {
|
||||
|
||||
// Issue the signal
|
||||
let rpc = self.rpc_processor();
|
||||
network_result_try!(rpc
|
||||
.rpc_call_signal(
|
||||
Destination::relay(relay_nr.clone(), target_nr.unfiltered()),
|
||||
SignalInfo::ReverseConnect {
|
||||
receipt,
|
||||
peer_info: published_peer_info
|
||||
},
|
||||
)
|
||||
.await
|
||||
.wrap_err("failed to send signal")?);
|
||||
network_result_try!(pin_future!(rpc.rpc_call_signal(
|
||||
Destination::relay(relay_nr.clone(), target_nr.unfiltered()),
|
||||
SignalInfo::ReverseConnect {
|
||||
receipt,
|
||||
peer_info: published_peer_info
|
||||
},
|
||||
))
|
||||
.await
|
||||
.wrap_err("failed to send signal")?);
|
||||
|
||||
// Wait for the return receipt
|
||||
let inbound_nr = match eventual_value
|
||||
@ -747,7 +778,8 @@ impl NetworkManager {
|
||||
|
||||
// And now use the existing connection to send over
|
||||
if let Some(flow) = inbound_nr.last_flow() {
|
||||
match self.net().send_data_to_existing_flow(flow, data).await? {
|
||||
let net = self.net();
|
||||
match pin_future!(net.send_data_to_existing_flow(flow, data)).await? {
|
||||
SendDataToExistingFlowResult::Sent(unique_flow) => {
|
||||
Ok(NetworkResult::value(unique_flow))
|
||||
}
|
||||
@ -817,9 +849,9 @@ impl NetworkManager {
|
||||
// Both sides will do this and then the receipt will get sent over the punched hole
|
||||
// Don't bother storing the returned flow as the 'last flow' because the other side of the hole
|
||||
// punch should come through and create a real 'last connection' for us if this succeeds
|
||||
let net = self.net();
|
||||
network_result_try!(
|
||||
self.net()
|
||||
.send_data_to_dial_info(hole_punch_did.dial_info.clone(), Vec::new())
|
||||
pin_future!(net.send_data_to_dial_info(hole_punch_did.dial_info.clone(), Vec::new()))
|
||||
.await?
|
||||
);
|
||||
|
||||
@ -828,22 +860,20 @@ impl NetworkManager {
|
||||
|
||||
// Issue the signal
|
||||
let rpc = self.rpc_processor();
|
||||
network_result_try!(rpc
|
||||
.rpc_call_signal(
|
||||
Destination::relay(relay_nr, target_nr.unfiltered()),
|
||||
SignalInfo::HolePunch {
|
||||
receipt,
|
||||
peer_info: published_peer_info
|
||||
},
|
||||
)
|
||||
.await
|
||||
.wrap_err("failed to send signal")?);
|
||||
network_result_try!(pin_future!(rpc.rpc_call_signal(
|
||||
Destination::relay(relay_nr, target_nr.unfiltered()),
|
||||
SignalInfo::HolePunch {
|
||||
receipt,
|
||||
peer_info: published_peer_info
|
||||
},
|
||||
))
|
||||
.await
|
||||
.wrap_err("failed to send signal")?);
|
||||
|
||||
// Another hole punch after the signal for UDP redundancy
|
||||
let net = self.net();
|
||||
network_result_try!(
|
||||
self.net()
|
||||
.send_data_to_dial_info(hole_punch_did.dial_info, Vec::new())
|
||||
.await?
|
||||
pin_future!(net.send_data_to_dial_info(hole_punch_did.dial_info, Vec::new())).await?
|
||||
);
|
||||
|
||||
// Wait for the return receipt
|
||||
|
@ -69,12 +69,28 @@ impl NetworkManager {
.add_down(bytes);
}

#[expect(dead_code)]
pub fn get_stats(&self) -> NetworkManagerStats {
let inner = self.inner.lock();
inner.stats.clone()
}

pub fn debug(&self) -> String {
let stats = self.get_stats();

let mut out = String::new();
out += "Network Manager\n";
out += "---------------\n";
let mut out = format!(
"Transfer stats:\n{}\n",
indent_all_string(&stats.self_stats.transfer_stats)
);
out += "Node Contact Method Cache\n";
out += "-------------------------\n";
out += &self.inner.lock().node_contact_method_cache.debug();

out
}

pub fn get_veilid_state(&self) -> Box<VeilidStateNetwork> {
if !self.network_is_started() {
return Box::new(VeilidStateNetwork {
|
||||
|
@ -54,7 +54,7 @@ pub async fn test_signed_node_info() {
sni.timestamp(),
sni.signatures().to_vec(),
);
sdni.validate(&tks1, &crypto).unwrap_err();
let _ = sdni.validate(&tks1, &crypto).unwrap_err();

// Test unsupported cryptosystem validation
let fake_crypto_kind: CryptoKind = FourCC::from([0, 1, 2, 3]);
@ -119,7 +119,7 @@ pub async fn test_signed_node_info() {
sni2.timestamp(),
sni2.signatures().to_vec(),
);
srni.validate(&tks3, &crypto).unwrap_err();
assert_err!(srni.validate(&tks3, &crypto));

// Test unsupported cryptosystem validation
let fake_crypto_kind: CryptoKind = FourCC::from([0, 1, 2, 3]);
|
||||
|
@ -1,7 +1,7 @@
use super::*;

// Keep member order appropriate for sorting < preference
#[derive(Copy, Clone, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)]
#[derive(Debug, Ord, PartialOrd, Hash, Serialize, Deserialize, EnumSetType)]
pub(crate) enum DialInfoClass {
Direct = 0, // D = Directly reachable with public IP and no firewall, with statically configured port
Mapped = 1, // M = Directly reachable with via portmap behind any NAT or firewalled with dynamically negotiated port
@ -33,3 +33,6 @@ impl DialInfoClass {
)
}
}

#[cfg_attr(all(target_arch = "wasm32", target_os = "unknown"), expect(dead_code))]
pub(crate) type DialInfoClassSet = EnumSet<DialInfoClass>;
|
||||
|
@ -516,5 +516,6 @@ impl Network {

Ok(())
}
#[expect(clippy::unused_async)]
pub async fn cancel_tasks(&self) {}
}
|
||||
|
@ -60,6 +60,7 @@ const CACHE_VALIDITY_KEY: &[u8] = b"cache_validity_key";
|
||||
type LowLevelProtocolPorts = BTreeSet<(LowLevelProtocolType, AddressType, u16)>;
|
||||
type ProtocolToPortMapping = BTreeMap<(ProtocolType, AddressType), (LowLevelProtocolType, u16)>;
|
||||
#[derive(Clone, Debug)]
|
||||
#[must_use]
|
||||
pub struct LowLevelPortInfo {
|
||||
pub low_level_protocol_ports: LowLevelProtocolPorts,
|
||||
pub protocol_to_port: ProtocolToPortMapping,
|
||||
@ -71,6 +72,7 @@ type SerializedBuckets = Vec<Vec<u8>>;
|
||||
type SerializedBucketMap = BTreeMap<CryptoKind, SerializedBuckets>;
|
||||
|
||||
#[derive(Clone, Debug, Default, Eq, PartialEq)]
|
||||
#[must_use]
|
||||
pub struct RoutingTableHealth {
|
||||
/// Number of reliable (long-term responsive) entries in the routing table
|
||||
pub reliable_entry_count: usize,
|
||||
@ -89,10 +91,12 @@ pub struct RoutingTableHealth {
|
||||
pub type BucketIndex = (CryptoKind, usize);
|
||||
|
||||
#[derive(Debug, Clone, Copy)]
|
||||
#[must_use]
|
||||
pub struct RecentPeersEntry {
|
||||
pub last_connection: Flow,
|
||||
}
|
||||
|
||||
#[must_use]
|
||||
pub(crate) struct RoutingTable {
|
||||
registry: VeilidComponentRegistry,
|
||||
inner: RwLock<RoutingTableInner>,
|
||||
@ -231,10 +235,12 @@ impl RoutingTable {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[expect(clippy::unused_async)]
|
||||
async fn post_init_async(&self) -> EyreResult<()> {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[expect(clippy::unused_async)]
|
||||
pub(crate) async fn startup(&self) -> EyreResult<()> {
|
||||
Ok(())
|
||||
}
|
||||
@ -245,6 +251,7 @@ impl RoutingTable {
|
||||
self.cancel_tasks().await;
|
||||
}
|
||||
|
||||
#[expect(clippy::unused_async)]
|
||||
async fn pre_terminate_async(&self) {}
|
||||
|
||||
/// Called to shut down the routing table
|
||||
@ -1091,7 +1098,7 @@ impl RoutingTable {
|
||||
capabilities: Vec<Capability>,
|
||||
) {
|
||||
// Ask node for nodes closest to our own node
|
||||
let closest_nodes = network_result_value_or_log!(self match self.find_nodes_close_to_self(crypto_kind, node_ref.clone(), capabilities.clone()).await {
|
||||
let closest_nodes = network_result_value_or_log!(self match pin_future!(self.find_nodes_close_to_self(crypto_kind, node_ref.clone(), capabilities.clone())).await {
|
||||
Err(e) => {
|
||||
veilid_log!(self error
|
||||
"find_self failed for {:?}: {:?}",
|
||||
@ -1107,7 +1114,7 @@ impl RoutingTable {
|
||||
// Ask each node near us to find us as well
|
||||
if wide {
|
||||
for closest_nr in closest_nodes {
|
||||
network_result_value_or_log!(self match self.find_nodes_close_to_self(crypto_kind, closest_nr.clone(), capabilities.clone()).await {
|
||||
network_result_value_or_log!(self match pin_future!(self.find_nodes_close_to_self(crypto_kind, closest_nr.clone(), capabilities.clone())).await {
|
||||
Err(e) => {
|
||||
veilid_log!(self error
|
||||
"find_self failed for {:?}: {:?}",
|
||||
|
@ -38,6 +38,7 @@ struct RouteSpecStoreInner {
|
||||
|
||||
/// The routing table's storage for private/safety routes
|
||||
#[derive(Debug)]
|
||||
#[must_use]
|
||||
pub(crate) struct RouteSpecStore {
|
||||
registry: VeilidComponentRegistry,
|
||||
inner: Mutex<RouteSpecStoreInner>,
|
||||
|
@ -157,12 +157,7 @@ impl RouteSpecStoreCache {
|
||||
}
|
||||
|
||||
/// add remote private route to caches
|
||||
/// returns a remote private route set id
|
||||
fn add_remote_private_route(
|
||||
&mut self,
|
||||
id: RouteId,
|
||||
rprinfo: RemotePrivateRouteInfo,
|
||||
) -> RouteId {
|
||||
fn add_remote_private_route(&mut self, id: RouteId, rprinfo: RemotePrivateRouteInfo) {
|
||||
// also store in id by key table
|
||||
for private_route in rprinfo.get_private_routes() {
|
||||
self.remote_private_routes_by_key
|
||||
@ -182,15 +177,14 @@ impl RouteSpecStoreCache {
|
||||
// If anything LRUs out, remove from the by-key table
|
||||
// Follow the same logic as 'remove_remote_private_route' here
|
||||
for dead_private_route in dead_rpri.get_private_routes() {
|
||||
self.remote_private_routes_by_key
|
||||
let _ = self
|
||||
.remote_private_routes_by_key
|
||||
.remove(&dead_private_route.public_key.value)
|
||||
.unwrap();
|
||||
self.invalidate_compiled_route_cache(&dead_private_route.public_key.value);
|
||||
}
|
||||
self.dead_remote_routes.push(dead_id);
|
||||
}
|
||||
|
||||
id
|
||||
}
|
||||
|
||||
/// iterate all of the remote private routes we have in the cache
|
||||
@ -311,7 +305,8 @@ impl RouteSpecStoreCache {
|
||||
return false;
|
||||
};
|
||||
for private_route in rprinfo.get_private_routes() {
|
||||
self.remote_private_routes_by_key
|
||||
let _ = self
|
||||
.remote_private_routes_by_key
|
||||
.remove(&private_route.public_key.value)
|
||||
.unwrap();
|
||||
self.invalidate_compiled_route_cache(&private_route.public_key.value);
|
||||
|
@ -75,7 +75,7 @@ impl RouteSpecStoreContent {
|
||||
pub fn remove_detail(&mut self, id: &RouteId) -> Option<RouteSetSpecDetail> {
|
||||
let detail = self.details.remove(id)?;
|
||||
for (pk, _) in detail.iter_route_set() {
|
||||
self.id_by_key.remove(pk).unwrap();
|
||||
let _ = self.id_by_key.remove(pk).unwrap();
|
||||
}
|
||||
Some(detail)
|
||||
}
|
||||
|
@ -16,6 +16,7 @@ pub type EntryCounts = BTreeMap<(RoutingDomain, CryptoKind), usize>;
|
||||
//////////////////////////////////////////////////////////////////////////
|
||||
|
||||
/// RoutingTable rwlock-internal data
|
||||
#[must_use]
|
||||
pub struct RoutingTableInner {
|
||||
/// Convenience accessor for the global component registry
|
||||
pub(super) registry: VeilidComponentRegistry,
|
||||
|
@ -17,8 +17,8 @@ pub trait RoutingDomainEditorCommonTrait {
|
||||
capabilities: Vec<Capability>,
|
||||
confirmed: bool,
|
||||
) -> &mut Self;
|
||||
fn commit(&mut self, pause_tasks: bool) -> SendPinBoxFutureLifetime<'_, bool>;
|
||||
fn shutdown(&mut self) -> SendPinBoxFutureLifetime<'_, ()>;
|
||||
fn commit(&mut self, pause_tasks: bool) -> PinBoxFuture<'_, bool>;
|
||||
fn shutdown(&mut self) -> PinBoxFuture<'_, ()>;
|
||||
fn publish(&mut self);
|
||||
}
|
||||
|
||||
|
@ -105,7 +105,7 @@ impl<'a> RoutingDomainEditorCommonTrait for RoutingDomainEditorLocalNetwork<'a>
|
||||
}
|
||||
|
||||
#[instrument(level = "debug", skip(self))]
|
||||
fn commit(&mut self, pause_tasks: bool) -> SendPinBoxFutureLifetime<'_, bool> {
|
||||
fn commit(&mut self, pause_tasks: bool) -> PinBoxFuture<'_, bool> {
|
||||
Box::pin(async move {
|
||||
// No locking if we have nothing to do
|
||||
if self.changes.is_empty() {
|
||||
@ -254,7 +254,7 @@ impl<'a> RoutingDomainEditorCommonTrait for RoutingDomainEditorLocalNetwork<'a>
|
||||
}
|
||||
|
||||
#[instrument(level = "debug", skip(self))]
|
||||
fn shutdown(&mut self) -> SendPinBoxFutureLifetime<'_, ()> {
|
||||
fn shutdown(&mut self) -> PinBoxFuture<'_, ()> {
|
||||
Box::pin(async move {
|
||||
self.clear_dial_info_details(None, None)
|
||||
.set_relay_node(None)
|
||||
|
@ -116,7 +116,7 @@ impl<'a> RoutingDomainEditorCommonTrait for RoutingDomainEditorPublicInternet<'a
|
||||
}
|
||||
|
||||
#[instrument(level = "debug", skip(self))]
|
||||
fn commit(&mut self, pause_tasks: bool) -> SendPinBoxFutureLifetime<'_, bool> {
|
||||
fn commit(&mut self, pause_tasks: bool) -> PinBoxFuture<'_, bool> {
|
||||
Box::pin(async move {
|
||||
// No locking if we have nothing to do
|
||||
if self.changes.is_empty() {
|
||||
@ -270,7 +270,7 @@ impl<'a> RoutingDomainEditorCommonTrait for RoutingDomainEditorPublicInternet<'a
|
||||
}
|
||||
|
||||
#[instrument(level = "debug", skip(self))]
|
||||
fn shutdown(&mut self) -> SendPinBoxFutureLifetime<'_, ()> {
|
||||
fn shutdown(&mut self) -> PinBoxFuture<'_, ()> {
|
||||
Box::pin(async move {
|
||||
self.clear_dial_info_details(None, None)
|
||||
.set_relay_node(None)
|
||||
|
@ -33,7 +33,7 @@ impl BootstrapRecord {
|
||||
|
||||
impl RoutingTable {
|
||||
/// Process bootstrap version 0
|
||||
async fn process_bootstrap_records_v0(
|
||||
fn process_bootstrap_records_v0(
|
||||
&self,
|
||||
records: Vec<String>,
|
||||
) -> EyreResult<Option<BootstrapRecord>> {
|
||||
@ -195,7 +195,7 @@ impl RoutingTable {
|
||||
};
|
||||
let bootstrap_record = match txt_version {
|
||||
BOOTSTRAP_TXT_VERSION_0 => {
|
||||
match self.process_bootstrap_records_v0(records).await {
|
||||
match self.process_bootstrap_records_v0(records) {
|
||||
Err(e) => {
|
||||
veilid_log!(self error
|
||||
"couldn't process v0 bootstrap records from {}: {}",
|
||||
@ -260,7 +260,7 @@ impl RoutingTable {
|
||||
&self,
|
||||
crypto_kinds: Vec<CryptoKind>,
|
||||
pi: Arc<PeerInfo>,
|
||||
unord: &FuturesUnordered<SendPinBoxFuture<()>>,
|
||||
unord: &FuturesUnordered<PinBoxFutureStatic<()>>,
|
||||
) {
|
||||
veilid_log!(self trace
|
||||
"--- bootstrapping {} with {:?}",
|
||||
@ -291,7 +291,7 @@ impl RoutingTable {
|
||||
let bsdi = match network_manager
|
||||
.get_node_contact_method(nr.default_filtered())
|
||||
{
|
||||
Ok(NodeContactMethod::Direct(v)) => v,
|
||||
Ok(Some(ncm)) if ncm.is_direct() => ncm.direct_dial_info().unwrap(),
|
||||
Ok(v) => {
|
||||
veilid_log!(nr debug "invalid contact method for bootstrap, ignoring peer: {:?}", v);
|
||||
// let _ =
|
||||
@ -342,7 +342,7 @@ impl RoutingTable {
|
||||
veilid_log!(self debug " bootstrap crypto kinds: {:?}", &crypto_kinds);
|
||||
|
||||
// Run all bootstrap operations concurrently
|
||||
let mut unord = FuturesUnordered::<SendPinBoxFuture<()>>::new();
|
||||
let mut unord = FuturesUnordered::<PinBoxFutureStatic<()>>::new();
|
||||
for peer in peers {
|
||||
self.bootstrap_with_peer(crypto_kinds.clone(), peer, &unord);
|
||||
}
|
||||
@ -367,7 +367,7 @@ impl RoutingTable {
|
||||
crypto_kinds
|
||||
}
|
||||
|
||||
#[instrument(level = "trace", skip(self), err)]
|
||||
#[instrument(level = "trace", skip_all, err)]
|
||||
pub async fn bootstrap_task_routine(
|
||||
&self,
|
||||
stop_token: StopToken,
|
||||
@ -403,7 +403,7 @@ impl RoutingTable {
|
||||
let mut peer_map = HashMap::<TypedKeyGroup, Arc<PeerInfo>>::new();
|
||||
for bootstrap_di in bootstrap_dialinfos {
|
||||
veilid_log!(self debug "direct bootstrap with: {}", bootstrap_di);
|
||||
let peers = network_manager.boot_request(bootstrap_di).await?;
|
||||
let peers = pin_future!(network_manager.boot_request(bootstrap_di)).await?;
|
||||
for peer in peers {
|
||||
if !peer_map.contains_key(peer.node_ids()) {
|
||||
peer_map.insert(peer.node_ids().clone(), peer);
|
||||
@ -413,10 +413,10 @@ impl RoutingTable {
|
||||
peer_map.into_values().collect()
|
||||
} else {
|
||||
// If not direct, resolve bootstrap servers and recurse their TXT entries
|
||||
let bsrecs = match self
|
||||
let bsrecs = match pin_future!(self
|
||||
.resolve_bootstrap(bootstrap)
|
||||
.timeout_at(stop_token.clone())
|
||||
.await
|
||||
.timeout_at(stop_token.clone()))
|
||||
.await
|
||||
{
|
||||
Ok(v) => v?,
|
||||
Err(_) => {
|
||||
|
@ -14,7 +14,7 @@ const ACTIVE_WATCH_KEEPALIVE_PING_INTERVAL_SECS: u32 = 10;
/// Ping queue processing depth per validator
const MAX_PARALLEL_PINGS: usize = 8;

type PingValidatorFuture = SendPinBoxFuture<Result<(), RPCError>>;
type PingValidatorFuture = PinBoxFutureStatic<Result<(), RPCError>>;

impl RoutingTable {
// Task routine for PublicInternet status pings
|
||||
@ -258,7 +258,7 @@ impl RoutingTable {
|
||||
futurequeue.push_back(
|
||||
async move {
|
||||
#[cfg(feature = "verbose-tracing")]
|
||||
veilid_log!(self debug "--> PublicInternet Validator ping to {:?}", nr);
|
||||
veilid_log!(nr debug "--> PublicInternet Validator ping to {:?}", nr);
|
||||
let rpc_processor = nr.rpc_processor();
|
||||
let _ = rpc_processor
|
||||
.rpc_call_status(Destination::direct(nr))
|
||||
@ -291,7 +291,7 @@ impl RoutingTable {
|
||||
futurequeue.push_back(
|
||||
async move {
|
||||
#[cfg(feature = "verbose-tracing")]
|
||||
veilid_log!(self debug "--> LocalNetwork Validator ping to {:?}", nr);
|
||||
veilid_log!(nr debug "--> LocalNetwork Validator ping to {:?}", nr);
|
||||
let rpc_processor = nr.rpc_processor();
|
||||
let _ = rpc_processor
|
||||
.rpc_call_status(Destination::direct(nr))
|
||||
|
@ -57,7 +57,7 @@ pub async fn test_routingtable_buckets_round_trip() {
mock_registry::terminate(copy_registry).await;
}

pub async fn test_round_trip_peerinfo() {
pub fn test_round_trip_peerinfo() {
let mut tks = TypedKeyGroup::new();
tks.add(TypedKey::new(
CRYPTO_KIND_VLD0,
@ -93,5 +93,5 @@ pub async fn test_round_trip_peerinfo() {

pub async fn test_all() {
test_routingtable_buckets_round_trip().await;
test_round_trip_peerinfo().await;
test_round_trip_peerinfo();
}
|
||||
|
@ -64,7 +64,10 @@ pub struct FanoutCallOutput {
}

pub type FanoutCallResult = RPCNetworkResult<FanoutCallOutput>;
pub type FanoutNodeInfoFilter = Arc<dyn Fn(&[TypedKey], &NodeInfo) -> bool + Send + Sync>;
pub type FanoutNodeInfoFilter = Arc<dyn (Fn(&[TypedKey], &NodeInfo) -> bool) + Send + Sync>;
pub type FanoutCheckDone<R> = Arc<dyn (Fn(&[NodeRef]) -> Option<R>) + Send + Sync>;
pub type FanoutCallRoutine =
Arc<dyn (Fn(NodeRef) -> PinBoxFutureStatic<FanoutCallResult>) + Send + Sync>;

pub fn empty_fanout_node_info_filter() -> FanoutNodeInfoFilter {
Arc::new(|_, _| true)
|
||||
@ -91,12 +94,9 @@ pub fn capability_fanout_node_info_filter(caps: Vec<Capability>) -> FanoutNodeIn
|
||||
/// If the algorithm times out, a Timeout result is returned, however operations will still have been performed and a
|
||||
/// timeout is not necessarily indicative of an algorithmic 'failure', just that no definitive stopping condition was found
|
||||
/// in the given time
|
||||
pub(crate) struct FanoutCall<'a, R, F, C, D>
|
||||
pub(crate) struct FanoutCall<'a, R>
|
||||
where
|
||||
R: Unpin,
|
||||
F: Future<Output = FanoutCallResult>,
|
||||
C: Fn(NodeRef) -> F,
|
||||
D: Fn(&[NodeRef]) -> Option<R>,
|
||||
{
|
||||
routing_table: &'a RoutingTable,
|
||||
node_id: TypedKey,
|
||||
@ -105,16 +105,13 @@ where
|
||||
fanout: usize,
|
||||
timeout_us: TimestampDuration,
|
||||
node_info_filter: FanoutNodeInfoFilter,
|
||||
call_routine: C,
|
||||
check_done: D,
|
||||
call_routine: FanoutCallRoutine,
|
||||
check_done: FanoutCheckDone<R>,
|
||||
}
|
||||
|
||||
impl<'a, R, F, C, D> FanoutCall<'a, R, F, C, D>
|
||||
impl<'a, R> FanoutCall<'a, R>
|
||||
where
|
||||
R: Unpin,
|
||||
F: Future<Output = FanoutCallResult>,
|
||||
C: Fn(NodeRef) -> F,
|
||||
D: Fn(&[NodeRef]) -> Option<R>,
|
||||
{
|
||||
#[allow(clippy::too_many_arguments)]
|
||||
pub fn new(
|
||||
@ -124,8 +121,8 @@ where
|
||||
fanout: usize,
|
||||
timeout_us: TimestampDuration,
|
||||
node_info_filter: FanoutNodeInfoFilter,
|
||||
call_routine: C,
|
||||
check_done: D,
|
||||
call_routine: FanoutCallRoutine,
|
||||
check_done: FanoutCheckDone<R>,
|
||||
) -> Self {
|
||||
let context = Mutex::new(FanoutContext {
|
||||
fanout_queue: FanoutQueue::new(node_id.kind),
|
||||
|
@ -68,12 +68,13 @@ impl_veilid_log_facility!("rpc");
|
||||
/////////////////////////////////////////////////////////////////////
|
||||
|
||||
#[derive(Debug)]
|
||||
#[must_use]
|
||||
struct WaitableReply {
|
||||
handle: OperationWaitHandle<Message, Option<QuestionContext>>,
|
||||
timeout_us: TimestampDuration,
|
||||
node_ref: NodeRef,
|
||||
send_ts: Timestamp,
|
||||
send_data_method: SendDataMethod,
|
||||
send_data_method: SendDataResult,
|
||||
safety_route: Option<PublicKey>,
|
||||
remote_private_route: Option<PublicKey>,
|
||||
reply_private_route: Option<PublicKey>,
|
||||
@ -83,6 +84,7 @@ struct WaitableReply {
|
||||
/////////////////////////////////////////////////////////////////////
|
||||
|
||||
#[derive(Copy, Clone, Debug)]
|
||||
#[must_use]
|
||||
enum RPCKind {
|
||||
Question,
|
||||
Statement,
|
||||
@ -92,6 +94,7 @@ enum RPCKind {
|
||||
/////////////////////////////////////////////////////////////////////
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
#[must_use]
|
||||
pub struct RPCProcessorStartupContext {
|
||||
pub startup_lock: Arc<StartupLock>,
|
||||
}
|
||||
@ -111,6 +114,7 @@ impl Default for RPCProcessorStartupContext {
|
||||
/////////////////////////////////////////////////////////////////////
|
||||
|
||||
#[derive(Debug)]
|
||||
#[must_use]
|
||||
struct RPCProcessorInner {
|
||||
send_channel: Option<flume::Sender<(Span, MessageEncoded)>>,
|
||||
stop_source: Option<StopSource>,
|
||||
@ -118,6 +122,7 @@ struct RPCProcessorInner {
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
#[must_use]
|
||||
pub(crate) struct RPCProcessor {
|
||||
registry: VeilidComponentRegistry,
|
||||
inner: Mutex<RPCProcessorInner>,
|
||||
@ -183,14 +188,17 @@ impl RPCProcessor {
|
||||
/////////////////////////////////////
|
||||
/// Initialization
|
||||
|
||||
#[expect(clippy::unused_async)]
|
||||
async fn init_async(&self) -> EyreResult<()> {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[expect(clippy::unused_async)]
|
||||
async fn post_init_async(&self) -> EyreResult<()> {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[expect(clippy::unused_async)]
|
||||
async fn pre_terminate_async(&self) {
|
||||
// Ensure things have shut down
|
||||
assert!(
|
||||
@ -199,6 +207,7 @@ impl RPCProcessor {
|
||||
);
|
||||
}
|
||||
|
||||
#[expect(clippy::unused_async)]
|
||||
async fn terminate_async(&self) {}
|
||||
|
||||
//////////////////////////////////////////////////////////////////////
|
||||
@ -223,7 +232,7 @@ impl RPCProcessor {
|
||||
let stop_token = inner.stop_source.as_ref().unwrap().token();
|
||||
let jh = spawn(&format!("rpc worker {}", task_n), async move {
|
||||
let this = registry.rpc_processor();
|
||||
this.rpc_worker(stop_token, receiver).await
|
||||
Box::pin(this.rpc_worker(stop_token, receiver)).await
|
||||
});
|
||||
inner.worker_join_handles.push(jh);
|
||||
}
|
||||
@ -368,9 +377,10 @@ impl RPCProcessor {
|
||||
}
|
||||
|
||||
// Routine to call to generate fanout
|
||||
let call_routine = |next_node: NodeRef| {
|
||||
let registry = self.registry();
|
||||
async move {
|
||||
let registry = self.registry();
|
||||
let call_routine = Arc::new(move |next_node: NodeRef| {
|
||||
let registry = registry.clone();
|
||||
Box::pin(async move {
|
||||
let this = registry.rpc_processor();
|
||||
let v = network_result_try!(
|
||||
this.rpc_call_find_node(
|
||||
@ -384,11 +394,11 @@ impl RPCProcessor {
|
||||
Ok(NetworkResult::value(FanoutCallOutput {
|
||||
peer_info_list: v.answer,
|
||||
}))
|
||||
}
|
||||
};
|
||||
}) as PinBoxFuture<FanoutCallResult>
|
||||
});
|
||||
|
||||
// Routine to call to check if we're done at each step
|
||||
let check_done = |_: &[NodeRef]| {
|
||||
let check_done = Arc::new(move |_: &[NodeRef]| {
|
||||
let Ok(Some(nr)) = routing_table.lookup_node_ref(node_id) else {
|
||||
return None;
|
||||
};
|
||||
@ -401,7 +411,7 @@ impl RPCProcessor {
|
||||
}
|
||||
|
||||
None
|
||||
};
|
||||
});
|
||||
|
||||
// Call the fanout
|
||||
let routing_table = self.routing_table();
|
||||
@ -421,13 +431,13 @@ impl RPCProcessor {
|
||||
|
||||
/// Search the DHT for a specific node corresponding to a key unless we
|
||||
/// have that node in our routing table already, and return the node reference
|
||||
/// Note: This routine can possibly be recursive, hence the SendPinBoxFuture async form
|
||||
/// Note: This routine can possibly be recursive, hence the PinBoxFuture async form
|
||||
#[instrument(level = "trace", target = "rpc", skip_all)]
|
||||
pub fn resolve_node(
|
||||
&self,
|
||||
node_id: TypedKey,
|
||||
safety_selection: SafetySelection,
|
||||
) -> SendPinBoxFuture<Result<Option<NodeRef>, RPCError>> {
|
||||
) -> PinBoxFuture<Result<Option<NodeRef>, RPCError>> {
|
||||
let registry = self.registry();
|
||||
Box::pin(
|
||||
async move {
|
||||
@ -1165,11 +1175,12 @@ impl RPCProcessor {
|
||||
);
|
||||
|
||||
// Ref the connection so it doesn't go away until we're done with the waitable reply
|
||||
let opt_connection_ref_scope = send_data_method.unique_flow.connection_id.and_then(|id| {
|
||||
self.network_manager()
|
||||
.connection_manager()
|
||||
.try_connection_ref_scope(id)
|
||||
});
|
||||
let opt_connection_ref_scope =
|
||||
send_data_method.unique_flow().connection_id.and_then(|id| {
|
||||
self.network_manager()
|
||||
.connection_manager()
|
||||
.try_connection_ref_scope(id)
|
||||
});
|
||||
|
||||
// Pass back waitable reply completion
|
||||
Ok(NetworkResult::value(WaitableReply {
|
||||
@ -1506,35 +1517,75 @@ impl RPCProcessor {
|
||||
|
||||
// Process specific message kind
|
||||
match msg.operation.kind() {
|
||||
RPCOperationKind::Question(q) => match q.detail() {
|
||||
RPCQuestionDetail::StatusQ(_) => self.process_status_q(msg).await,
|
||||
RPCQuestionDetail::FindNodeQ(_) => self.process_find_node_q(msg).await,
|
||||
RPCQuestionDetail::AppCallQ(_) => self.process_app_call_q(msg).await,
|
||||
RPCQuestionDetail::GetValueQ(_) => self.process_get_value_q(msg).await,
|
||||
RPCQuestionDetail::SetValueQ(_) => self.process_set_value_q(msg).await,
|
||||
RPCQuestionDetail::WatchValueQ(_) => self.process_watch_value_q(msg).await,
|
||||
RPCQuestionDetail::InspectValueQ(_) => self.process_inspect_value_q(msg).await,
|
||||
#[cfg(feature = "unstable-blockstore")]
|
||||
RPCQuestionDetail::SupplyBlockQ(_) => self.process_supply_block_q(msg).await,
|
||||
#[cfg(feature = "unstable-blockstore")]
|
||||
RPCQuestionDetail::FindBlockQ(_) => self.process_find_block_q(msg).await,
|
||||
#[cfg(feature = "unstable-tunnels")]
|
||||
RPCQuestionDetail::StartTunnelQ(_) => self.process_start_tunnel_q(msg).await,
|
||||
#[cfg(feature = "unstable-tunnels")]
|
||||
RPCQuestionDetail::CompleteTunnelQ(_) => self.process_complete_tunnel_q(msg).await,
|
||||
#[cfg(feature = "unstable-tunnels")]
|
||||
RPCQuestionDetail::CancelTunnelQ(_) => self.process_cancel_tunnel_q(msg).await,
|
||||
},
|
||||
RPCOperationKind::Statement(s) => match s.detail() {
|
||||
RPCStatementDetail::ValidateDialInfo(_) => {
|
||||
self.process_validate_dial_info(msg).await
|
||||
}
|
||||
RPCStatementDetail::Route(_) => self.process_route(msg).await,
|
||||
RPCStatementDetail::ValueChanged(_) => self.process_value_changed(msg).await,
|
||||
RPCStatementDetail::Signal(_) => self.process_signal(msg).await,
|
||||
RPCStatementDetail::ReturnReceipt(_) => self.process_return_receipt(msg).await,
|
||||
RPCStatementDetail::AppMessage(_) => self.process_app_message(msg).await,
|
||||
},
|
||||
RPCOperationKind::Question(q) => {
|
||||
let res = match q.detail() {
|
||||
RPCQuestionDetail::StatusQ(_) => {
|
||||
pin_dyn_future_closure!(self.process_status_q(msg))
|
||||
}
|
||||
RPCQuestionDetail::FindNodeQ(_) => {
|
||||
pin_dyn_future_closure!(self.process_find_node_q(msg))
|
||||
}
|
||||
RPCQuestionDetail::AppCallQ(_) => {
|
||||
pin_dyn_future_closure!(self.process_app_call_q(msg))
|
||||
}
|
||||
RPCQuestionDetail::GetValueQ(_) => {
|
||||
pin_dyn_future_closure!(self.process_get_value_q(msg))
|
||||
}
|
||||
RPCQuestionDetail::SetValueQ(_) => {
|
||||
pin_dyn_future_closure!(self.process_set_value_q(msg))
|
||||
}
|
||||
RPCQuestionDetail::WatchValueQ(_) => {
|
||||
pin_dyn_future_closure!(self.process_watch_value_q(msg))
|
||||
}
|
||||
RPCQuestionDetail::InspectValueQ(_) => {
|
||||
pin_dyn_future_closure!(self.process_inspect_value_q(msg))
|
||||
}
|
||||
#[cfg(feature = "unstable-blockstore")]
|
||||
RPCQuestionDetail::SupplyBlockQ(_) => {
|
||||
pin_dyn_future_closure!(self.process_supply_block_q(msg))
|
||||
}
|
||||
#[cfg(feature = "unstable-blockstore")]
|
||||
RPCQuestionDetail::FindBlockQ(_) => {
|
||||
pin_dyn_future_closure!(self.process_find_block_q(msg))
|
||||
}
|
||||
#[cfg(feature = "unstable-tunnels")]
|
||||
RPCQuestionDetail::StartTunnelQ(_) => {
|
||||
pin_dyn_future_closure!(self.process_start_tunnel_q(msg))
|
||||
}
|
||||
#[cfg(feature = "unstable-tunnels")]
|
||||
RPCQuestionDetail::CompleteTunnelQ(_) => {
|
||||
pin_dyn_future_closure!(self.process_complete_tunnel_q(msg))
|
||||
}
|
||||
#[cfg(feature = "unstable-tunnels")]
|
||||
RPCQuestionDetail::CancelTunnelQ(_) => {
|
||||
pin_dyn_future_closure!(self.process_cancel_tunnel_q(msg))
|
||||
}
|
||||
};
|
||||
res.await
|
||||
}
|
||||
RPCOperationKind::Statement(s) => {
|
||||
let res = match s.detail() {
|
||||
RPCStatementDetail::ValidateDialInfo(_) => {
|
||||
pin_dyn_future_closure!(self.process_validate_dial_info(msg))
|
||||
}
|
||||
RPCStatementDetail::Route(_) => {
|
||||
pin_dyn_future_closure!(self.process_route(msg))
|
||||
}
|
||||
RPCStatementDetail::ValueChanged(_) => {
|
||||
pin_dyn_future_closure!(self.process_value_changed(msg))
|
||||
}
|
||||
RPCStatementDetail::Signal(_) => {
|
||||
pin_dyn_future_closure!(self.process_signal(msg))
|
||||
}
|
||||
RPCStatementDetail::ReturnReceipt(_) => {
|
||||
pin_dyn_future_closure!(self.process_return_receipt(msg))
|
||||
}
|
||||
RPCStatementDetail::AppMessage(_) => {
|
||||
pin_dyn_future_closure!(self.process_app_message(msg))
|
||||
}
|
||||
};
|
||||
res.await
|
||||
}
|
||||
RPCOperationKind::Answer(_) => {
|
||||
let op_id = msg.operation.op_id();
|
||||
if let Err(e) = self.waiting_rpc_table.complete_op_waiter(op_id, msg) {
|
||||
|
@ -115,18 +115,13 @@ impl RPCProcessor {
|
||||
} => {
|
||||
if matches!(safety_selection, SafetySelection::Unsafe(_)) {
|
||||
if let Some(sender_info) = sender_info {
|
||||
if send_data_method.opt_relayed_contact_method.is_none()
|
||||
&& matches!(
|
||||
send_data_method.contact_method,
|
||||
NodeContactMethod::Direct(_)
|
||||
)
|
||||
{
|
||||
if send_data_method.is_direct() {
|
||||
// Directly requested status that actually gets sent directly and not over a relay will tell us what our IP address appears as
|
||||
// If this changes, we'd want to know about that to reset the networking stack
|
||||
opt_previous_sender_info = target.report_sender_info(
|
||||
routing_domain,
|
||||
send_data_method.unique_flow.flow.protocol_type(),
|
||||
send_data_method.unique_flow.flow.address_type(),
|
||||
send_data_method.unique_flow().flow.protocol_type(),
|
||||
send_data_method.unique_flow().flow.address_type(),
|
||||
sender_info,
|
||||
);
|
||||
};
|
||||
@ -137,7 +132,7 @@ impl RPCProcessor {
|
||||
routing_domain,
|
||||
socket_address: sender_info.socket_address,
|
||||
old_socket_address: opt_previous_sender_info.map(|s| s.socket_address),
|
||||
flow: send_data_method.unique_flow.flow,
|
||||
flow: send_data_method.unique_flow().flow,
|
||||
reporting_peer: target.unfiltered(),
|
||||
}) {
|
||||
veilid_log!(self debug "Failed to post event: {}", e);
|
||||
|
@ -82,7 +82,7 @@ impl RPCProcessor {
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
#[instrument(level = "trace", target = "rpc", skip(self, msg), fields(msg.operation.op_id), ret, err)]
|
||||
//#[instrument(level = "trace", target = "rpc", skip(self, msg), fields(msg.operation.op_id), ret, err)]
|
||||
pub(super) async fn process_validate_dial_info(&self, msg: Message) -> RPCNetworkResult<()> {
|
||||
// Ensure this never came over a private route, safety route is okay though
|
||||
let detail = match msg.header.detail {
|
||||
@ -187,7 +187,7 @@ impl RPCProcessor {
|
||||
|
||||
// Send the validate_dial_info request
|
||||
// This can only be sent directly, as relays can not validate dial info
|
||||
network_result_value_or_log!(self self.statement(Destination::direct(peer.default_filtered()), statement)
|
||||
network_result_value_or_log!(self pin_future_closure!(self.statement(Destination::direct(peer.default_filtered()), statement))
|
||||
.await? => [ format!(": peer={} statement={:?}", peer, statement) ] {
|
||||
continue;
|
||||
}
|
||||
|
@ -33,11 +33,20 @@ impl StorageManager {
}
format!("{}]\n", out)
}

pub async fn debug_offline_records(&self) -> String {
let inner = self.inner.lock().await;
let Some(local_record_store) = &inner.local_record_store else {
return "not initialized".to_owned();
};

let mut out = "[\n".to_owned();
for (k, v) in &inner.offline_subkey_writes {
out += &format!(" {}:{:?}\n", k, v);
let record_info = local_record_store
.peek_record(*k, |r| format!("{} nodes", r.detail().nodes.len()))
.unwrap_or("Not found".to_owned());

out += &format!(" {}:{:?}, {}\n", k, v, record_info);
}
format!("{}]\n", out)
}
|
||||
|
@ -84,11 +84,11 @@ impl StorageManager {
|
||||
let call_routine = {
|
||||
let context = context.clone();
|
||||
let registry = self.registry();
|
||||
move |next_node: NodeRef| {
|
||||
Arc::new(move |next_node: NodeRef| {
|
||||
let context = context.clone();
|
||||
let registry = registry.clone();
|
||||
let last_descriptor = last_get_result.opt_descriptor.clone();
|
||||
async move {
|
||||
Box::pin(async move {
|
||||
let rpc_processor = registry.rpc_processor();
|
||||
let gva = network_result_try!(
|
||||
rpc_processor
|
||||
@ -189,8 +189,8 @@ impl StorageManager {
|
||||
veilid_log!(registry debug target:"network_result", "GetValue fanout call returned peers {}", gva.answer.peers.len());
|
||||
|
||||
Ok(NetworkResult::value(FanoutCallOutput{peer_info_list: gva.answer.peers}))
|
||||
}.instrument(tracing::trace_span!("outbound_get_value fanout routine"))
|
||||
}
|
||||
}.instrument(tracing::trace_span!("outbound_get_value fanout routine"))) as PinBoxFuture<FanoutCallResult>
|
||||
})
|
||||
};
|
||||
|
||||
// Routine to call to check if we're done at each step
|
||||
@ -198,7 +198,7 @@ impl StorageManager {
|
||||
let context = context.clone();
|
||||
let out_tx = out_tx.clone();
|
||||
let registry = self.registry();
|
||||
move |_closest_nodes: &[NodeRef]| {
|
||||
Arc::new(move |_closest_nodes: &[NodeRef]| {
|
||||
let mut ctx = context.lock();
|
||||
|
||||
// send partial update if desired
|
||||
@ -229,7 +229,7 @@ impl StorageManager {
|
||||
return Some(());
|
||||
}
|
||||
None
|
||||
}
|
||||
})
|
||||
};
|
||||
|
||||
// Call the fanout in a spawned task
|
||||
@ -305,7 +305,7 @@ impl StorageManager {
|
||||
self.process_deferred_results(
|
||||
res_rx,
|
||||
Box::new(
|
||||
move |result: VeilidAPIResult<get_value::OutboundGetValueResult>| -> SendPinBoxFuture<bool> {
|
||||
move |result: VeilidAPIResult<get_value::OutboundGetValueResult>| -> PinBoxFutureStatic<bool> {
|
||||
let registry=registry.clone();
|
||||
Box::pin(async move {
|
||||
let this = registry.storage_manager();
|
||||
|
@ -120,12 +120,12 @@ impl StorageManager {
|
||||
let call_routine = {
|
||||
let context = context.clone();
|
||||
let registry = self.registry();
|
||||
move |next_node: NodeRef| {
|
||||
Arc::new(move |next_node: NodeRef| {
|
||||
let context = context.clone();
|
||||
let registry = registry.clone();
|
||||
let opt_descriptor = local_inspect_result.opt_descriptor.clone();
|
||||
let subkeys = subkeys.clone();
|
||||
async move {
|
||||
Box::pin(async move {
|
||||
let rpc_processor = registry.rpc_processor();
|
||||
|
||||
let iva = network_result_try!(
|
||||
@ -159,7 +159,7 @@ impl StorageManager {
|
||||
|
||||
// Keep the value if we got one and it is newer and it passes schema validation
|
||||
if !answer.seqs.is_empty() {
|
||||
veilid_log!(self debug "Got seqs back: len={}", answer.seqs.len());
|
||||
veilid_log!(registry debug "Got seqs back: len={}", answer.seqs.len());
|
||||
let mut ctx = context.lock();
|
||||
|
||||
// Ensure we have a schema and descriptor etc
|
||||
@ -239,25 +239,29 @@ impl StorageManager {
|
||||
veilid_log!(registry debug target:"network_result", "InspectValue fanout call returned peers {}", answer.peers.len());
|
||||
|
||||
Ok(NetworkResult::value(FanoutCallOutput { peer_info_list: answer.peers}))
|
||||
}.instrument(tracing::trace_span!("outbound_inspect_value fanout call"))
|
||||
}
|
||||
}.instrument(tracing::trace_span!("outbound_inspect_value fanout call"))) as PinBoxFuture<FanoutCallResult>
|
||||
})
|
||||
};
|
||||
|
||||
// Routine to call to check if we're done at each step
|
||||
let check_done = |_closest_nodes: &[NodeRef]| {
|
||||
// If we have reached sufficient consensus on all subkeys, return done
|
||||
let ctx = context.lock();
|
||||
let mut has_consensus = true;
|
||||
for cs in ctx.seqcounts.iter() {
|
||||
if cs.value_nodes.len() < consensus_count {
|
||||
has_consensus = false;
|
||||
break;
|
||||
|
||||
let check_done = {
|
||||
let context = context.clone();
|
||||
Arc::new(move |_closest_nodes: &[NodeRef]| {
|
||||
// If we have reached sufficient consensus on all subkeys, return done
|
||||
let ctx = context.lock();
|
||||
let mut has_consensus = true;
|
||||
for cs in ctx.seqcounts.iter() {
|
||||
if cs.value_nodes.len() < consensus_count {
|
||||
has_consensus = false;
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
if !ctx.seqcounts.is_empty() && ctx.opt_descriptor_info.is_some() && has_consensus {
|
||||
return Some(());
|
||||
}
|
||||
None
|
||||
if !ctx.seqcounts.is_empty() && ctx.opt_descriptor_info.is_some() && has_consensus {
|
||||
return Some(());
|
||||
}
|
||||
None
|
||||
})
|
||||
};
|
||||
|
||||
// Call the fanout
|
||||
|
@ -62,7 +62,7 @@ struct StorageManagerInner {
|
||||
/// Storage manager metadata that is persistent, including copy of offline subkey writes
|
||||
pub metadata_db: Option<TableDB>,
|
||||
/// Background processing task (not part of attachment manager tick tree so it happens when detached too)
|
||||
pub tick_future: Option<SendPinBoxFuture<()>>,
|
||||
pub tick_future: Option<PinBoxFutureStatic<()>>,
|
||||
}
|
||||
|
||||
impl fmt::Debug for StorageManagerInner {
|
||||
@ -237,7 +237,7 @@ impl StorageManager {
|
||||
}
|
||||
|
||||
// Start deferred results processors
|
||||
self.deferred_result_processor.init().await;
|
||||
self.deferred_result_processor.init();
|
||||
|
||||
Ok(())
|
||||
}
|
||||
@ -966,9 +966,8 @@ impl StorageManager {
|
||||
|
||||
// Update the watch. This just calls through to the above watch_values() function
|
||||
// This will update the active_watch so we don't need to do that in this routine.
|
||||
let expiration_ts = self
|
||||
.watch_values(key, subkeys, active_watch.expiration_ts, count)
|
||||
.await?;
|
||||
let expiration_ts =
|
||||
pin_future!(self.watch_values(key, subkeys, active_watch.expiration_ts, count)).await?;
|
||||
|
||||
// A zero expiration time returned from watch_value() means the watch is done
|
||||
// or no subkeys are left, and the watch is no longer active
|
||||
@ -1739,7 +1738,7 @@ impl StorageManager {
|
||||
pub(super) fn process_deferred_results<T: Send + 'static>(
|
||||
&self,
|
||||
receiver: flume::Receiver<T>,
|
||||
handler: impl FnMut(T) -> SendPinBoxFuture<bool> + Send + 'static,
|
||||
handler: impl FnMut(T) -> PinBoxFutureStatic<bool> + Send + 'static,
|
||||
) -> bool {
|
||||
self.deferred_result_processor
|
||||
.add(receiver.into_stream(), handler)
|
||||
|
@ -82,11 +82,11 @@ impl StorageManager {
|
||||
let context = context.clone();
|
||||
let registry = self.registry();
|
||||
|
||||
move |next_node: NodeRef| {
|
||||
Arc::new(move |next_node: NodeRef| {
|
||||
let registry = registry.clone();
|
||||
let context = context.clone();
|
||||
let descriptor = descriptor.clone();
|
||||
async move {
|
||||
Box::pin(async move {
|
||||
let rpc_processor = registry.rpc_processor();
|
||||
|
||||
let send_descriptor = true; // xxx check if next_node needs the descriptor or not, see issue #203
|
||||
@ -187,8 +187,8 @@ impl StorageManager {
|
||||
ctx.send_partial_update = true;
|
||||
|
||||
Ok(NetworkResult::value(FanoutCallOutput{peer_info_list:sva.answer.peers}))
|
||||
}.instrument(tracing::trace_span!("fanout call_routine"))
|
||||
}
|
||||
}.instrument(tracing::trace_span!("fanout call_routine"))) as PinBoxFuture<FanoutCallResult>
|
||||
})
|
||||
};
|
||||
|
||||
// Routine to call to check if we're done at each step
|
||||
@ -196,7 +196,7 @@ impl StorageManager {
|
||||
let context = context.clone();
|
||||
let out_tx = out_tx.clone();
|
||||
let registry = self.registry();
|
||||
move |_closest_nodes: &[NodeRef]| {
|
||||
Arc::new(move |_closest_nodes: &[NodeRef]| {
|
||||
let mut ctx = context.lock();
|
||||
|
||||
// send partial update if desired
|
||||
@ -233,7 +233,7 @@ impl StorageManager {
|
||||
return Some(());
|
||||
}
|
||||
None
|
||||
}
|
||||
})
|
||||
};
|
||||
|
||||
// Call the fanout in a spawned task
|
||||
@ -308,7 +308,7 @@ impl StorageManager {
|
||||
self.process_deferred_results(
|
||||
res_rx,
|
||||
Box::new(
|
||||
move |result: VeilidAPIResult<set_value::OutboundSetValueResult>| -> SendPinBoxFuture<bool> {
|
||||
move |result: VeilidAPIResult<set_value::OutboundSetValueResult>| -> PinBoxFutureStatic<bool> {
|
||||
let registry = registry.clone();
|
||||
let last_value_data = last_value_data.clone();
|
||||
Box::pin(async move {
|
||||
|
@ -26,7 +26,7 @@ impl SignedValueDescriptor {
);
}
// validate schema
DHTSchema::try_from(self.schema_data.as_slice())?;
let _ = DHTSchema::try_from(self.schema_data.as_slice())?;
Ok(())
}
|
||||
|
||||
|
@ -99,18 +99,17 @@ impl StorageManager {
|
||||
opt_watcher.unwrap_or_else(|| self.anonymous_watch_keys.get(key.kind).unwrap().value);
|
||||
|
||||
let wva = VeilidAPIError::from_network_result(
|
||||
self.rpc_processor()
|
||||
.rpc_call_watch_value(
|
||||
Destination::direct(watch_node.routing_domain_filtered(routing_domain))
|
||||
.with_safety(safety_selection),
|
||||
key,
|
||||
subkeys,
|
||||
expiration,
|
||||
count,
|
||||
watcher,
|
||||
Some(watch_id),
|
||||
)
|
||||
.await?,
|
||||
pin_future!(self.rpc_processor().rpc_call_watch_value(
|
||||
Destination::direct(watch_node.routing_domain_filtered(routing_domain))
|
||||
.with_safety(safety_selection),
|
||||
key,
|
||||
subkeys,
|
||||
expiration,
|
||||
count,
|
||||
watcher,
|
||||
Some(watch_id),
|
||||
))
|
||||
.await?,
|
||||
)?;
|
||||
|
||||
if wva.answer.accepted {
|
||||
@ -233,13 +232,13 @@ impl StorageManager {
|
||||
let call_routine = {
|
||||
let context = context.clone();
|
||||
let registry = self.registry();
|
||||
move |next_node: NodeRef| {
|
||||
Arc::new(move |next_node: NodeRef| {
|
||||
let context = context.clone();
|
||||
let registry = registry.clone();
|
||||
|
||||
let subkeys = subkeys.clone();
|
||||
|
||||
async move {
|
||||
Box::pin(async move {
|
||||
let rpc_processor = registry.rpc_processor();
|
||||
let wva = network_result_try!(
|
||||
rpc_processor
|
||||
@ -282,18 +281,21 @@ impl StorageManager {
|
||||
veilid_log!(registry debug target:"network_result", "WatchValue fanout call returned peers {} ({})", wva.answer.peers.len(), next_node);
|
||||
|
||||
Ok(NetworkResult::value(FanoutCallOutput{peer_info_list: wva.answer.peers}))
|
||||
}.instrument(tracing::trace_span!("outbound_watch_value call routine"))
|
||||
}
|
||||
}.instrument(tracing::trace_span!("outbound_watch_value call routine"))) as PinBoxFuture<FanoutCallResult>
|
||||
})
|
||||
};
|
||||
|
||||
// Routine to call to check if we're done at each step
|
||||
let check_done = |_closest_nodes: &[NodeRef]| {
|
||||
// If a watch has succeeded, return done
|
||||
let ctx = context.lock();
|
||||
if ctx.opt_watch_value_result.is_some() {
|
||||
return Some(());
|
||||
}
|
||||
None
|
||||
let check_done = {
|
||||
let context = context.clone();
|
||||
Arc::new(move |_closest_nodes: &[NodeRef]| {
|
||||
// If a watch has succeeded, return done
|
||||
let ctx = context.lock();
|
||||
if ctx.opt_watch_value_result.is_some() {
|
||||
return Some(());
|
||||
}
|
||||
None
|
||||
})
|
||||
};
|
||||
|
||||
// Call the fanout
|
||||
|
@ -23,6 +23,7 @@ const ALL_TABLE_NAMES: &[u8] = b"all_table_names";
|
||||
/// Description of column
|
||||
#[derive(Clone, Debug, Default, PartialEq, Eq, Serialize, Deserialize, JsonSchema)]
|
||||
#[cfg_attr(all(target_arch = "wasm32", target_os = "unknown"), derive(Tsify))]
|
||||
#[must_use]
|
||||
pub struct ColumnInfo {
|
||||
pub key_count: AlignedU64,
|
||||
}
|
||||
@ -30,6 +31,7 @@ pub struct ColumnInfo {
|
||||
/// IO Stats for table
|
||||
#[derive(Clone, Debug, Default, PartialEq, Eq, Serialize, Deserialize, JsonSchema)]
|
||||
#[cfg_attr(all(target_arch = "wasm32", target_os = "unknown"), derive(Tsify))]
|
||||
#[must_use]
|
||||
pub struct IOStatsInfo {
|
||||
/// Number of transaction.
|
||||
pub transactions: AlignedU64,
|
||||
@ -54,6 +56,7 @@ pub struct IOStatsInfo {
|
||||
/// Description of table
|
||||
#[derive(Clone, Debug, Default, PartialEq, Eq, Serialize, Deserialize, JsonSchema)]
|
||||
#[cfg_attr(all(target_arch = "wasm32", target_os = "unknown"), derive(Tsify))]
|
||||
#[must_use]
|
||||
pub struct TableInfo {
|
||||
/// Internal table name
|
||||
pub table_name: String,
|
||||
@ -67,6 +70,7 @@ pub struct TableInfo {
|
||||
pub columns: Vec<ColumnInfo>,
|
||||
}
|
||||
|
||||
#[must_use]
|
||||
struct TableStoreInner {
|
||||
opened: BTreeMap<String, Weak<TableDBUnlockedInner>>,
|
||||
encryption_key: Option<TypedSharedSecret>,
|
||||
@ -87,6 +91,7 @@ impl fmt::Debug for TableStoreInner {
|
||||
|
||||
/// Veilid Table Storage.
|
||||
/// Database for storing key value pairs persistently and securely across runs.
|
||||
#[must_use]
|
||||
pub struct TableStore {
|
||||
registry: VeilidComponentRegistry,
|
||||
inner: Mutex<TableStoreInner>, // Sync mutex here because TableDB drops can happen at any time
|
||||
@ -160,7 +165,7 @@ impl TableStore {
|
||||
})
|
||||
}
|
||||
|
||||
async fn name_get_or_create(&self, table: &str) -> VeilidAPIResult<String> {
|
||||
fn name_get_or_create(&self, table: &str) -> VeilidAPIResult<String> {
|
||||
let name = self.namespaced_name(table)?;
|
||||
|
||||
let mut inner = self.inner.lock();
|
||||
@ -356,8 +361,7 @@ impl TableStore {
|
||||
async fn load_device_encryption_key(&self) -> EyreResult<Option<TypedSharedSecret>> {
|
||||
let dek_bytes: Option<Vec<u8>> = self
|
||||
.protected_store()
|
||||
.load_user_secret("device_encryption_key")
|
||||
.await?;
|
||||
.load_user_secret("device_encryption_key")?;
|
||||
let Some(dek_bytes) = dek_bytes else {
|
||||
veilid_log!(self debug "no device encryption key");
|
||||
return Ok(None);
|
||||
@ -383,8 +387,7 @@ impl TableStore {
|
||||
// Remove the device encryption key
|
||||
let existed = self
|
||||
.protected_store()
|
||||
.remove_user_secret("device_encryption_key")
|
||||
.await?;
|
||||
.remove_user_secret("device_encryption_key")?;
|
||||
veilid_log!(self debug "removed device encryption key. existed: {}", existed);
|
||||
return Ok(());
|
||||
};
|
||||
@ -423,8 +426,7 @@ impl TableStore {
|
||||
// Save the new device encryption key
|
||||
let existed = self
|
||||
.protected_store()
|
||||
.save_user_secret("device_encryption_key", &dek_bytes)
|
||||
.await?;
|
||||
.save_user_secret("device_encryption_key", &dek_bytes)?;
|
||||
veilid_log!(self debug "saving device encryption key. existed: {}", existed);
|
||||
Ok(())
|
||||
}
|
||||
@ -560,7 +562,7 @@ impl TableStore {
|
||||
}
|
||||
}
|
||||
|
||||
let table_name = self.name_get_or_create(name).await?;
|
||||
let table_name = self.name_get_or_create(name)?;
|
||||
|
||||
// See if this table is already opened, if so the column count must be the same
|
||||
{
|
||||
|
@ -3,6 +3,7 @@ pub use keyvaluedb_sqlite::*;
|
||||
use std::path::PathBuf;
|
||||
|
||||
#[derive(Clone)]
|
||||
#[must_use]
|
||||
pub(in crate::table_store) struct TableStoreDriver {
|
||||
registry: VeilidComponentRegistry,
|
||||
}
|
||||
@ -31,6 +32,7 @@ impl TableStoreDriver {
|
||||
Ok(dbpath)
|
||||
}
|
||||
|
||||
#[expect(clippy::unused_async)]
|
||||
pub async fn open(&self, table_name: &str, column_count: u32) -> VeilidAPIResult<Database> {
|
||||
let dbpath = self.get_dbpath(table_name)?;
|
||||
|
||||
@ -52,6 +54,7 @@ impl TableStoreDriver {
|
||||
Ok(db)
|
||||
}
|
||||
|
||||
#[expect(clippy::unused_async)]
|
||||
pub async fn delete(&self, table_name: &str) -> VeilidAPIResult<bool> {
|
||||
let dbpath = self.get_dbpath(table_name)?;
|
||||
if !dbpath.exists() {
|
||||
|
@ -12,6 +12,7 @@ cfg_if! {
|
||||
|
||||
impl_veilid_log_facility!("tstore");
|
||||
|
||||
#[must_use]
|
||||
struct CryptInfo {
|
||||
typed_key: TypedSharedSecret,
|
||||
}
|
||||
@ -21,6 +22,7 @@ impl CryptInfo {
|
||||
}
|
||||
}
|
||||
|
||||
#[must_use]
|
||||
pub struct TableDBUnlockedInner {
|
||||
registry: VeilidComponentRegistry,
|
||||
table: String,
|
||||
@ -44,6 +46,7 @@ impl Drop for TableDBUnlockedInner {
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
#[must_use]
|
||||
pub struct TableDB {
|
||||
opened_column_count: u32,
|
||||
unlocked_inner: Arc<TableDBUnlockedInner>,
|
||||
@ -107,6 +110,7 @@ impl TableDB {
|
||||
}
|
||||
|
||||
/// Get the internal name of the table
|
||||
#[must_use]
|
||||
pub fn table_name(&self) -> String {
|
||||
self.unlocked_inner.table.clone()
|
||||
}
|
||||
|
@ -2,6 +2,7 @@ use super::*;
pub use keyvaluedb_web::*;

#[derive(Clone)]
#[must_use]
pub(in crate::table_store) struct TableStoreDriver {
registry: VeilidComponentRegistry,
}
|
||||
|
@ -15,73 +15,55 @@ async fn shutdown(api: VeilidAPI) {
|
||||
trace!("test_table_store: finished");
|
||||
}
|
||||
|
||||
pub async fn test_protected_store(ps: &ProtectedStore) {
|
||||
pub fn test_protected_store(ps: &ProtectedStore) {
|
||||
info!("testing protected store");
|
||||
|
||||
let _ = ps.remove_user_secret("_test_key").await;
|
||||
let _ = ps.remove_user_secret("_test_broken").await;
|
||||
let _ = ps.remove_user_secret("_test_key");
|
||||
let _ = ps.remove_user_secret("_test_broken");
|
||||
|
||||
let d1: [u8; 0] = [];
|
||||
|
||||
assert!(!ps
|
||||
.save_user_secret("_test_key", &[2u8, 3u8, 4u8])
|
||||
.await
|
||||
.unwrap());
|
||||
assert!(!ps.save_user_secret("_test_key", &[2u8, 3u8, 4u8]).unwrap());
|
||||
info!("testing saving user secret");
|
||||
assert!(ps.save_user_secret("_test_key", &d1).await.unwrap());
|
||||
assert!(ps.save_user_secret("_test_key", &d1).unwrap());
|
||||
info!("testing loading user secret");
|
||||
assert_eq!(
|
||||
ps.load_user_secret("_test_key").await.unwrap(),
|
||||
Some(d1.to_vec())
|
||||
);
|
||||
assert_eq!(ps.load_user_secret("_test_key").unwrap(), Some(d1.to_vec()));
|
||||
info!("testing loading user secret again");
|
||||
assert_eq!(
|
||||
ps.load_user_secret("_test_key").await.unwrap(),
|
||||
Some(d1.to_vec())
|
||||
);
|
||||
assert_eq!(ps.load_user_secret("_test_key").unwrap(), Some(d1.to_vec()));
|
||||
info!("testing loading broken user secret");
|
||||
assert_eq!(ps.load_user_secret("_test_broken").await.unwrap(), None);
|
||||
assert_eq!(ps.load_user_secret("_test_broken").unwrap(), None);
|
||||
info!("testing loading broken user secret again");
|
||||
assert_eq!(ps.load_user_secret("_test_broken").await.unwrap(), None);
|
||||
assert_eq!(ps.load_user_secret("_test_broken").unwrap(), None);
|
||||
info!("testing remove user secret");
|
||||
assert!(ps.remove_user_secret("_test_key").await.unwrap());
|
||||
assert!(ps.remove_user_secret("_test_key").unwrap());
|
||||
info!("testing remove user secret again");
|
||||
assert!(!ps.remove_user_secret("_test_key").await.unwrap());
|
||||
assert!(!ps.remove_user_secret("_test_key").unwrap());
|
||||
info!("testing remove broken user secret");
|
||||
assert!(!ps.remove_user_secret("_test_broken").await.unwrap());
|
||||
assert!(!ps.remove_user_secret("_test_broken").unwrap());
|
||||
info!("testing remove broken user secret again");
|
||||
assert!(!ps.remove_user_secret("_test_broken").await.unwrap());
|
||||
assert!(!ps.remove_user_secret("_test_broken").unwrap());
|
||||
|
||||
let d2: [u8; 10] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10];
|
||||
|
||||
assert!(!ps
|
||||
.save_user_secret("_test_key", &[2u8, 3u8, 4u8])
|
||||
.await
|
||||
.unwrap());
|
||||
assert!(ps.save_user_secret("_test_key", &d2).await.unwrap());
|
||||
assert_eq!(
|
||||
ps.load_user_secret("_test_key").await.unwrap(),
|
||||
Some(d2.to_vec())
|
||||
);
|
||||
assert_eq!(
|
||||
ps.load_user_secret("_test_key").await.unwrap(),
|
||||
Some(d2.to_vec())
|
||||
);
|
||||
assert_eq!(ps.load_user_secret("_test_broken").await.unwrap(), None);
|
||||
assert_eq!(ps.load_user_secret("_test_broken").await.unwrap(), None);
|
||||
assert!(ps.remove_user_secret("_test_key").await.unwrap());
|
||||
assert!(!ps.remove_user_secret("_test_key").await.unwrap());
|
||||
assert!(!ps.remove_user_secret("_test_key").await.unwrap());
|
||||
assert!(!ps.remove_user_secret("_test_broken").await.unwrap());
|
||||
assert!(!ps.save_user_secret("_test_key", &[2u8, 3u8, 4u8]).unwrap());
|
||||
assert!(ps.save_user_secret("_test_key", &d2).unwrap());
|
||||
assert_eq!(ps.load_user_secret("_test_key").unwrap(), Some(d2.to_vec()));
|
||||
assert_eq!(ps.load_user_secret("_test_key").unwrap(), Some(d2.to_vec()));
|
||||
assert_eq!(ps.load_user_secret("_test_broken").unwrap(), None);
|
||||
assert_eq!(ps.load_user_secret("_test_broken").unwrap(), None);
|
||||
assert!(ps.remove_user_secret("_test_key").unwrap());
|
||||
assert!(!ps.remove_user_secret("_test_key").unwrap());
|
||||
assert!(!ps.remove_user_secret("_test_key").unwrap());
|
||||
assert!(!ps.remove_user_secret("_test_broken").unwrap());
|
||||
|
||||
let _ = ps.remove_user_secret("_test_key").await;
|
||||
let _ = ps.remove_user_secret("_test_broken").await;
|
||||
let _ = ps.remove_user_secret("_test_key");
|
||||
let _ = ps.remove_user_secret("_test_broken");
|
||||
}
|
||||
|
||||
pub async fn test_all() {
|
||||
let api = startup().await;
|
||||
let ps = api.protected_store().unwrap();
|
||||
test_protected_store(&ps).await;
|
||||
test_protected_store(&ps);
|
||||
|
||||
shutdown(api).await;
|
||||
}
|
||||
|
@ -62,25 +62,25 @@ wFAbkZY9eS/x6P7qrpd7dUA=
|
||||
cfg_if! {
|
||||
|
||||
if #[cfg(all(target_arch = "wasm32", target_os = "unknown"))] {
|
||||
pub fn get_table_store_path() -> String {
|
||||
#[must_use]pub fn get_table_store_path() -> String {
|
||||
String::new()
|
||||
}
|
||||
pub fn get_block_store_path() -> String {
|
||||
#[must_use]pub fn get_block_store_path() -> String {
|
||||
String::new()
|
||||
}
|
||||
pub fn get_protected_store_path() -> String {
|
||||
#[must_use]pub fn get_protected_store_path() -> String {
|
||||
String::new()
|
||||
}
|
||||
pub fn get_certfile_path() -> String {
|
||||
#[must_use]pub fn get_certfile_path() -> String {
|
||||
String::new()
|
||||
}
|
||||
pub fn get_keyfile_path() -> String {
|
||||
#[must_use]pub fn get_keyfile_path() -> String {
|
||||
String::new()
|
||||
}
|
||||
}
|
||||
else {
|
||||
|
||||
fn get_data_dir() -> PathBuf {
|
||||
#[must_use] fn get_data_dir() -> PathBuf {
|
||||
cfg_if! {
|
||||
if #[cfg(target_os = "android")] {
|
||||
PathBuf::from(crate::intf::android::get_files_dir())
|
||||
@ -96,7 +96,7 @@ cfg_if! {
|
||||
}
|
||||
}
|
||||
|
||||
pub fn get_table_store_path() -> String {
|
||||
#[must_use] pub fn get_table_store_path() -> String {
|
||||
let mut out = get_data_dir();
|
||||
std::fs::create_dir_all(&out).unwrap();
|
||||
|
||||
@ -105,7 +105,7 @@ cfg_if! {
|
||||
out.into_os_string().into_string().unwrap()
|
||||
}
|
||||
|
||||
pub fn get_block_store_path() -> String {
|
||||
#[must_use] pub fn get_block_store_path() -> String {
|
||||
let mut out = get_data_dir();
|
||||
std::fs::create_dir_all(&out).unwrap();
|
||||
|
||||
@ -114,7 +114,7 @@ cfg_if! {
|
||||
out.into_os_string().into_string().unwrap()
|
||||
}
|
||||
|
||||
pub fn get_protected_store_path() -> String {
|
||||
#[must_use] pub fn get_protected_store_path() -> String {
|
||||
let mut out = get_data_dir();
|
||||
std::fs::create_dir_all(&out).unwrap();
|
||||
|
||||
@ -123,7 +123,7 @@ cfg_if! {
|
||||
out.into_os_string().into_string().unwrap()
|
||||
}
|
||||
|
||||
pub fn get_certfile_path() -> String {
|
||||
#[must_use]pub fn get_certfile_path() -> String {
|
||||
let mut out = get_data_dir();
|
||||
std::fs::create_dir_all(&out).unwrap();
|
||||
|
||||
@ -137,7 +137,7 @@ cfg_if! {
|
||||
out.into_os_string().into_string().unwrap()
|
||||
}
|
||||
|
||||
pub fn get_keyfile_path() -> String {
|
||||
#[must_use]pub fn get_keyfile_path() -> String {
|
||||
let mut out = get_data_dir();
|
||||
std::fs::create_dir_all(&out).unwrap();
|
||||
|
||||
@ -307,7 +307,7 @@ pub fn get_config() -> VeilidConfig {
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn test_config() {
pub fn test_config() {
let vc = get_config();

let inner = vc.get();
@ -426,6 +426,7 @@ pub async fn test_config() {
assert_eq!(inner.network.virtual_network.server_address, "");
}

#[expect(clippy::unused_async)]
pub async fn test_all() {
test_config().await;
test_config();
}
|
||||
|
@ -35,6 +35,7 @@ impl Drop for VeilidAPIInner {
/// * Create and import private routes.
/// * Reply to `AppCall` RPCs.
#[derive(Clone, Debug)]
#[must_use]
pub struct VeilidAPI {
inner: Arc<Mutex<VeilidAPIInner>>,
}
@ -69,6 +70,7 @@ impl VeilidAPI {
}

/// Check to see if Veilid is already shut down.
#[must_use]
pub fn is_shutdown(&self) -> bool {
self.inner.lock().context.is_none()
}
@ -154,6 +156,7 @@ impl VeilidAPI {
// Attach/Detach

/// Get a full copy of the current state of Veilid.
#[expect(clippy::unused_async)]
pub async fn get_state(&self) -> VeilidAPIResult<VeilidState> {
let attachment_manager = self.core_context()?.attachment_manager();
let network_manager = attachment_manager.network_manager();
@ -177,7 +180,7 @@ impl VeilidAPI {
"VeilidAPI::attach()");

let attachment_manager = self.core_context()?.attachment_manager();
if !attachment_manager.attach().await {
if !Box::pin(attachment_manager.attach()).await {
apibail_generic!("Already attached");
}
Ok(())
@ -190,7 +193,7 @@ impl VeilidAPI {
"VeilidAPI::detach()");

let attachment_manager = self.core_context()?.attachment_manager();
if !attachment_manager.detach().await {
if !Box::pin(attachment_manager.detach()).await {
apibail_generic!("Already detached");
}
Ok(())
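The `Box::pin(attachment_manager.attach())` and `detach()` call sites above illustrate the pattern behind the "futures optimizations" in this merge: awaiting a large future inline copies its whole state into the caller's future, while boxing it keeps only a pointer-sized slot there, which is what clippy's `large_futures` lint pushes toward. A sketch with invented sizes, not the real attach path (the `futures` crate is assumed for the executor):

```rust
// Stand-in for a deeply nested state machine with large locals.
async fn attach_inner() -> bool {
    let scratch = [0u8; 16 * 1024];
    // an await point forces `scratch` to be stored inside the future's state
    futures::future::ready(()).await;
    scratch[0] == 0
}

async fn attach() -> Result<(), String> {
    // `attach_inner().await` would embed ~16 KiB here; boxing moves it to the heap
    if !Box::pin(attach_inner()).await {
        return Err("Already attached".to_owned());
    }
    Ok(())
}

fn main() {
    assert!(futures::executor::block_on(attach()).is_ok());
    println!(
        "attach future is only {} bytes on the stack",
        std::mem::size_of_val(&attach())
    );
}
```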
@ -253,11 +256,11 @@ impl VeilidAPI {
/// imported by another Veilid node.
//#[instrument(target = "veilid_api", level = "debug", skip(self), ret, err)]
pub async fn new_private_route(&self) -> VeilidAPIResult<(RouteId, Vec<u8>)> {
self.new_custom_private_route(
Box::pin(self.new_custom_private_route(
&VALID_CRYPTO_KINDS,
Stability::Reliable,
Sequencing::PreferOrdered,
)
))
.await
}

@ -303,7 +306,7 @@ impl VeilidAPI {
let rss = routing_table.route_spec_store();
let route_id =
rss.allocate_route(crypto_kinds, &safety_spec, DirectionSet::all(), &[], false)?;
match rss.test_route(route_id).await? {
match Box::pin(rss.test_route(route_id)).await? {
Some(true) => {
// route tested okay
}
@ -17,6 +17,7 @@ pub(crate) struct DebugCache {
|
||||
pub opened_record_contexts: Lazy<LinkedHashMap<TypedKey, RoutingContext>>,
|
||||
}
|
||||
|
||||
#[must_use]
|
||||
pub fn format_opt_ts(ts: Option<TimestampDuration>) -> String {
|
||||
let Some(ts) = ts else {
|
||||
return "---".to_owned();
|
||||
@ -30,6 +31,7 @@ pub fn format_opt_ts(ts: Option<TimestampDuration>) -> String {
|
||||
}
|
||||
}
|
||||
|
||||
#[must_use]
|
||||
pub fn format_opt_bps(bps: Option<ByteCount>) -> String {
|
||||
let Some(bps) = bps else {
|
||||
return "---".to_owned();
|
||||
@ -288,7 +290,7 @@ fn get_dht_key(
|
||||
fn resolve_node_ref(
|
||||
registry: VeilidComponentRegistry,
|
||||
safety_selection: SafetySelection,
|
||||
) -> impl FnOnce(&str) -> SendPinBoxFuture<Option<NodeRef>> {
|
||||
) -> impl FnOnce(&str) -> PinBoxFutureStatic<Option<NodeRef>> {
|
||||
move |text| {
|
||||
let text = text.to_owned();
|
||||
Box::pin(async move {
|
||||
@ -318,7 +320,7 @@ fn resolve_node_ref(
|
||||
fn resolve_filtered_node_ref(
|
||||
registry: VeilidComponentRegistry,
|
||||
safety_selection: SafetySelection,
|
||||
) -> impl FnOnce(&str) -> SendPinBoxFuture<Option<FilteredNodeRef>> {
|
||||
) -> impl FnOnce(&str) -> PinBoxFutureStatic<Option<FilteredNodeRef>> {
|
||||
move |text| {
|
||||
let text = text.to_owned();
|
||||
Box::pin(async move {
|
||||
@ -503,7 +505,7 @@ fn get_debug_argument<T, G: FnOnce(&str) -> Option<T>>(
|
||||
Ok(val)
|
||||
}
|
||||
|
||||
async fn async_get_debug_argument<T, G: FnOnce(&str) -> SendPinBoxFuture<Option<T>>>(
|
||||
async fn async_get_debug_argument<T, G: FnOnce(&str) -> PinBoxFutureStatic<Option<T>>>(
|
||||
value: &str,
|
||||
context: &str,
|
||||
argument: &str,
|
||||
@ -532,7 +534,7 @@ fn get_debug_argument_at<T, G: FnOnce(&str) -> Option<T>>(
|
||||
Ok(val)
|
||||
}
|
||||
|
||||
async fn async_get_debug_argument_at<T, G: FnOnce(&str) -> SendPinBoxFuture<Option<T>>>(
|
||||
async fn async_get_debug_argument_at<T, G: FnOnce(&str) -> PinBoxFutureStatic<Option<T>>>(
|
||||
debug_args: &[String],
|
||||
pos: usize,
|
||||
context: &str,
|
||||
@ -549,6 +551,7 @@ async fn async_get_debug_argument_at<T, G: FnOnce(&str) -> SendPinBoxFuture<Opti
|
||||
Ok(val)
|
||||
}
|
||||
|
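The resolver closures above change their return type from `SendPinBoxFuture` to `PinBoxFutureStatic`. The definition below is an assumption about the alias's shape (the real one lives in Veilid's support code); the point is that a debug-argument resolver is just a closure returning one pinned, boxed, `'static` future.

```rust
use std::future::Future;
use std::pin::Pin;

// Assumed shape of the alias; not copied from the Veilid source.
pub type PinBoxFutureStatic<T> = Pin<Box<dyn Future<Output = T> + Send + 'static>>;

// Toy resolver in the same shape as `resolve_node_ref` above: it captures its
// inputs, owns the text, and hands back a single boxed future.
fn resolve_number() -> impl FnOnce(&str) -> PinBoxFutureStatic<Option<u64>> {
    move |text| {
        let text = text.to_owned();
        Box::pin(async move { text.parse::<u64>().ok() })
    }
}

fn main() {
    let fut = resolve_number()("42");
    assert_eq!(futures::executor::block_on(fut), Some(42));
}
```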
||||
#[must_use]
|
||||
pub fn print_data(data: &[u8], truncate_len: Option<usize>) -> String {
|
||||
// check if message body is ascii printable
|
||||
let mut printable = true;
|
||||
@ -588,7 +591,7 @@ pub fn print_data(data: &[u8], truncate_len: Option<usize>) -> String {
|
||||
}
|
||||
|
||||
impl VeilidAPI {
|
||||
async fn debug_buckets(&self, args: String) -> VeilidAPIResult<String> {
|
||||
fn debug_buckets(&self, args: String) -> VeilidAPIResult<String> {
|
||||
let args: Vec<String> = args.split_whitespace().map(|s| s.to_owned()).collect();
|
||||
let mut min_state = BucketEntryState::Unreliable;
|
||||
if args.len() == 1 {
|
||||
@ -604,12 +607,12 @@ impl VeilidAPI {
|
||||
Ok(routing_table.debug_info_buckets(min_state))
|
||||
}
|
||||
|
||||
async fn debug_dialinfo(&self, _args: String) -> VeilidAPIResult<String> {
|
||||
fn debug_dialinfo(&self, _args: String) -> VeilidAPIResult<String> {
|
||||
// Dump routing table dialinfo
|
||||
let routing_table = self.core_context()?.routing_table();
|
||||
Ok(routing_table.debug_info_dialinfo())
|
||||
}
|
||||
async fn debug_peerinfo(&self, args: String) -> VeilidAPIResult<String> {
|
||||
fn debug_peerinfo(&self, args: String) -> VeilidAPIResult<String> {
|
||||
// Dump routing table peerinfo
|
||||
let args: Vec<String> = args.split_whitespace().map(|s| s.to_owned()).collect();
|
||||
let routing_table = self.core_context()?.routing_table();
|
||||
@ -647,7 +650,7 @@ impl VeilidAPI {
|
||||
Ok(routing_table.debug_info_txtrecord().await)
|
||||
}
|
||||
|
||||
async fn debug_keypair(&self, args: String) -> VeilidAPIResult<String> {
|
||||
fn debug_keypair(&self, args: String) -> VeilidAPIResult<String> {
|
||||
let args: Vec<String> = args.split_whitespace().map(|s| s.to_owned()).collect();
|
||||
let crypto = self.crypto()?;
|
||||
|
||||
@ -665,7 +668,7 @@ impl VeilidAPI {
|
||||
Ok(out)
|
||||
}
|
||||
|
||||
async fn debug_entries(&self, args: String) -> VeilidAPIResult<String> {
|
||||
fn debug_entries(&self, args: String) -> VeilidAPIResult<String> {
|
||||
let args: Vec<String> = args.split_whitespace().map(|s| s.to_owned()).collect();
|
||||
|
||||
let mut min_state = BucketEntryState::Unreliable;
|
||||
@ -695,7 +698,7 @@ impl VeilidAPI {
|
||||
})
|
||||
}
|
||||
|
||||
async fn debug_entry(&self, args: String) -> VeilidAPIResult<String> {
|
||||
fn debug_entry(&self, args: String) -> VeilidAPIResult<String> {
|
||||
let args: Vec<String> = args.split_whitespace().map(|s| s.to_owned()).collect();
|
||||
let registry = self.core_context()?.registry();
|
||||
|
||||
@ -782,7 +785,7 @@ impl VeilidAPI {
|
||||
// Dump connection table
|
||||
let connman =
|
||||
if let Some(connection_manager) = registry.network_manager().opt_connection_manager() {
|
||||
connection_manager.debug_print().await
|
||||
connection_manager.debug_print()
|
||||
} else {
|
||||
"Connection manager unavailable when detached".to_owned()
|
||||
};
|
||||
@ -790,7 +793,7 @@ impl VeilidAPI {
|
||||
Ok(format!("{}\n{}\n{}\n", nodeinfo, peertable, connman))
|
||||
}
|
||||
|
||||
async fn debug_nodeid(&self, _args: String) -> VeilidAPIResult<String> {
|
||||
fn debug_nodeid(&self, _args: String) -> VeilidAPIResult<String> {
|
||||
// Dump routing table entry
|
||||
let registry = self.core_context()?.registry();
|
||||
let nodeid = registry.routing_table().debug_info_nodeid();
|
||||
@ -833,15 +836,15 @@ impl VeilidAPI {
|
||||
Ok("Config value set".to_owned())
|
||||
}
|
||||
|
||||
async fn debug_restart(&self, args: String) -> VeilidAPIResult<String> {
|
||||
async fn debug_network(&self, args: String) -> VeilidAPIResult<String> {
|
||||
let args = args.trim_start();
|
||||
if args.is_empty() {
|
||||
apibail_missing_argument!("debug_restart", "arg_0");
|
||||
apibail_missing_argument!("debug_network", "arg_0");
|
||||
}
|
||||
let (arg, _rest) = args.split_once(' ').unwrap_or((args, ""));
|
||||
// let rest = rest.trim_start().to_owned();
|
||||
|
||||
if arg == "network" {
|
||||
if arg == "restart" {
|
||||
// Must be attached
|
||||
if matches!(
|
||||
self.get_state().await?.attachment.state,
|
||||
@ -854,6 +857,11 @@ impl VeilidAPI {
|
||||
registry.network_manager().restart_network();
|
||||
|
||||
Ok("Network restarted".to_owned())
|
||||
} else if arg == "stats" {
|
||||
let registry = self.core_context()?.registry();
|
||||
let debug_stats = registry.network_manager().debug();
|
||||
|
||||
Ok(debug_stats)
|
||||
} else {
|
||||
apibail_invalid_argument!("debug_restart", "arg_1", arg);
|
||||
}
|
||||
@ -888,7 +896,6 @@ impl VeilidAPI {
|
||||
if let Some(connection_manager) = &opt_connection_manager {
|
||||
connection_manager
|
||||
.startup()
|
||||
.await
|
||||
.map_err(VeilidAPIError::internal)?;
|
||||
}
|
||||
Ok("Connections purged".to_owned())
|
||||
@ -942,7 +949,7 @@ impl VeilidAPI {
|
||||
Ok("Detached".to_owned())
|
||||
}
|
||||
|
||||
async fn debug_contact(&self, args: String) -> VeilidAPIResult<String> {
|
||||
fn debug_contact(&self, args: String) -> VeilidAPIResult<String> {
|
||||
let args: Vec<String> = args.split_whitespace().map(|s| s.to_owned()).collect();
|
||||
|
||||
let registry = self.core_context()?.registry();
|
||||
@ -1153,7 +1160,7 @@ impl VeilidAPI {
|
||||
Ok(format!("Replied with {} bytes", data_len))
|
||||
}
|
||||
|
||||
async fn debug_route_allocate(&self, args: Vec<String>) -> VeilidAPIResult<String> {
|
||||
fn debug_route_allocate(&self, args: Vec<String>) -> VeilidAPIResult<String> {
|
||||
// [ord|*ord] [rel] [<count>] [in|out] [avoid_node_id]
|
||||
|
||||
let registry = self.core_context()?.registry();
|
||||
@ -1212,7 +1219,7 @@ impl VeilidAPI {
|
||||
|
||||
Ok(out)
|
||||
}
|
||||
async fn debug_route_release(&self, args: Vec<String>) -> VeilidAPIResult<String> {
|
||||
fn debug_route_release(&self, args: Vec<String>) -> VeilidAPIResult<String> {
|
||||
// <route id>
|
||||
let registry = self.core_context()?.registry();
|
||||
let routing_table = registry.routing_table();
|
||||
@ -1233,7 +1240,7 @@ impl VeilidAPI {
|
||||
self.with_debug_cache(|dc| {
|
||||
for (n, ir) in dc.imported_routes.iter().enumerate() {
|
||||
if *ir == route_id {
|
||||
dc.imported_routes.remove(n);
|
||||
let _ = dc.imported_routes.remove(n);
|
||||
break;
|
||||
}
|
||||
}
|
||||
@ -1245,7 +1252,7 @@ impl VeilidAPI {
|
||||
|
||||
Ok(out)
|
||||
}
|
||||
async fn debug_route_publish(&self, args: Vec<String>) -> VeilidAPIResult<String> {
|
||||
fn debug_route_publish(&self, args: Vec<String>) -> VeilidAPIResult<String> {
|
||||
// <route id> [full]
|
||||
let registry = self.core_context()?.registry();
|
||||
let routing_table = registry.routing_table();
|
||||
@ -1297,7 +1304,7 @@ impl VeilidAPI {
|
||||
|
||||
Ok(out)
|
||||
}
|
||||
async fn debug_route_unpublish(&self, args: Vec<String>) -> VeilidAPIResult<String> {
|
||||
fn debug_route_unpublish(&self, args: Vec<String>) -> VeilidAPIResult<String> {
|
||||
// <route id>
|
||||
let registry = self.core_context()?.registry();
|
||||
let routing_table = registry.routing_table();
|
||||
@ -1319,7 +1326,7 @@ impl VeilidAPI {
|
||||
};
|
||||
Ok(out)
|
||||
}
|
||||
async fn debug_route_print(&self, args: Vec<String>) -> VeilidAPIResult<String> {
|
||||
fn debug_route_print(&self, args: Vec<String>) -> VeilidAPIResult<String> {
|
||||
// <route id>
|
||||
let registry = self.core_context()?.registry();
|
||||
let routing_table = registry.routing_table();
|
||||
@ -1338,7 +1345,7 @@ impl VeilidAPI {
|
||||
None => Ok("Route does not exist".to_owned()),
|
||||
}
|
||||
}
|
||||
async fn debug_route_list(&self, _args: Vec<String>) -> VeilidAPIResult<String> {
|
||||
fn debug_route_list(&self, _args: Vec<String>) -> VeilidAPIResult<String> {
|
||||
//
|
||||
let registry = self.core_context()?.registry();
|
||||
let routing_table = registry.routing_table();
|
||||
@ -1361,7 +1368,7 @@ impl VeilidAPI {
|
||||
|
||||
Ok(out)
|
||||
}
|
||||
async fn debug_route_import(&self, args: Vec<String>) -> VeilidAPIResult<String> {
|
||||
fn debug_route_import(&self, args: Vec<String>) -> VeilidAPIResult<String> {
|
||||
// <blob>
|
||||
let registry = self.core_context()?.registry();
|
||||
let routing_table = registry.routing_table();
|
||||
@ -1420,19 +1427,19 @@ impl VeilidAPI {
|
||||
let command = get_debug_argument_at(&args, 0, "debug_route", "command", get_string)?;
|
||||
|
||||
if command == "allocate" {
|
||||
self.debug_route_allocate(args).await
|
||||
self.debug_route_allocate(args)
|
||||
} else if command == "release" {
|
||||
self.debug_route_release(args).await
|
||||
self.debug_route_release(args)
|
||||
} else if command == "publish" {
|
||||
self.debug_route_publish(args).await
|
||||
self.debug_route_publish(args)
|
||||
} else if command == "unpublish" {
|
||||
self.debug_route_unpublish(args).await
|
||||
self.debug_route_unpublish(args)
|
||||
} else if command == "print" {
|
||||
self.debug_route_print(args).await
|
||||
self.debug_route_print(args)
|
||||
} else if command == "list" {
|
||||
self.debug_route_list(args).await
|
||||
self.debug_route_list(args)
|
||||
} else if command == "import" {
|
||||
self.debug_route_import(args).await
|
||||
self.debug_route_import(args)
|
||||
} else if command == "test" {
|
||||
self.debug_route_test(args).await
|
||||
} else {
|
||||
@ -1953,7 +1960,7 @@ impl VeilidAPI {
|
||||
}
|
||||
}
|
||||
|
||||
async fn debug_table_list(&self, _args: Vec<String>) -> VeilidAPIResult<String> {
|
||||
fn debug_table_list(&self, _args: Vec<String>) -> VeilidAPIResult<String> {
|
||||
//
|
||||
let table_store = self.table_store()?;
|
||||
let table_names = table_store.list_all();
|
||||
@ -2012,7 +2019,7 @@ impl VeilidAPI {
|
||||
let command = get_debug_argument_at(&args, 0, "debug_table", "command", get_string)?;
|
||||
|
||||
if command == "list" {
|
||||
self.debug_table_list(args).await
|
||||
self.debug_table_list(args)
|
||||
} else if command == "info" {
|
||||
self.debug_table_info(args).await
|
||||
} else {
|
||||
@ -2020,7 +2027,7 @@ impl VeilidAPI {
|
||||
}
|
||||
}
|
||||
|
||||
async fn debug_punish_list(&self, _args: Vec<String>) -> VeilidAPIResult<String> {
|
||||
fn debug_punish_list(&self, _args: Vec<String>) -> VeilidAPIResult<String> {
|
||||
//
|
||||
let registry = self.core_context()?.registry();
|
||||
let network_manager = registry.network_manager();
|
||||
@ -2030,7 +2037,7 @@ impl VeilidAPI {
|
||||
Ok(out)
|
||||
}
|
||||
|
||||
async fn debug_punish_clear(&self, _args: Vec<String>) -> VeilidAPIResult<String> {
|
||||
fn debug_punish_clear(&self, _args: Vec<String>) -> VeilidAPIResult<String> {
|
||||
//
|
||||
let registry = self.core_context()?.registry();
|
||||
let network_manager = registry.network_manager();
|
||||
@ -2041,23 +2048,23 @@ impl VeilidAPI {
|
||||
Ok("Address Filter punishments cleared\n".to_owned())
|
||||
}
|
||||
|
||||
async fn debug_punish(&self, args: String) -> VeilidAPIResult<String> {
|
||||
fn debug_punish(&self, args: String) -> VeilidAPIResult<String> {
|
||||
let args: Vec<String> =
|
||||
shell_words::split(&args).map_err(|e| VeilidAPIError::parse_error(e, args))?;
|
||||
|
||||
let command = get_debug_argument_at(&args, 0, "debug_punish", "command", get_string)?;
|
||||
|
||||
if command == "list" {
|
||||
self.debug_punish_list(args).await
|
||||
self.debug_punish_list(args)
|
||||
} else if command == "clear" {
|
||||
self.debug_punish_clear(args).await
|
||||
self.debug_punish_clear(args)
|
||||
} else {
|
||||
Ok(">>> Unknown command\n".to_owned())
|
||||
}
|
||||
}
|
||||
|
||||
/// Get the help text for 'internal debug' commands.
|
||||
pub async fn debug_help(&self, _args: String) -> VeilidAPIResult<String> {
|
||||
pub fn debug_help(&self, _args: String) -> VeilidAPIResult<String> {
|
||||
Ok(r#"Node Information:
|
||||
nodeid - display a node's id(s)
|
||||
nodeinfo - display detailed information about this node
|
||||
@ -2089,9 +2096,12 @@ Utilities:
|
||||
txtrecord - Generate a TXT record for making this node into a bootstrap node capable of DNS bootstrap
|
||||
keypair [cryptokind] - Generate and display a random public/private keypair
|
||||
purge <buckets|connections|routes> - Throw away the node's routing table, connections, or routes
|
||||
|
||||
Network:
|
||||
attach - Attach the node to the network if it is detached
|
||||
detach - Detach the node from the network if it is attached
|
||||
restart network - Restart the low level network
|
||||
network restart - Restart the low level network
|
||||
stats - Print network manager statistics
|
||||
|
||||
RPC Operations:
|
||||
ping <destination> - Send a 'Status' RPC question to a destination node and display the returned ping status
|
||||
@ -2177,67 +2187,70 @@ TableDB Operations:
|
||||
let args = args.trim_start();
|
||||
if args.is_empty() {
|
||||
// No arguments runs help command
|
||||
return self.debug_help("".to_owned()).await;
|
||||
return self.debug_help("".to_owned());
|
||||
}
|
||||
let (arg, rest) = args.split_once(' ').unwrap_or((args, ""));
|
||||
let rest = rest.trim_start().to_owned();
|
||||
|
||||
if arg == "help" {
|
||||
self.debug_help(rest).await
|
||||
self.debug_help(rest)
|
||||
} else if arg == "nodeid" {
|
||||
self.debug_nodeid(rest).await
|
||||
self.debug_nodeid(rest)
|
||||
} else if arg == "buckets" {
|
||||
self.debug_buckets(rest).await
|
||||
self.debug_buckets(rest)
|
||||
} else if arg == "dialinfo" {
|
||||
self.debug_dialinfo(rest).await
|
||||
self.debug_dialinfo(rest)
|
||||
} else if arg == "peerinfo" {
|
||||
self.debug_peerinfo(rest).await
|
||||
} else if arg == "txtrecord" {
|
||||
self.debug_txtrecord(rest).await
|
||||
} else if arg == "keypair" {
|
||||
self.debug_keypair(rest).await
|
||||
} else if arg == "entries" {
|
||||
self.debug_entries(rest).await
|
||||
} else if arg == "entry" {
|
||||
self.debug_entry(rest).await
|
||||
} else if arg == "relay" {
|
||||
self.debug_relay(rest).await
|
||||
} else if arg == "ping" {
|
||||
self.debug_ping(rest).await
|
||||
} else if arg == "appmessage" {
|
||||
self.debug_app_message(rest).await
|
||||
} else if arg == "appcall" {
|
||||
self.debug_app_call(rest).await
|
||||
} else if arg == "appreply" {
|
||||
self.debug_app_reply(rest).await
|
||||
} else if arg == "resolve" {
|
||||
self.debug_resolve(rest).await
|
||||
self.debug_peerinfo(rest)
|
||||
} else if arg == "contact" {
|
||||
self.debug_contact(rest).await
|
||||
} else if arg == "nodeinfo" {
|
||||
self.debug_nodeinfo(rest).await
|
||||
} else if arg == "purge" {
|
||||
self.debug_purge(rest).await
|
||||
} else if arg == "attach" {
|
||||
self.debug_attach(rest).await
|
||||
} else if arg == "detach" {
|
||||
self.debug_detach(rest).await
|
||||
} else if arg == "config" {
|
||||
self.debug_config(rest).await
|
||||
} else if arg == "restart" {
|
||||
self.debug_restart(rest).await
|
||||
} else if arg == "route" {
|
||||
self.debug_route(rest).await
|
||||
} else if arg == "record" {
|
||||
self.debug_record(rest).await
|
||||
self.debug_contact(rest)
|
||||
} else if arg == "keypair" {
|
||||
self.debug_keypair(rest)
|
||||
} else if arg == "entries" {
|
||||
self.debug_entries(rest)
|
||||
} else if arg == "entry" {
|
||||
self.debug_entry(rest)
|
||||
} else if arg == "punish" {
|
||||
self.debug_punish(rest).await
|
||||
} else if arg == "table" {
|
||||
self.debug_table(rest).await
|
||||
} else if arg == "uptime" {
|
||||
self.debug_uptime(rest).await
|
||||
self.debug_punish(rest)
|
||||
} else {
|
||||
Err(VeilidAPIError::generic("Unknown debug command"))
|
||||
let fut = if arg == "txtrecord" {
|
||||
pin_dyn_future!(self.debug_txtrecord(rest))
|
||||
} else if arg == "relay" {
|
||||
pin_dyn_future!(self.debug_relay(rest))
|
||||
} else if arg == "ping" {
|
||||
pin_dyn_future!(self.debug_ping(rest))
|
||||
} else if arg == "appmessage" {
|
||||
pin_dyn_future!(self.debug_app_message(rest))
|
||||
} else if arg == "appcall" {
|
||||
pin_dyn_future!(self.debug_app_call(rest))
|
||||
} else if arg == "appreply" {
|
||||
pin_dyn_future!(self.debug_app_reply(rest))
|
||||
} else if arg == "resolve" {
|
||||
pin_dyn_future!(self.debug_resolve(rest))
|
||||
} else if arg == "nodeinfo" {
|
||||
pin_dyn_future!(self.debug_nodeinfo(rest))
|
||||
} else if arg == "purge" {
|
||||
pin_dyn_future!(self.debug_purge(rest))
|
||||
} else if arg == "attach" {
|
||||
pin_dyn_future!(self.debug_attach(rest))
|
||||
} else if arg == "detach" {
|
||||
pin_dyn_future!(self.debug_detach(rest))
|
||||
} else if arg == "config" {
|
||||
pin_dyn_future!(self.debug_config(rest))
|
||||
} else if arg == "network" {
|
||||
pin_dyn_future!(self.debug_network(rest))
|
||||
} else if arg == "route" {
|
||||
pin_dyn_future!(self.debug_route(rest))
|
||||
} else if arg == "record" {
|
||||
pin_dyn_future!(self.debug_record(rest))
|
||||
} else if arg == "table" {
|
||||
pin_dyn_future!(self.debug_table(rest))
|
||||
} else if arg == "uptime" {
|
||||
pin_dyn_future!(self.debug_uptime(rest))
|
||||
} else {
|
||||
return Err(VeilidAPIError::generic("Unknown debug command"));
|
||||
};
|
||||
fut.await
|
||||
}
|
||||
};
|
||||
res
|
||||
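The debug command dispatcher above now builds one boxed future per command arm via `pin_dyn_future!` and awaits it once at the end, instead of awaiting inside every `if`/`else` branch. A toy sketch of the shape (the alias and the macro's expansion to `Box::pin(...)` are assumptions):

```rust
use std::future::Future;
use std::pin::Pin;

type PinBoxFutureStatic<T> = Pin<Box<dyn Future<Output = T> + 'static>>;

// Each arm only constructs a boxed future; a single `fut.await` at the end
// drives whichever arm was chosen, so the enclosing future stays small.
async fn debug(arg: &str) -> Result<String, String> {
    let fut: PinBoxFutureStatic<Result<String, String>> = if arg == "ping" {
        Box::pin(async { Ok("pong".to_owned()) })
    } else if arg == "uptime" {
        Box::pin(async { Ok("42s".to_owned()) })
    } else {
        return Err("Unknown debug command".to_owned());
    };
    fut.await
}

fn main() {
    let out = futures::executor::block_on(debug("ping"));
    assert_eq!(out.unwrap(), "pong");
}
```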
@ -2246,7 +2259,7 @@ TableDB Operations:
|
||||
fn get_destination(
|
||||
self,
|
||||
registry: VeilidComponentRegistry,
|
||||
) -> impl FnOnce(&str) -> SendPinBoxFuture<Option<Destination>> {
|
||||
) -> impl FnOnce(&str) -> PinBoxFutureStatic<Option<Destination>> {
|
||||
move |text| {
|
||||
let text = text.to_owned();
|
||||
Box::pin(async move {
|
||||
@ -2278,7 +2291,7 @@ TableDB Operations:
|
||||
let prid = *dc.imported_routes.get(n)?;
|
||||
let Some(private_route) = rss.best_remote_private_route(&prid) else {
|
||||
// Remove imported route
|
||||
dc.imported_routes.remove(n);
|
||||
let _ = dc.imported_routes.remove(n);
|
||||
veilid_log!(registry info "removed dead imported route {}", n);
|
||||
return None;
|
||||
};
|
||||
|
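Both `imported_routes.remove(n)` call sites above gain a `let _ =`. A toy sketch of the same discipline, assuming the removal result is a `#[must_use]` value such as an `Option` and that discarded `#[must_use]` results are denied (the map here is illustrative, not the real collection):

```rust
#![deny(unused_must_use)]
use std::collections::HashMap;

fn main() {
    let mut imported_routes = HashMap::from([(0usize, "route blob".to_owned())]);

    // `Option` is a `#[must_use]` type, so a removal done purely for its side
    // effect has to discard the result explicitly. Without `let _ =` this
    // statement is rejected: error: unused `Option` that must be used
    let _ = imported_routes.remove(&0);
    assert!(imported_routes.is_empty());
}
```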
@ -113,6 +113,7 @@ macro_rules! apibail_already_initialized {
|
||||
tsify(into_wasm_abi)
|
||||
)]
|
||||
#[serde(tag = "kind")]
|
||||
#[must_use]
|
||||
pub enum VeilidAPIError {
|
||||
#[error("Not initialized")]
|
||||
NotInitialized,
|
||||
|
@ -1,5 +1,4 @@
|
||||
use super::*;
|
||||
use futures_util::FutureExt;
|
||||
|
||||
pub fn to_json_api_result<T: Clone + fmt::Debug + JsonSchema>(
|
||||
r: VeilidAPIResult<T>,
|
||||
@ -37,6 +36,7 @@ pub fn to_json_api_result_with_opt_vec_string<T: Clone + fmt::Debug>(
|
||||
}
|
||||
}
|
||||
|
||||
#[must_use]
|
||||
pub fn to_json_api_result_with_vec_u8(r: VeilidAPIResult<Vec<u8>>) -> json_api::ApiResultWithVecU8 {
|
||||
match r {
|
||||
Err(e) => json_api::ApiResultWithVecU8::Err { error: e },
|
||||
@ -44,6 +44,7 @@ pub fn to_json_api_result_with_vec_u8(r: VeilidAPIResult<Vec<u8>>) -> json_api::
|
||||
}
|
||||
}
|
||||
|
||||
#[must_use]
|
||||
pub fn to_json_api_result_with_vec_vec_u8(
|
||||
r: VeilidAPIResult<Vec<Vec<u8>>>,
|
||||
) -> json_api::ApiResultWithVecVecU8 {
|
||||
@ -69,6 +70,7 @@ pub struct JsonRequestProcessor {
|
||||
}
|
||||
|
||||
impl JsonRequestProcessor {
|
||||
#[must_use]
|
||||
pub fn new(api: VeilidAPI) -> Self {
|
||||
Self {
|
||||
api,
|
||||
@ -212,7 +214,7 @@ impl JsonRequestProcessor {
|
||||
// Target
|
||||
|
||||
// Parse target
|
||||
async fn parse_target(&self, s: String) -> VeilidAPIResult<Target> {
|
||||
fn parse_target(&self, s: String) -> VeilidAPIResult<Target> {
|
||||
// Is this a route id?
|
||||
if let Ok(rrid) = RouteId::from_str(&s) {
|
||||
let routing_table = self.api.core_context()?.routing_table();
|
||||
@ -277,18 +279,24 @@ impl JsonRequestProcessor {
|
||||
RoutingContextRequestOp::AppCall { target, message } => {
|
||||
RoutingContextResponseOp::AppCall {
|
||||
result: to_json_api_result_with_vec_u8(
|
||||
self.parse_target(target)
|
||||
.then(|tr| async { routing_context.app_call(tr?, message).await })
|
||||
.await,
|
||||
async {
|
||||
routing_context
|
||||
.app_call(self.parse_target(target)?, message)
|
||||
.await
|
||||
}
|
||||
.await,
|
||||
),
|
||||
}
|
||||
}
|
||||
RoutingContextRequestOp::AppMessage { target, message } => {
|
||||
RoutingContextResponseOp::AppMessage {
|
||||
result: to_json_api_result(
|
||||
self.parse_target(target)
|
||||
.then(|tr| async { routing_context.app_message(tr?, message).await })
|
||||
.await,
|
||||
async {
|
||||
routing_context
|
||||
.app_message(self.parse_target(target)?, message)
|
||||
.await
|
||||
}
|
||||
.await,
|
||||
),
|
||||
}
|
||||
}
|
||||
|
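With `parse_target` now synchronous, the JSON API handlers above drop the `FutureExt::then` chain and wrap the fallible parse plus the network call in a single `async { ... }` block, so `?` can short-circuit before any RPC work. A sketch with invented target and message types (the `futures` executor is an assumption):

```rust
// Toy stand-ins for the real target parsing and app_call RPC.
fn parse_target(s: &str) -> Result<u64, String> {
    s.parse::<u64>().map_err(|e| e.to_string())
}

async fn app_call(target: u64, message: Vec<u8>) -> Result<Vec<u8>, String> {
    Ok(format!("{target}:{}", message.len()).into_bytes())
}

async fn handle(target: String, message: Vec<u8>) -> Result<Vec<u8>, String> {
    // the `?` bails out of the async block before any "network" work happens
    async { app_call(parse_target(&target)?, message).await }.await
}

fn main() {
    let out = futures::executor::block_on(handle("7".into(), vec![1, 2, 3]));
    assert_eq!(out.unwrap(), b"7:3".to_vec());
}
```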
@ -4,6 +4,7 @@ use super::*;
|
||||
|
||||
/// Valid destinations for a message sent over a routing context.
|
||||
#[derive(Clone, Debug, Eq, PartialEq, Hash, Copy, PartialOrd, Ord)]
|
||||
#[must_use]
|
||||
pub enum Target {
|
||||
/// Node by its public key.
|
||||
NodeId(TypedKey),
|
||||
@ -25,6 +26,7 @@ pub struct RoutingContextUnlockedInner {
|
||||
/// To enable receiver privacy, you should send to a private route RouteId that you have imported, rather than directly to a NodeId.
|
||||
///
|
||||
#[derive(Clone)]
|
||||
#[must_use]
|
||||
pub struct RoutingContext {
|
||||
/// Veilid API handle.
|
||||
api: VeilidAPI,
|
||||
@ -144,10 +146,12 @@ impl RoutingContext {
|
||||
"RoutingContext::get_destination(self: {:?}, target: {:?})", self, target);
|
||||
|
||||
let rpc_processor = self.api.core_context()?.rpc_processor();
|
||||
rpc_processor
|
||||
.resolve_target_to_destination(target, self.unlocked_inner.safety_selection)
|
||||
.await
|
||||
.map_err(VeilidAPIError::invalid_target)
|
||||
Box::pin(
|
||||
rpc_processor
|
||||
.resolve_target_to_destination(target, self.unlocked_inner.safety_selection),
|
||||
)
|
||||
.await
|
||||
.map_err(VeilidAPIError::invalid_target)
|
||||
}
|
||||
|
||||
////////////////////////////////////////////////////////////////
|
||||
@ -172,7 +176,7 @@ impl RoutingContext {
|
||||
let dest = self.get_destination(target).await?;
|
||||
|
||||
// Send app message
|
||||
let answer = match rpc_processor.rpc_call_app_call(dest, message).await {
|
||||
let answer = match Box::pin(rpc_processor.rpc_call_app_call(dest, message)).await {
|
||||
Ok(NetworkResult::Value(v)) => v,
|
||||
Ok(NetworkResult::Timeout) => apibail_timeout!(),
|
||||
Ok(NetworkResult::ServiceUnavailable(e)) => apibail_invalid_target!(e),
|
||||
@ -206,7 +210,7 @@ impl RoutingContext {
|
||||
let dest = self.get_destination(target).await?;
|
||||
|
||||
// Send app message
|
||||
match rpc_processor.rpc_call_app_message(dest, message).await {
|
||||
match Box::pin(rpc_processor.rpc_call_app_message(dest, message)).await {
|
||||
Ok(NetworkResult::Value(())) => {}
|
||||
Ok(NetworkResult::Timeout) => apibail_timeout!(),
|
||||
Ok(NetworkResult::ServiceUnavailable(e)) => apibail_invalid_target!(e),
|
||||
@ -267,9 +271,13 @@ impl RoutingContext {
|
||||
Crypto::validate_crypto_kind(kind)?;
|
||||
|
||||
let storage_manager = self.api.core_context()?.storage_manager();
|
||||
storage_manager
|
||||
.create_record(kind, schema, owner, self.unlocked_inner.safety_selection)
|
||||
.await
|
||||
Box::pin(storage_manager.create_record(
|
||||
kind,
|
||||
schema,
|
||||
owner,
|
||||
self.unlocked_inner.safety_selection,
|
||||
))
|
||||
.await
|
||||
}
|
||||
|
||||
/// Opens a DHT record at a specific key.
|
||||
@ -311,7 +319,7 @@ impl RoutingContext {
|
||||
Crypto::validate_crypto_kind(key.kind)?;
|
||||
|
||||
let storage_manager = self.api.core_context()?.storage_manager();
|
||||
storage_manager.close_record(key).await
|
||||
Box::pin(storage_manager.close_record(key)).await
|
||||
}
|
||||
|
||||
/// Deletes a DHT record at a specific key.
|
||||
@ -327,7 +335,7 @@ impl RoutingContext {
|
||||
Crypto::validate_crypto_kind(key.kind)?;
|
||||
|
||||
let storage_manager = self.api.core_context()?.storage_manager();
|
||||
storage_manager.delete_record(key).await
|
||||
Box::pin(storage_manager.delete_record(key)).await
|
||||
}
|
||||
|
||||
/// Gets the latest value of a subkey.
|
||||
@ -349,7 +357,7 @@ impl RoutingContext {
|
||||
Crypto::validate_crypto_kind(key.kind)?;
|
||||
|
||||
let storage_manager = self.api.core_context()?.storage_manager();
|
||||
storage_manager.get_value(key, subkey, force_refresh).await
|
||||
Box::pin(storage_manager.get_value(key, subkey, force_refresh)).await
|
||||
}
|
||||
|
||||
/// Pushes a changed subkey value to the network.
|
||||
@ -373,7 +381,7 @@ impl RoutingContext {
|
||||
Crypto::validate_crypto_kind(key.kind)?;
|
||||
|
||||
let storage_manager = self.api.core_context()?.storage_manager();
|
||||
storage_manager.set_value(key, subkey, data, writer).await
|
||||
Box::pin(storage_manager.set_value(key, subkey, data, writer)).await
|
||||
}
|
||||
|
||||
/// Add or update a watch to a DHT value that informs the user via an VeilidUpdate::ValueChange callback when the record has subkeys change.
|
||||
@ -410,9 +418,7 @@ impl RoutingContext {
|
||||
Crypto::validate_crypto_kind(key.kind)?;
|
||||
|
||||
let storage_manager = self.api.core_context()?.storage_manager();
|
||||
storage_manager
|
||||
.watch_values(key, subkeys, expiration, count)
|
||||
.await
|
||||
Box::pin(storage_manager.watch_values(key, subkeys, expiration, count)).await
|
||||
}
|
||||
|
||||
/// Cancels a watch early.
|
||||
@ -436,7 +442,7 @@ impl RoutingContext {
|
||||
Crypto::validate_crypto_kind(key.kind)?;
|
||||
|
||||
let storage_manager = self.api.core_context()?.storage_manager();
|
||||
storage_manager.cancel_watch_values(key, subkeys).await
|
||||
Box::pin(storage_manager.cancel_watch_values(key, subkeys)).await
|
||||
}
|
||||
|
||||
/// Inspects a DHT record for subkey state.
|
||||
@ -491,7 +497,7 @@ impl RoutingContext {
|
||||
Crypto::validate_crypto_kind(key.kind)?;
|
||||
|
||||
let storage_manager = self.api.core_context()?.storage_manager();
|
||||
storage_manager.inspect_record(key, subkeys, scope).await
|
||||
Box::pin(storage_manager.inspect_record(key, subkeys, scope)).await
|
||||
}
|
||||
|
||||
///////////////////////////////////
|
||||
|
@ -2,49 +2,50 @@ use super::test_types::*;
|
||||
use super::test_types_dht::*;
|
||||
use super::test_types_dht_schema::*;
|
||||
|
||||
#[expect(clippy::unused_async)]
|
||||
pub async fn test_all() {
|
||||
// test_types
|
||||
test_alignedu64().await;
|
||||
test_veilidappmessage().await;
|
||||
test_veilidappcall().await;
|
||||
test_fourcc().await;
|
||||
test_sequencing().await;
|
||||
test_stability().await;
|
||||
test_safetyselection().await;
|
||||
test_safetyspec().await;
|
||||
test_latencystats().await;
|
||||
test_transferstats().await;
|
||||
test_transferstatsdownup().await;
|
||||
test_rpcstats().await;
|
||||
test_peerstats().await;
|
||||
test_alignedu64();
|
||||
test_veilidappmessage();
|
||||
test_veilidappcall();
|
||||
test_fourcc();
|
||||
test_sequencing();
|
||||
test_stability();
|
||||
test_safetyselection();
|
||||
test_safetyspec();
|
||||
test_latencystats();
|
||||
test_transferstats();
|
||||
test_transferstatsdownup();
|
||||
test_rpcstats();
|
||||
test_peerstats();
|
||||
#[cfg(feature = "unstable-tunnels")]
|
||||
test_tunnelmode().await;
|
||||
test_tunnelmode();
|
||||
#[cfg(feature = "unstable-tunnels")]
|
||||
test_tunnelerror().await;
|
||||
test_tunnelerror();
|
||||
#[cfg(feature = "unstable-tunnels")]
|
||||
test_tunnelendpoint().await;
|
||||
test_tunnelendpoint();
|
||||
#[cfg(feature = "unstable-tunnels")]
|
||||
test_fulltunnel().await;
|
||||
test_fulltunnel();
|
||||
#[cfg(feature = "unstable-tunnels")]
|
||||
test_partialtunnel().await;
|
||||
test_veilidloglevel().await;
|
||||
test_veilidlog().await;
|
||||
test_attachmentstate().await;
|
||||
test_veilidstateattachment().await;
|
||||
test_peertabledata().await;
|
||||
test_veilidstatenetwork().await;
|
||||
test_veilidroutechange().await;
|
||||
test_veilidstateconfig().await;
|
||||
test_veilidvaluechange().await;
|
||||
test_veilidupdate().await;
|
||||
test_veilidstate().await;
|
||||
test_partialtunnel();
|
||||
test_veilidloglevel();
|
||||
test_veilidlog();
|
||||
test_attachmentstate();
|
||||
test_veilidstateattachment();
|
||||
test_peertabledata();
|
||||
test_veilidstatenetwork();
|
||||
test_veilidroutechange();
|
||||
test_veilidstateconfig();
|
||||
test_veilidvaluechange();
|
||||
test_veilidupdate();
|
||||
test_veilidstate();
|
||||
// test_types_dht
|
||||
test_dhtrecorddescriptor().await;
|
||||
test_valuedata().await;
|
||||
test_valuesubkeyrangeset().await;
|
||||
test_dhtrecorddescriptor();
|
||||
test_valuedata();
|
||||
test_valuesubkeyrangeset();
|
||||
// test_types_dht_schema
|
||||
test_dhtschemadflt().await;
|
||||
test_dhtschema().await;
|
||||
test_dhtschemasmplmember().await;
|
||||
test_dhtschemasmpl().await;
|
||||
test_dhtschemadflt();
|
||||
test_dhtschema();
|
||||
test_dhtschemasmplmember();
|
||||
test_dhtschemasmpl();
|
||||
}
|
||||
|
@ -3,7 +3,7 @@ use crate::*;
|
||||
|
||||
// aligned_u64
|
||||
|
||||
pub async fn test_alignedu64() {
|
||||
pub fn test_alignedu64() {
|
||||
let orig = AlignedU64::new(0x0123456789abcdef);
|
||||
let copy = deserialize_json(&serialize_json(orig)).unwrap();
|
||||
|
||||
@ -12,7 +12,7 @@ pub async fn test_alignedu64() {
|
||||
|
||||
// app_messsage_call
|
||||
|
||||
pub async fn test_veilidappmessage() {
|
||||
pub fn test_veilidappmessage() {
|
||||
let orig = VeilidAppMessage::new(
|
||||
Some(fix_typedkey()),
|
||||
Some(fix_cryptokey()),
|
||||
@ -23,7 +23,7 @@ pub async fn test_veilidappmessage() {
|
||||
assert_eq!(orig, copy);
|
||||
}
|
||||
|
||||
pub async fn test_veilidappcall() {
|
||||
pub fn test_veilidappcall() {
|
||||
let orig = VeilidAppCall::new(
|
||||
Some(fix_typedkey()),
|
||||
Some(fix_cryptokey()),
|
||||
@ -37,7 +37,7 @@ pub async fn test_veilidappcall() {
|
||||
|
||||
// fourcc
|
||||
|
||||
pub async fn test_fourcc() {
|
||||
pub fn test_fourcc() {
|
||||
let orig = FourCC::from_str("D34D").unwrap();
|
||||
let copy = deserialize_json(&serialize_json(orig)).unwrap();
|
||||
|
||||
@ -46,28 +46,28 @@ pub async fn test_fourcc() {
|
||||
|
||||
// safety
|
||||
|
||||
pub async fn test_sequencing() {
|
||||
pub fn test_sequencing() {
|
||||
let orig = Sequencing::PreferOrdered;
|
||||
let copy = deserialize_json(&serialize_json(orig)).unwrap();
|
||||
|
||||
assert_eq!(orig, copy);
|
||||
}
|
||||
|
||||
pub async fn test_stability() {
|
||||
pub fn test_stability() {
|
||||
let orig = Stability::Reliable;
|
||||
let copy = deserialize_json(&serialize_json(orig)).unwrap();
|
||||
|
||||
assert_eq!(orig, copy);
|
||||
}
|
||||
|
||||
pub async fn test_safetyselection() {
|
||||
pub fn test_safetyselection() {
|
||||
let orig = SafetySelection::Unsafe(Sequencing::EnsureOrdered);
|
||||
let copy = deserialize_json(&serialize_json(orig)).unwrap();
|
||||
|
||||
assert_eq!(orig, copy);
|
||||
}
|
||||
|
||||
pub async fn test_safetyspec() {
|
||||
pub fn test_safetyspec() {
|
||||
let orig = SafetySpec {
|
||||
preferred_route: Some(fix_cryptokey()),
|
||||
hop_count: 23,
|
||||
@ -81,35 +81,35 @@ pub async fn test_safetyspec() {
|
||||
|
||||
// stats
|
||||
|
||||
pub async fn test_latencystats() {
|
||||
pub fn test_latencystats() {
|
||||
let orig = fix_latencystats();
|
||||
let copy = deserialize_json(&serialize_json(&orig)).unwrap();
|
||||
|
||||
assert_eq!(orig, copy);
|
||||
}
|
||||
|
||||
pub async fn test_transferstats() {
|
||||
pub fn test_transferstats() {
|
||||
let orig = fix_transferstats();
|
||||
let copy = deserialize_json(&serialize_json(&orig)).unwrap();
|
||||
|
||||
assert_eq!(orig, copy);
|
||||
}
|
||||
|
||||
pub async fn test_transferstatsdownup() {
|
||||
pub fn test_transferstatsdownup() {
|
||||
let orig = fix_transferstatsdownup();
|
||||
let copy = deserialize_json(&serialize_json(&orig)).unwrap();
|
||||
|
||||
assert_eq!(orig, copy);
|
||||
}
|
||||
|
||||
pub async fn test_rpcstats() {
|
||||
pub fn test_rpcstats() {
|
||||
let orig = fix_rpcstats();
|
||||
let copy = deserialize_json(&serialize_json(&orig)).unwrap();
|
||||
|
||||
assert_eq!(orig, copy);
|
||||
}
|
||||
|
||||
pub async fn test_peerstats() {
|
||||
pub fn test_peerstats() {
|
||||
let orig = fix_peerstats();
|
||||
let copy = deserialize_json(&serialize_json(&orig)).unwrap();
|
||||
|
||||
@ -119,7 +119,7 @@ pub async fn test_peerstats() {
|
||||
// tunnel
|
||||
|
||||
#[cfg(feature = "unstable-tunnels")]
|
||||
pub async fn test_tunnelmode() {
|
||||
pub fn test_tunnelmode() {
|
||||
let orig = TunnelMode::Raw;
|
||||
let copy = deserialize_json(&serialize_json(&orig)).unwrap();
|
||||
|
||||
@ -127,7 +127,7 @@ pub async fn test_tunnelmode() {
|
||||
}
|
||||
|
||||
#[cfg(feature = "unstable-tunnels")]
|
||||
pub async fn test_tunnelerror() {
|
||||
pub fn test_tunnelerror() {
|
||||
let orig = TunnelError::NoCapacity;
|
||||
let copy = deserialize_json(&serialize_json(&orig)).unwrap();
|
||||
|
||||
@ -135,7 +135,7 @@ pub async fn test_tunnelerror() {
|
||||
}
|
||||
|
||||
#[cfg(feature = "unstable-tunnels")]
|
||||
pub async fn test_tunnelendpoint() {
|
||||
pub fn test_tunnelendpoint() {
|
||||
let orig = TunnelEndpoint {
|
||||
mode: TunnelMode::Raw,
|
||||
description: "Here there be tygers.".to_string(),
|
||||
@ -146,7 +146,7 @@ pub async fn test_tunnelendpoint() {
|
||||
}
|
||||
|
||||
#[cfg(feature = "unstable-tunnels")]
|
||||
pub async fn test_fulltunnel() {
|
||||
pub fn test_fulltunnel() {
|
||||
let orig = FullTunnel {
|
||||
id: AlignedU64::from(42),
|
||||
timeout: AlignedU64::from(3_000_000),
|
||||
@ -165,7 +165,7 @@ pub async fn test_fulltunnel() {
|
||||
}
|
||||
|
||||
#[cfg(feature = "unstable-tunnels")]
|
||||
pub async fn test_partialtunnel() {
|
||||
pub fn test_partialtunnel() {
|
||||
let orig = PartialTunnel {
|
||||
id: AlignedU64::from(42),
|
||||
timeout: AlignedU64::from(3_000_000),
|
||||
@ -181,14 +181,14 @@ pub async fn test_partialtunnel() {
|
||||
|
||||
// veilid_log
|
||||
|
||||
pub async fn test_veilidloglevel() {
|
||||
pub fn test_veilidloglevel() {
|
||||
let orig = VeilidLogLevel::Info;
|
||||
let copy = deserialize_json(&serialize_json(orig)).unwrap();
|
||||
|
||||
assert_eq!(orig, copy);
|
||||
}
|
||||
|
||||
pub async fn test_veilidlog() {
|
||||
pub fn test_veilidlog() {
|
||||
let orig = VeilidLog {
|
||||
log_level: VeilidLogLevel::Debug,
|
||||
message: "A log! A log!".to_string(),
|
||||
@ -201,14 +201,14 @@ pub async fn test_veilidlog() {
|
||||
|
||||
// veilid_state
|
||||
|
||||
pub async fn test_attachmentstate() {
|
||||
pub fn test_attachmentstate() {
|
||||
let orig = AttachmentState::FullyAttached;
|
||||
let copy = deserialize_json(&serialize_json(orig)).unwrap();
|
||||
|
||||
assert_eq!(orig, copy);
|
||||
}
|
||||
|
||||
pub async fn test_veilidstateattachment() {
|
||||
pub fn test_veilidstateattachment() {
|
||||
let orig = VeilidStateAttachment {
|
||||
state: AttachmentState::OverAttached,
|
||||
public_internet_ready: true,
|
||||
@ -221,14 +221,14 @@ pub async fn test_veilidstateattachment() {
|
||||
assert_eq!(orig, copy);
|
||||
}
|
||||
|
||||
pub async fn test_peertabledata() {
|
||||
pub fn test_peertabledata() {
|
||||
let orig = fix_peertabledata();
|
||||
let copy = deserialize_json(&serialize_json(&orig)).unwrap();
|
||||
|
||||
assert_eq!(orig, copy);
|
||||
}
|
||||
|
||||
pub async fn test_veilidstatenetwork() {
|
||||
pub fn test_veilidstatenetwork() {
|
||||
let orig = VeilidStateNetwork {
|
||||
started: true,
|
||||
bps_down: ByteCount::from(14_400),
|
||||
@ -240,7 +240,7 @@ pub async fn test_veilidstatenetwork() {
|
||||
assert_eq!(orig, copy);
|
||||
}
|
||||
|
||||
pub async fn test_veilidroutechange() {
|
||||
pub fn test_veilidroutechange() {
|
||||
let orig = VeilidRouteChange {
|
||||
dead_routes: vec![fix_cryptokey()],
|
||||
dead_remote_routes: vec![fix_cryptokey()],
|
||||
@ -250,7 +250,7 @@ pub async fn test_veilidroutechange() {
|
||||
assert_eq!(orig, copy);
|
||||
}
|
||||
|
||||
pub async fn test_veilidstateconfig() {
|
||||
pub fn test_veilidstateconfig() {
|
||||
let orig = VeilidStateConfig {
|
||||
config: fix_veilidconfiginner(),
|
||||
};
|
||||
@ -259,21 +259,21 @@ pub async fn test_veilidstateconfig() {
|
||||
assert_eq!(orig, copy);
|
||||
}
|
||||
|
||||
pub async fn test_veilidvaluechange() {
|
||||
pub fn test_veilidvaluechange() {
|
||||
let orig = fix_veilidvaluechange();
|
||||
let copy = deserialize_json(&serialize_json(&orig)).unwrap();
|
||||
|
||||
assert_eq!(orig, copy);
|
||||
}
|
||||
|
||||
pub async fn test_veilidupdate() {
|
||||
pub fn test_veilidupdate() {
|
||||
let orig = VeilidUpdate::ValueChange(Box::new(fix_veilidvaluechange()));
|
||||
let copy = deserialize_json(&serialize_json(&orig)).unwrap();
|
||||
|
||||
assert_eq!(orig, copy);
|
||||
}
|
||||
|
||||
pub async fn test_veilidstate() {
|
||||
pub fn test_veilidstate() {
|
||||
let orig = VeilidState {
|
||||
attachment: Box::new(VeilidStateAttachment {
|
||||
state: AttachmentState::OverAttached,
|
||||
|
@ -4,7 +4,7 @@ use range_set_blaze::*;
|
||||
|
||||
// dht_record_descriptors
|
||||
|
||||
pub async fn test_dhtrecorddescriptor() {
|
||||
pub fn test_dhtrecorddescriptor() {
|
||||
let orig = DHTRecordDescriptor::new(
|
||||
fix_typedkey(),
|
||||
fix_cryptokey(),
|
||||
@ -18,7 +18,7 @@ pub async fn test_dhtrecorddescriptor() {
|
||||
|
||||
// value_data
|
||||
|
||||
pub async fn test_valuedata() {
|
||||
pub fn test_valuedata() {
|
||||
let orig = ValueData::new_with_seq(42, b"Brent Spiner".to_vec(), fix_cryptokey());
|
||||
let copy = deserialize_json(&serialize_json(&orig)).unwrap();
|
||||
|
||||
@ -27,7 +27,7 @@ pub async fn test_valuedata() {
|
||||
|
||||
// value_subkey_range_set
|
||||
|
||||
pub async fn test_valuesubkeyrangeset() {
|
||||
pub fn test_valuesubkeyrangeset() {
|
||||
let orig = ValueSubkeyRangeSet::new_with_data(RangeSetBlaze::from_iter([20..=30]));
|
||||
let copy = deserialize_json(&serialize_json(&orig)).unwrap();
|
||||
|
||||
|
@ -3,7 +3,7 @@ use crate::*;
|
||||
|
||||
// dlft
|
||||
|
||||
pub async fn test_dhtschemadflt() {
|
||||
pub fn test_dhtschemadflt() {
|
||||
let orig = DHTSchemaDFLT::new(9);
|
||||
let copy = deserialize_json(&serialize_json(&orig)).unwrap();
|
||||
|
||||
@ -12,7 +12,7 @@ pub async fn test_dhtschemadflt() {
|
||||
|
||||
// mod
|
||||
|
||||
pub async fn test_dhtschema() {
|
||||
pub fn test_dhtschema() {
|
||||
let orig = DHTSchema::SMPL(
|
||||
DHTSchemaSMPL::new(
|
||||
91,
|
||||
@ -36,7 +36,7 @@ pub async fn test_dhtschema() {
|
||||
|
||||
// smpl
|
||||
|
||||
pub async fn test_dhtschemasmplmember() {
|
||||
pub fn test_dhtschemasmplmember() {
|
||||
let orig = DHTSchemaSMPLMember {
|
||||
m_key: fix_cryptokey(),
|
||||
m_cnt: 7,
|
||||
@ -46,7 +46,7 @@ pub async fn test_dhtschemasmplmember() {
|
||||
assert_eq!(orig, copy);
|
||||
}
|
||||
|
||||
pub async fn test_dhtschemasmpl() {
|
||||
pub fn test_dhtschemasmpl() {
|
||||
let orig = DHTSchemaSMPL::new(
|
||||
91,
|
||||
vec![
|
||||
|
@ -24,6 +24,7 @@ macro_rules! aligned_u64_type {
|
||||
#[cfg_attr(all(target_arch = "wasm32", target_os = "unknown"), derive(Tsify))]
|
||||
#[repr(C, align(8))]
|
||||
#[serde(transparent)]
|
||||
#[must_use]
|
||||
pub struct $name(
|
||||
#[serde(with = "as_human_string")]
|
||||
#[schemars(with = "String")]
|
||||
@ -49,6 +50,7 @@ macro_rules! aligned_u64_type {
|
||||
pub const fn new(v: u64) -> Self {
|
||||
Self(v)
|
||||
}
|
||||
#[must_use]
|
||||
pub fn as_u64(self) -> u64 {
|
||||
self.0
|
||||
}
|
||||
|
@ -3,6 +3,7 @@ use super::*;
|
||||
/// Direct statement blob passed to hosting application for processing.
|
||||
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, JsonSchema)]
|
||||
#[cfg_attr(all(target_arch = "wasm32", target_os = "unknown"), derive(Tsify))]
|
||||
#[must_use]
|
||||
pub struct VeilidAppMessage {
|
||||
#[serde(with = "as_human_opt_string")]
|
||||
#[schemars(with = "Option<String>")]
|
||||
@ -43,16 +44,19 @@ impl VeilidAppMessage {
|
||||
}
|
||||
|
||||
/// Some(sender) if the message was sent directly, None if received via a private/safety route.
|
||||
#[must_use]
|
||||
pub fn sender(&self) -> Option<&TypedKey> {
|
||||
self.sender.as_ref()
|
||||
}
|
||||
|
||||
/// Some(route_id) if the message was received over a private route, None if received only a safety route or directly.
|
||||
#[must_use]
|
||||
pub fn route_id(&self) -> Option<&RouteId> {
|
||||
self.route_id.as_ref()
|
||||
}
|
||||
|
||||
/// The content of the message to deliver to the application.
|
||||
#[must_use]
|
||||
pub fn message(&self) -> &[u8] {
|
||||
&self.message
|
||||
}
|
||||
@ -61,6 +65,7 @@ impl VeilidAppMessage {
|
||||
/// Direct question blob passed to hosting application for processing to send an eventual AppReply.
|
||||
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, JsonSchema)]
|
||||
#[cfg_attr(all(target_arch = "wasm32", target_os = "unknown"), derive(Tsify))]
|
||||
#[must_use]
|
||||
pub struct VeilidAppCall {
|
||||
#[serde(with = "as_human_opt_string")]
|
||||
#[schemars(with = "Option<String>")]
|
||||
@ -108,16 +113,19 @@ impl VeilidAppCall {
|
||||
}
|
||||
|
||||
/// Some(sender) if the request was sent directly, None if received via a private/safety route.
|
||||
#[must_use]
|
||||
pub fn sender(&self) -> Option<&TypedKey> {
|
||||
self.sender.as_ref()
|
||||
}
|
||||
|
||||
/// Some(route_id) if the request was received over a private route, None if received only a safety route or directly.
|
||||
#[must_use]
|
||||
pub fn route_id(&self) -> Option<&RouteId> {
|
||||
self.route_id.as_ref()
|
||||
}
|
||||
|
||||
/// The content of the request to deliver to the application.
|
||||
#[must_use]
|
||||
pub fn message(&self) -> &[u8] {
|
||||
&self.message
|
||||
}
|
||||
|
@ -7,6 +7,7 @@ use super::*;
|
||||
derive(Tsify),
|
||||
tsify(from_wasm_abi, into_wasm_abi)
|
||||
)]
|
||||
#[must_use]
|
||||
pub struct DHTRecordDescriptor {
|
||||
/// DHT Key = Hash(ownerKeyKind) of: [ ownerKeyValue, schema ]
|
||||
#[schemars(with = "String")]
|
||||
@ -46,6 +47,7 @@ impl DHTRecordDescriptor {
|
||||
&self.owner
|
||||
}
|
||||
|
||||
#[must_use]
|
||||
pub fn owner_secret(&self) -> Option<&SecretKey> {
|
||||
self.owner_secret.as_ref()
|
||||
}
|
||||
|
@ -7,6 +7,7 @@ use super::*;
|
||||
derive(Tsify),
|
||||
tsify(from_wasm_abi, into_wasm_abi)
|
||||
)]
|
||||
#[must_use]
|
||||
pub struct DHTRecordReport {
|
||||
/// The actual subkey range within the schema being reported on
|
||||
/// This may be a subset of the requested range if it exceeds the schema limits
|
||||
@ -42,9 +43,11 @@ impl DHTRecordReport {
|
||||
pub fn offline_subkeys(&self) -> &ValueSubkeyRangeSet {
|
||||
&self.offline_subkeys
|
||||
}
|
||||
#[must_use]
|
||||
pub fn local_seqs(&self) -> &[ValueSeqNum] {
|
||||
&self.local_seqs
|
||||
}
|
||||
#[must_use]
|
||||
pub fn network_seqs(&self) -> &[ValueSeqNum] {
|
||||
&self.network_seqs
|
||||
}
|
||||
|
@ -8,6 +8,7 @@ use crate::storage_manager::{MAX_RECORD_DATA_SIZE, MAX_SUBKEY_SIZE};
|
||||
derive(Tsify),
|
||||
tsify(from_wasm_abi)
|
||||
)]
|
||||
#[must_use]
|
||||
pub struct DHTSchemaDFLT {
|
||||
/// Owner subkey count
|
||||
o_cnt: u16,
|
||||
@ -33,11 +34,13 @@ impl DHTSchemaDFLT {
|
||||
}
|
||||
|
||||
/// Get the owner subkey count
|
||||
#[must_use]
|
||||
pub fn o_cnt(&self) -> u16 {
|
||||
self.o_cnt
|
||||
}
|
||||
|
||||
/// Build the data representation of the schema
|
||||
#[must_use]
|
||||
pub fn compile(&self) -> Vec<u8> {
|
||||
let mut out = Vec::<u8>::with_capacity(Self::FIXED_SIZE);
|
||||
// kind
|
||||
@ -48,15 +51,18 @@ impl DHTSchemaDFLT {
|
||||
}
|
||||
|
||||
/// Get the maximum subkey this schema allocates
|
||||
#[must_use]
|
||||
pub fn max_subkey(&self) -> ValueSubkey {
|
||||
self.o_cnt as ValueSubkey - 1
|
||||
}
|
||||
/// Get the data size of this schema beyond the size of the structure itself
|
||||
#[must_use]
|
||||
pub fn data_size(&self) -> usize {
|
||||
0
|
||||
}
|
||||
|
||||
/// Check a subkey value data against the schema
|
||||
#[must_use]
|
||||
pub fn check_subkey_value_data(
|
||||
&self,
|
||||
owner: &PublicKey,
|
||||
@ -90,6 +96,7 @@ impl DHTSchemaDFLT {
|
||||
}
|
||||
|
||||
/// Check if a key is a schema member
|
||||
#[must_use]
|
||||
pub fn is_member(&self, _key: &PublicKey) -> bool {
|
||||
false
|
||||
}
|
||||
|
@ -14,6 +14,7 @@ pub use smpl::*;
|
||||
derive(Tsify),
|
||||
tsify(from_wasm_abi)
|
||||
)]
|
||||
#[must_use]
|
||||
pub enum DHTSchema {
|
||||
DFLT(DHTSchemaDFLT),
|
||||
SMPL(DHTSchemaSMPL),
|
||||
@ -36,6 +37,7 @@ impl DHTSchema {
|
||||
}
|
||||
|
||||
/// Build the data representation of the schema
|
||||
#[must_use]
|
||||
pub fn compile(&self) -> Vec<u8> {
|
||||
match self {
|
||||
DHTSchema::DFLT(d) => d.compile(),
|
||||
@ -44,6 +46,7 @@ impl DHTSchema {
|
||||
}
|
||||
|
||||
/// Get maximum subkey number for this schema
|
||||
#[must_use]
|
||||
pub fn max_subkey(&self) -> ValueSubkey {
|
||||
match self {
|
||||
DHTSchema::DFLT(d) => d.max_subkey(),
|
||||
@ -52,6 +55,7 @@ impl DHTSchema {
|
||||
}
|
||||
|
||||
/// Get the data size of this schema beyond the size of the structure itself
|
||||
#[must_use]
|
||||
pub fn data_size(&self) -> usize {
|
||||
match self {
|
||||
DHTSchema::DFLT(d) => d.data_size(),
|
||||
@ -60,6 +64,7 @@ impl DHTSchema {
|
||||
}
|
||||
|
||||
/// Check a subkey value data against the schema
|
||||
#[must_use]
|
||||
pub fn check_subkey_value_data(
|
||||
&self,
|
||||
owner: &PublicKey,
|
||||
@ -73,6 +78,7 @@ impl DHTSchema {
|
||||
}
|
||||
|
||||
/// Check if a key is a schema member
|
||||
#[must_use]
|
||||
pub fn is_member(&self, key: &PublicKey) -> bool {
|
||||
match self {
|
||||
DHTSchema::DFLT(d) => d.is_member(key),
|
||||
|
@ -8,6 +8,7 @@ use crate::storage_manager::{MAX_RECORD_DATA_SIZE, MAX_SUBKEY_SIZE};
|
||||
derive(Tsify),
|
||||
tsify(from_wasm_abi)
|
||||
)]
|
||||
#[must_use]
|
||||
pub struct DHTSchemaSMPLMember {
|
||||
/// Member key
|
||||
#[schemars(with = "String")]
|
||||
@ -23,6 +24,7 @@ pub struct DHTSchemaSMPLMember {
|
||||
derive(Tsify),
|
||||
tsify(from_wasm_abi)
|
||||
)]
|
||||
#[must_use]
|
||||
pub struct DHTSchemaSMPL {
|
||||
/// Owner subkey count
|
||||
o_cnt: u16,
|
||||
@ -58,6 +60,7 @@ impl DHTSchemaSMPL {
|
||||
}
|
||||
|
||||
/// Get the owner subkey count
|
||||
#[must_use]
|
||||
pub fn o_cnt(&self) -> u16 {
|
||||
self.o_cnt
|
||||
}
|
||||
@ -68,6 +71,7 @@ impl DHTSchemaSMPL {
|
||||
}
|
||||
|
||||
/// Build the data representation of the schema
|
||||
#[must_use]
|
||||
pub fn compile(&self) -> Vec<u8> {
|
||||
let mut out = Vec::<u8>::with_capacity(
|
||||
Self::FIXED_SIZE + (self.members.len() * (PUBLIC_KEY_LENGTH + 2)),
|
||||
@ -87,6 +91,7 @@ impl DHTSchemaSMPL {
|
||||
}
|
||||
|
||||
/// Get the maximum subkey this schema allocates
|
||||
#[must_use]
|
||||
pub fn max_subkey(&self) -> ValueSubkey {
|
||||
let subkey_count = self
|
||||
.members
|
||||
@ -96,11 +101,13 @@ impl DHTSchemaSMPL {
|
||||
}
|
||||
|
||||
/// Get the data size of this schema beyond the size of the structure itself
|
||||
#[must_use]
|
||||
pub fn data_size(&self) -> usize {
|
||||
self.members.len() * mem::size_of::<DHTSchemaSMPLMember>()
|
||||
}
|
||||
|
||||
/// Check a subkey value data against the schema
|
||||
#[must_use]
|
||||
pub fn check_subkey_value_data(
|
||||
&self,
|
||||
owner: &PublicKey,
|
||||
@ -156,6 +163,7 @@ impl DHTSchemaSMPL {
|
||||
}
|
||||
|
||||
/// Check if a key is a schema member
|
||||
#[must_use]
|
||||
pub fn is_member(&self, key: &PublicKey) -> bool {
|
||||
for m in &self.members {
|
||||
if m.m_key == *key {
|
||||
|
@ -7,6 +7,7 @@ use veilid_api::VeilidAPIResult;
|
||||
derive(Tsify),
|
||||
tsify(into_wasm_abi)
|
||||
)]
|
||||
#[must_use]
|
||||
pub struct ValueData {
|
||||
/// An increasing sequence number to time-order the DHT record changes
|
||||
seq: ValueSeqNum,
|
||||
@ -54,6 +55,7 @@ impl ValueData {
|
||||
Ok(Self { seq, data, writer })
|
||||
}
|
||||
|
||||
#[must_use]
|
||||
pub fn seq(&self) -> ValueSeqNum {
|
||||
self.seq
|
||||
}
|
||||
@ -62,14 +64,17 @@ impl ValueData {
|
||||
&self.writer
|
||||
}
|
||||
|
||||
#[must_use]
|
||||
pub fn data(&self) -> &[u8] {
|
||||
&self.data
|
||||
}
|
||||
|
||||
#[must_use]
|
||||
pub fn data_size(&self) -> usize {
|
||||
self.data.len()
|
||||
}
|
||||
|
||||
#[must_use]
|
||||
pub fn total_size(&self) -> usize {
|
||||
mem::size_of::<Self>() + self.data.len()
|
||||
}
|
||||
|
@ -11,6 +11,7 @@ use range_set_blaze::*;
|
||||
tsify(from_wasm_abi, into_wasm_abi)
|
||||
)]
|
||||
#[serde(transparent)]
|
||||
#[must_use]
|
||||
pub struct ValueSubkeyRangeSet {
|
||||
#[serde(with = "serialize_range_set_blaze")]
|
||||
#[schemars(with = "Vec<(u32,u32)>")]
|
||||
@ -52,13 +53,16 @@ impl ValueSubkeyRangeSet {
|
||||
Self::new_with_data(&self.data | &other.data)
|
||||
}
|
||||
|
||||
#[must_use]
|
||||
pub fn data(&self) -> &RangeSetBlaze<ValueSubkey> {
|
||||
&self.data
|
||||
}
|
||||
#[must_use]
|
||||
pub fn into_data(self) -> RangeSetBlaze<ValueSubkey> {
|
||||
self.data
|
||||
}
|
||||
|
||||
#[must_use]
|
||||
pub fn nth_subkey(&self, idx: usize) -> Option<ValueSubkey> {
|
||||
let mut idxleft = idx;
|
||||
for range in self.data.ranges() {
|
||||
@ -71,6 +75,7 @@ impl ValueSubkeyRangeSet {
|
||||
None
|
||||
}
|
||||
|
||||
#[must_use]
|
||||
pub fn idx_of_subkey(&self, subkey: ValueSubkey) -> Option<usize> {
|
||||
let mut idx = 0usize;
|
||||
for range in self.data.ranges() {
|
||||
|
@ -6,6 +6,7 @@ use super::*;
|
||||
)]
|
||||
#[serde(try_from = "String")]
|
||||
#[serde(into = "String")]
|
||||
#[must_use]
|
||||
pub struct FourCC(pub [u8; 4]);
|
||||
|
||||
impl From<[u8; 4]> for FourCC {
|
||||
|
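Many of the hunks above put `#[must_use]` on whole types rather than individual functions. A sketch of the effect using `FourCC` (the `new` constructor here is an assumption; the surrounding diff only shows a `From<[u8; 4]>` impl): any expression producing the type must be consumed or explicitly discarded once unused `#[must_use]` results are denied.

```rust
#![deny(unused_must_use)]

// `#[must_use]` on the type covers every function that returns it, so
// constructors and builder-style methods cannot be silently dropped.
#[must_use]
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct FourCC(pub [u8; 4]);

impl FourCC {
    pub fn new(bytes: [u8; 4]) -> Self {
        Self(bytes)
    }
}

fn main() {
    // error if uncommented: unused `FourCC` that must be used
    // FourCC::new(*b"D34D");

    let kind = FourCC::new(*b"D34D");
    assert_eq!(kind, FourCC(*b"D34D"));
}
```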
@ -9,6 +9,7 @@ use super::*;
|
||||
derive(Tsify),
|
||||
tsify(from_wasm_abi, into_wasm_abi, namespace)
|
||||
)]
|
||||
#[must_use]
|
||||
pub enum Sequencing {
|
||||
NoPreference = 0,
|
||||
PreferOrdered = 1,
|
||||
@ -31,6 +32,7 @@ impl Default for Sequencing {
|
||||
derive(Tsify),
|
||||
tsify(from_wasm_abi, into_wasm_abi, namespace)
|
||||
)]
|
||||
#[must_use]
|
||||
pub enum Stability {
|
||||
LowLatency = 0,
|
||||
Reliable = 1,
|
||||
@ -52,7 +54,7 @@ impl Default for Stability {
|
||||
derive(Tsify),
|
||||
tsify(from_wasm_abi, into_wasm_abi, namespace)
|
||||
)]
|
||||
|
||||
#[must_use]
|
||||
pub enum SafetySelection {
|
||||
/// Don't use a safety route, only specify the sequencing preference.
|
||||
Unsafe(Sequencing),
|
||||
@ -80,6 +82,7 @@ impl Default for SafetySelection {
|
||||
Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize, JsonSchema,
|
||||
)]
|
||||
#[cfg_attr(all(target_arch = "wasm32", target_os = "unknown"), derive(Tsify))]
|
||||
#[must_use]
|
||||
pub struct SafetySpec {
|
||||
/// Preferred safety route set id if it still exists.
|
||||
#[schemars(with = "Option<String>")]
|
||||
|
@ -9,6 +9,7 @@ use super::*;
|
||||
derive(Tsify),
|
||||
tsify(namespace)
|
||||
)]
|
||||
#[must_use]
|
||||
pub enum VeilidLogLevel {
|
||||
Error = 1,
|
||||
Warn = 2,
|
||||
@ -36,6 +37,7 @@ impl VeilidLogLevel {
|
||||
log::Level::Trace => VeilidLogLevel::Trace,
|
||||
}
|
||||
}
|
||||
#[must_use]
|
||||
pub fn to_tracing_level(&self) -> tracing::Level {
|
||||
match self {
|
||||
Self::Error => tracing::Level::ERROR,
|
||||
@ -45,6 +47,7 @@ impl VeilidLogLevel {
|
||||
Self::Trace => tracing::Level::TRACE,
|
||||
}
|
||||
}
|
||||
#[must_use]
|
||||
pub fn to_log_level(&self) -> log::Level {
|
||||
match self {
|
||||
Self::Error => log::Level::Error,
|
||||
|
@ -7,6 +7,7 @@ use super::*;
|
||||
derive(Tsify),
|
||||
tsify(namespace, from_wasm_abi, into_wasm_abi)
|
||||
)]
|
||||
#[must_use]
|
||||
pub enum AttachmentState {
|
||||
Detached = 0,
|
||||
Attaching = 1,
|
||||
@ -18,9 +19,11 @@ pub enum AttachmentState {
|
||||
Detaching = 7,
|
||||
}
|
||||
impl AttachmentState {
|
||||
#[must_use]
|
||||
pub fn is_detached(&self) -> bool {
|
||||
matches!(self, Self::Detached)
|
||||
}
|
||||
#[must_use]
|
||||
pub fn is_attached(&self) -> bool {
|
||||
matches!(
|
||||
self,
|
||||
@ -78,6 +81,7 @@ impl TryFrom<&str> for AttachmentState {
/// Describe the attachment state of the Veilid node
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, JsonSchema)]
#[cfg_attr(all(target_arch = "wasm32", target_os = "unknown"), derive(Tsify))]
#[must_use]
pub struct VeilidStateAttachment {
    /// The overall quality of the routing table if attached, or the current state the attachment state machine.
    pub state: AttachmentState,

@ -95,6 +99,7 @@ pub struct VeilidStateAttachment {
/// Describe a recently accessed peer
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, JsonSchema)]
#[cfg_attr(all(target_arch = "wasm32", target_os = "unknown"), derive(Tsify))]
#[must_use]
pub struct PeerTableData {
    /// The node ids used by this peer
    #[schemars(with = "Vec<String>")]

@ -112,6 +117,7 @@ pub struct PeerTableData {
/// Describe the current network state of the Veilid node
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, JsonSchema)]
#[cfg_attr(all(target_arch = "wasm32", target_os = "unknown"), derive(Tsify))]
#[must_use]
pub struct VeilidStateNetwork {
    /// If the network has been started or not.
    pub started: bool,

@ -127,6 +133,7 @@ pub struct VeilidStateNetwork {
/// Describe a private route change that has happened
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, JsonSchema)]
#[cfg_attr(all(target_arch = "wasm32", target_os = "unknown"), derive(Tsify))]
#[must_use]
pub struct VeilidRouteChange {
    /// If a private route that was allocated has died, it is listed here.
    #[schemars(with = "Vec<String>")]

@ -142,6 +149,7 @@ pub struct VeilidRouteChange {
/// itself during runtime.
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, JsonSchema)]
#[cfg_attr(all(target_arch = "wasm32", target_os = "unknown"), derive(Tsify))]
#[must_use]
pub struct VeilidStateConfig {
    /// If the Veilid node configuration has changed the full new config will be here.
    pub config: VeilidConfigInner,

@ -150,6 +158,7 @@ pub struct VeilidStateConfig {
/// Describe when DHT records have subkey values changed
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, JsonSchema)]
#[cfg_attr(all(target_arch = "wasm32", target_os = "unknown"), derive(Tsify))]
#[must_use]
pub struct VeilidValueChange {
    /// The DHT Record key that changed
    #[schemars(with = "String")]

@ -176,6 +185,7 @@ pub struct VeilidValueChange {
    tsify(into_wasm_abi)
)]
#[serde(tag = "kind")]
#[must_use]
pub enum VeilidUpdate {
    Log(Box<VeilidLog>),
    AppMessage(Box<VeilidAppMessage>),

@ -196,6 +206,7 @@ from_impl_to_jsvalue!(VeilidUpdate);
    derive(Tsify),
    tsify(into_wasm_abi)
)]
#[must_use]
pub struct VeilidState {
    pub attachment: Box<VeilidStateAttachment>,
    pub network: Box<VeilidStateNetwork>,
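A minimal sketch (hypothetical update handler, not part of this change) of consuming the tagged VeilidUpdate enum above; only the variants visible in this hunk are matched explicitly, and the Debug formatting assumes these payload types derive Debug.

fn on_update(update: VeilidUpdate) {
    match update {
        VeilidUpdate::Log(log) => eprintln!("log: {:?}", log),
        VeilidUpdate::AppMessage(msg) => eprintln!("app message: {:?}", msg),
        // Remaining variants (attachment, network, config, route/value changes, ...)
        // would be handled here as needed.
        _ => {}
    }
}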
@ -32,6 +32,7 @@ pub type ConfigCallback = Arc<dyn Fn(String) -> ConfigCallbackReturn + Send + Sy
///
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, JsonSchema)]
#[cfg_attr(all(target_arch = "wasm32", target_os = "unknown"), derive(Tsify))]
#[must_use]
pub struct VeilidConfigHTTPS {
    pub enabled: bool,
    pub listen_address: String,

@ -63,6 +64,7 @@ impl Default for VeilidConfigHTTPS {
///
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, JsonSchema)]
#[cfg_attr(all(target_arch = "wasm32", target_os = "unknown"), derive(Tsify))]
#[must_use]
pub struct VeilidConfigHTTP {
    pub enabled: bool,
    pub listen_address: String,

@ -90,6 +92,7 @@ impl Default for VeilidConfigHTTP {
///
#[derive(Default, Debug, Clone, PartialEq, Eq, Serialize, Deserialize, JsonSchema)]
#[cfg_attr(all(target_arch = "wasm32", target_os = "unknown"), derive(Tsify))]
#[must_use]
pub struct VeilidConfigApplication {
    pub https: VeilidConfigHTTPS,
    pub http: VeilidConfigHTTP,

@ -107,6 +110,7 @@ pub struct VeilidConfigApplication {
///
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, JsonSchema)]
#[cfg_attr(all(target_arch = "wasm32", target_os = "unknown"), derive(Tsify))]
#[must_use]
pub struct VeilidConfigUDP {
    pub enabled: bool,
    pub socket_pool_size: u32,

@ -145,6 +149,7 @@ impl Default for VeilidConfigUDP {
///
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, JsonSchema)]
#[cfg_attr(all(target_arch = "wasm32", target_os = "unknown"), derive(Tsify))]
#[must_use]
pub struct VeilidConfigTCP {
    pub connect: bool,
    pub listen: bool,

@ -188,6 +193,7 @@ impl Default for VeilidConfigTCP {
///
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, JsonSchema)]
#[cfg_attr(all(target_arch = "wasm32", target_os = "unknown"), derive(Tsify))]
#[must_use]
pub struct VeilidConfigWS {
    pub connect: bool,
    pub listen: bool,

@ -233,6 +239,7 @@ impl Default for VeilidConfigWS {
///
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, JsonSchema)]
#[cfg_attr(all(target_arch = "wasm32", target_os = "unknown"), derive(Tsify))]
#[must_use]
pub struct VeilidConfigWSS {
    pub connect: bool,
    pub listen: bool,

@ -265,6 +272,7 @@ impl Default for VeilidConfigWSS {
///
#[derive(Default, Debug, Clone, PartialEq, Eq, Serialize, Deserialize, JsonSchema)]
#[cfg_attr(all(target_arch = "wasm32", target_os = "unknown"), derive(Tsify))]
#[must_use]
pub struct VeilidConfigProtocol {
    pub udp: VeilidConfigUDP,
    pub tcp: VeilidConfigTCP,
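A short sketch (assumed usage, not part of this change) of composing the transport section from the config structs above, starting from their Default impls; only field names visible in these hunks are used.

fn example_protocol() -> VeilidConfigProtocol {
    let mut protocol = VeilidConfigProtocol::default();
    // Enable UDP and TCP transports on top of the defaults.
    protocol.udp.enabled = true;
    protocol.tcp.connect = true;
    protocol.tcp.listen = true;
    protocol
}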
@ -281,6 +289,7 @@ pub struct VeilidConfigProtocol {
#[cfg(feature = "geolocation")]
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, JsonSchema)]
#[cfg_attr(target_arch = "wasm32", derive(Tsify))]
#[must_use]
pub struct VeilidConfigPrivacy {
    pub country_code_denylist: Vec<CountryCode>,
}

@ -304,6 +313,7 @@ impl Default for VeilidConfigPrivacy {
#[cfg(feature = "virtual-network")]
#[derive(Default, Debug, Clone, PartialEq, Eq, Serialize, Deserialize, JsonSchema)]
#[cfg_attr(target_arch = "wasm32", derive(Tsify))]
#[must_use]
pub struct VeilidConfigVirtualNetwork {
    pub enabled: bool,
    pub server_address: String,

@ -319,6 +329,7 @@ pub struct VeilidConfigVirtualNetwork {
///
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, JsonSchema)]
#[cfg_attr(all(target_arch = "wasm32", target_os = "unknown"), derive(Tsify))]
#[must_use]
pub struct VeilidConfigTLS {
    pub certificate_path: String,
    pub private_key_path: String,

@ -339,6 +350,7 @@ impl Default for VeilidConfigTLS {
    all(target_arch = "wasm32", target_os = "unknown"),
    allow(unused_variables)
)]
#[must_use]
pub fn get_default_ssl_directory(
    program_name: &str,
    organization: &str,

@ -365,6 +377,7 @@ pub fn get_default_ssl_directory(
/// for correct DHT operations.
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, JsonSchema)]
#[cfg_attr(all(target_arch = "wasm32", target_os = "unknown"), derive(Tsify))]
#[must_use]
pub struct VeilidConfigDHT {
    pub max_find_node_count: u32,
    pub resolve_node_timeout_ms: u32,

@ -449,6 +462,7 @@ impl Default for VeilidConfigDHT {
///
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, JsonSchema)]
#[cfg_attr(all(target_arch = "wasm32", target_os = "unknown"), derive(Tsify))]
#[must_use]
pub struct VeilidConfigRPC {
    pub concurrency: u32,
    pub queue_size: u32,

@ -479,6 +493,7 @@ impl Default for VeilidConfigRPC {
///
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, JsonSchema)]
#[cfg_attr(all(target_arch = "wasm32", target_os = "unknown"), derive(Tsify))]
#[must_use]
pub struct VeilidConfigRoutingTable {
    #[schemars(with = "Vec<String>")]
    pub node_id: TypedKeyGroup,

@ -519,6 +534,7 @@ impl Default for VeilidConfigRoutingTable {

#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, JsonSchema)]
#[cfg_attr(all(target_arch = "wasm32", target_os = "unknown"), derive(Tsify))]
#[must_use]
pub struct VeilidConfigNetwork {
    pub connection_initial_timeout_ms: u32,
    pub connection_inactivity_timeout_ms: u32,

@ -578,6 +594,7 @@ impl Default for VeilidConfigNetwork {

#[derive(Debug, Default, Clone, PartialEq, Eq, Serialize, Deserialize, JsonSchema)]
#[cfg_attr(all(target_arch = "wasm32", target_os = "unknown"), derive(Tsify))]
#[must_use]
pub struct VeilidConfigTableStore {
    pub directory: String,
    pub delete: bool,

@ -587,6 +604,7 @@ pub struct VeilidConfigTableStore {
    all(target_arch = "wasm32", target_os = "unknown"),
    allow(unused_variables)
)]
#[must_use]
fn get_default_store_path(
    program_name: &str,
    organization: &str,

@ -610,6 +628,7 @@ fn get_default_store_path(

#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, JsonSchema)]
#[cfg_attr(all(target_arch = "wasm32", target_os = "unknown"), derive(Tsify))]
#[must_use]
pub struct VeilidConfigBlockStore {
    pub directory: String,
    pub delete: bool,

@ -626,6 +645,7 @@ impl Default for VeilidConfigBlockStore {

#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, JsonSchema)]
#[cfg_attr(all(target_arch = "wasm32", target_os = "unknown"), derive(Tsify))]
#[must_use]
pub struct VeilidConfigProtectedStore {
    pub allow_insecure_fallback: bool,
    pub always_use_insecure_storage: bool,

@ -651,6 +671,7 @@ impl Default for VeilidConfigProtectedStore {

#[derive(Default, Debug, Clone, PartialEq, Eq, Serialize, Deserialize, JsonSchema)]
#[cfg_attr(all(target_arch = "wasm32", target_os = "unknown"), derive(Tsify))]
#[must_use]
pub struct VeilidConfigCapabilities {
    pub disable: Vec<FourCC>,
}

@ -661,6 +682,7 @@ pub struct VeilidConfigCapabilities {
    all(target_arch = "wasm32", target_os = "unknown"),
    tsify(namespace, from_wasm_abi)
)]
#[must_use]
pub enum VeilidConfigLogLevel {
    Off,
    Error,

@ -671,6 +693,7 @@ pub enum VeilidConfigLogLevel {
}

impl VeilidConfigLogLevel {
    #[must_use]
    pub fn to_veilid_log_level(&self) -> Option<VeilidLogLevel> {
        match self {
            Self::Off => None,

@ -681,6 +704,7 @@ impl VeilidConfigLogLevel {
            Self::Trace => Some(VeilidLogLevel::Trace),
        }
    }
    #[must_use]
    pub fn to_tracing_level_filter(&self) -> level_filters::LevelFilter {
        match self {
            Self::Off => level_filters::LevelFilter::OFF,
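A minimal sketch (hypothetical, not part of this change) of feeding the configured log level into a tracing filter; it assumes tracing's re-exported level_filters module is the one referenced above.

fn to_filter(level: VeilidConfigLogLevel) -> tracing::level_filters::LevelFilter {
    // Both helpers above are #[must_use]; the Option form is available as well.
    let _as_veilid: Option<VeilidLogLevel> = level.to_veilid_log_level();
    level.to_tracing_level_filter()
}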
@ -750,6 +774,7 @@ impl fmt::Display for VeilidConfigLogLevel {
/// Top level of the Veilid configuration tree
#[derive(Default, Debug, Clone, PartialEq, Eq, Serialize, Deserialize, JsonSchema)]
#[cfg_attr(all(target_arch = "wasm32", target_os = "unknown"), derive(Tsify))]
#[must_use]
pub struct VeilidConfigInner {
    /// An identifier used to describe the program using veilid-core.
    /// Used to partition storage locations in places like the ProtectedStore.

@ -857,6 +882,7 @@ impl VeilidConfigInner {

/// The configuration built for each Veilid node during API startup
#[derive(Clone)]
#[must_use]
pub struct VeilidConfig {
    update_cb: UpdateCallback,
    inner: Arc<RwLock<VeilidConfigInner>>,

@ -879,6 +905,25 @@ impl VeilidConfig {
        }
    }

    fn get_config_key<T: 'static>(
        inner_field: &mut T,
        keyname: &str,
        cb: ConfigCallback,
    ) -> VeilidAPIResult<()> {
        let v = cb(keyname.to_owned())?;
        *inner_field = match v.downcast() {
            Ok(v) => *v,
            Err(e) => {
                apibail_generic!(format!(
                    "incorrect type for key {}: {:?}",
                    keyname,
                    type_name_of_val(&*e)
                ))
            }
        };
        Ok(())
    }

    pub(crate) fn new_from_callback(
        cb: ConfigCallback,
        update_cb: UpdateCallback,

@ -888,18 +933,7 @@ impl VeilidConfig {
        // Simple config transformation
        macro_rules! get_config {
            ($key:expr) => {
                let keyname = &stringify!($key)[6..];
                let v = cb(keyname.to_owned())?;
                $key = match v.downcast() {
                    Ok(v) => *v,
                    Err(e) => {
                        apibail_generic!(format!(
                            "incorrect type for key {}: {:?}",
                            keyname,
                            type_name_of_val(&*e)
                        ))
                    }
                };
                Self::get_config_key(&mut $key, &stringify!($key)[6..], cb.clone())?;
            };
        }
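For context, a standalone sketch (assumed types, not part of this change) of the downcast pattern that the new get_config_key helper centralizes: the config callback hands back a boxed Any per key, and the helper writes it into the typed field only when the concrete type matches.

use std::any::Any;

fn set_field<T: 'static>(field: &mut T, boxed: Box<dyn Any>) -> Result<(), String> {
    // downcast succeeds only when the boxed value is actually a T.
    match boxed.downcast::<T>() {
        Ok(v) => {
            *field = *v;
            Ok(())
        }
        Err(_) => Err("incorrect type for key".to_owned()),
    }
}

fn example() {
    let mut queue_size: u32 = 0;
    set_field(&mut queue_size, Box::new(1024u32)).expect("type matches");
    assert_eq!(queue_size, 1024);
}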
@ -1019,6 +1053,7 @@ impl VeilidConfig {
        })
    }

    #[must_use]
    pub fn update_callback(&self) -> UpdateCallback {
        self.update_cb.clone()
    }

@ -1298,6 +1333,7 @@ impl VeilidConfig {
}

/// Return the default veilid config as a json object.
#[must_use]
pub fn default_veilid_config() -> String {
    serialize_json(VeilidConfigInner::default())
}
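A small sketch (hypothetical caller, not part of this change): the default configuration can be pulled as JSON and inspected or edited before node startup; parsing with serde_json here is an assumption about the caller's own dependencies.

fn inspect_default_config() -> serde_json::Result<serde_json::Value> {
    // default_veilid_config() serializes VeilidConfigInner::default() to JSON.
    let json = default_veilid_config();
    serde_json::from_str(&json)
}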
Some files were not shown because too many files have changed in this diff.