This commit is contained in:
John Smith 2022-11-26 21:37:23 -05:00
parent 5df46aecae
commit b1bdf76ae8
80 changed files with 865 additions and 700 deletions

3
.gitmodules vendored
View File

@ -16,9 +16,6 @@
[submodule "external/netlink"]
path = external/netlink
url = ../netlink.git
[submodule "external/no-std-net"]
path = external/no-std-net
url = ../no-std-net.git
[submodule "external/libmdns"]
path = external/libmdns
url = ../libmdns.git

View File

@ -1,6 +1,7 @@
[workspace]
members = [
"veilid-tools",
"veilid-core",
"veilid-server",
"veilid-cli",

View File

@ -81,7 +81,6 @@ core:
min_peer_refresh_time_ms: 2000
validate_dial_info_receipt_time_ms: 2000
upnp: true
natpmp: false
detect_address_changes: true
enable_local_peer_scope: false
restricted_nat_retries: 0

View File

@ -193,7 +193,6 @@ network:
bootstrap: ['bootstrap.dev.veilid.net']
bootstrap_nodes: []
upnp: true
natpmp: false
detect_address_changes: true
enable_local_peer_scope: false
restricted_nat_retries: 0

View File

@ -229,8 +229,8 @@ impl ClientApiConnection {
// Wait until rpc system completion or disconnect was requested
let res = rpc_jh.await;
#[cfg(feature = "rt-tokio")]
let res = res.map_err(|e| format!("join error: {}", e))?;
// #[cfg(feature = "rt-tokio")]
// let res = res.map_err(|e| format!("join error: {}", e))?;
res.map_err(|e| format!("client RPC system error: {}", e))
}
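For context on the rt-tokio-only map_err being commented out here: the two runtimes' join handles resolve to different outputs, which is why the extra mapping was gated on rt-tokio in the first place. A minimal sketch of that difference, not part of this commit; join_example is a hypothetical helper.
#[cfg(feature = "rt-tokio")]
async fn join_example() -> Result<u32, String> {
    // tokio's JoinHandle<T> resolves to Result<T, JoinError>, so it must be unwrapped
    let jh = tokio::task::spawn(async { 7u32 });
    jh.await.map_err(|e| format!("join error: {}", e))
}
#[cfg(feature = "rt-async-std")]
async fn join_example() -> Result<u32, String> {
    // async-std's JoinHandle<T> resolves to T directly, so no join error mapping is needed
    let jh = async_std::task::spawn(async { 7u32 });
    Ok(jh.await)
}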

View File

@ -7,7 +7,7 @@ use std::cell::*;
use std::net::SocketAddr;
use std::rc::Rc;
use std::time::{Duration, SystemTime};
use veilid_core::xx::{Eventual, EventualCommon};
use veilid_core::xx::*;
use veilid_core::*;
pub fn convert_loglevel(s: &str) -> Result<VeilidConfigLogLevel, String> {

View File

@ -8,7 +8,6 @@ use flexi_logger::*;
use std::ffi::OsStr;
use std::net::ToSocketAddrs;
use std::path::Path;
use tools::*;
mod client_api_connection;
mod command_processor;

View File

@ -6,12 +6,7 @@ cfg_if! {
pub use async_std::task::JoinHandle;
pub use async_std::net::TcpStream;
pub use async_std::future::TimeoutError;
pub fn spawn_local<F: Future<Output = T> + 'static, T: 'static>(f: F) -> JoinHandle<T> {
async_std::task::spawn_local(f)
}
pub fn spawn_detached_local<F: Future<Output = T> + 'static, T: 'static>(f: F) {
let _ = async_std::task::spawn_local(f);
}
pub use async_std::task::sleep;
pub use async_std::future::timeout;
pub fn block_on<F: Future<Output = T>, T>(f: F) -> T {
@ -21,12 +16,7 @@ cfg_if! {
pub use tokio::task::JoinHandle;
pub use tokio::net::TcpStream;
pub use tokio::time::error::Elapsed as TimeoutError;
pub fn spawn_local<F: Future<Output = T> + 'static, T: 'static>(f: F) -> JoinHandle<T> {
tokio::task::spawn_local(f)
}
pub fn spawn_detached_local<F: Future<Output = T> + 'static, T: 'static>(f: F) {
let _ = tokio::task::spawn_local(f);
}
pub use tokio::time::sleep;
pub use tokio::time::timeout;
pub fn block_on<F: Future<Output = T>, T>(f: F) -> T {
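The re-exports in this shim keep the same call shapes under either runtime feature, so code built on them can be written once. A minimal sketch, not part of this commit, assuming the re-exported timeout and TcpStream are in scope; connect_with_deadline is a hypothetical helper.
use std::time::Duration;

// Sketch only: bound a TCP connect with a deadline using the runtime-agnostic re-exports.
async fn connect_with_deadline(addr: &str) -> Result<TcpStream, String> {
    match timeout(Duration::from_secs(5), TcpStream::connect(addr)).await {
        Ok(Ok(stream)) => Ok(stream),
        Ok(Err(e)) => Err(format!("connect error: {}", e)),
        Err(_) => Err("connect timed out".to_string()),
    }
}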

View File

@ -11,14 +11,15 @@ crate-type = ["cdylib", "staticlib", "rlib"]
[features]
default = []
rt-async-std = [ "async-std", "async-std-resolver", "async_executors/async_std", "rtnetlink?/smol_socket" ]
rt-tokio = [ "tokio", "tokio-util", "tokio-stream", "trust-dns-resolver/tokio-runtime", "async_executors/tokio_tp", "async_executors/tokio_io", "async_executors/tokio_timer", "rtnetlink?/tokio_socket" ]
rt-async-std = [ "async-std", "async-std-resolver", "async_executors/async_std", "rtnetlink?/smol_socket", "veilid-tools/rt-async-std" ]
rt-tokio = [ "tokio", "tokio-util", "tokio-stream", "trust-dns-resolver/tokio-runtime", "async_executors/tokio_tp", "async_executors/tokio_io", "async_executors/tokio_timer", "rtnetlink?/tokio_socket", "veilid-tools/rt-tokio" ]
android_tests = []
ios_tests = [ "simplelog" ]
tracking = []
[dependencies]
veilid-tools = { path = "../veilid-tools", features = [ "tracing" ] }
tracing = { version = "^0", features = ["log", "attributes"] }
tracing-subscriber = "^0"
tracing-error = "^0"
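The rt-async-std and rt-tokio features now forward the matching veilid-tools feature, and the crate is pulled in as a path dependency above. A crate that treats these runtime features as mutually exclusive often adds a compile-time guard; the guard below is a sketch of that pattern and is not something this diff adds.
// Sketch, not in this diff: fail the build if the runtime features are misconfigured.
#[cfg(all(feature = "rt-async-std", feature = "rt-tokio"))]
compile_error!("features rt-async-std and rt-tokio are mutually exclusive");
#[cfg(not(any(feature = "rt-async-std", feature = "rt-tokio")))]
compile_error!("enable exactly one of rt-async-std or rt-tokio");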

View File

@ -254,7 +254,7 @@ impl AttachmentManager {
#[instrument(level = "debug", skip(self))]
async fn attachment_maintainer(self) {
debug!("attachment starting");
self.inner.lock().attach_timestamp = Some(intf::get_timestamp());
self.inner.lock().attach_timestamp = Some(get_timestamp());
let netman = self.network_manager();
let mut restart;
@ -286,7 +286,7 @@ impl AttachmentManager {
self.update_attachment().await;
// sleep should be at the end in case maintain_peers changes state
intf::sleep(1000).await;
sleep(1000).await;
}
debug!("stopped maintaining peers");
@ -299,7 +299,7 @@ impl AttachmentManager {
debug!("completely restarting attachment");
// chill out for a second first, give network stack time to settle out
intf::sleep(1000).await;
sleep(1000).await;
}
trace!("stopping attachment");
@ -348,7 +348,7 @@ impl AttachmentManager {
return;
}
inner.maintain_peers = true;
inner.attachment_maintainer_jh = Some(intf::spawn(self.clone().attachment_maintainer()));
inner.attachment_maintainer_jh = Some(spawn(self.clone().attachment_maintainer()));
}
#[instrument(level = "trace", skip(self))]

View File

@ -1,4 +1,3 @@
use crate::veilid_rng::*;
use crate::xx::*;
use crate::*;

View File

@ -137,7 +137,7 @@ impl Crypto {
// Schedule flushing
let this = self.clone();
let flush_future = intf::interval(60000, move || {
let flush_future = interval(60000, move || {
let this = this.clone();
async move {
if let Err(e) = this.flush().await {
@ -229,13 +229,13 @@ impl Crypto {
pub fn get_random_nonce() -> Nonce {
let mut nonce = [0u8; 24];
intf::random_bytes(&mut nonce).unwrap();
random_bytes(&mut nonce).unwrap();
nonce
}
pub fn get_random_secret() -> SharedSecret {
let mut s = [0u8; 32];
intf::random_bytes(&mut s).unwrap();
random_bytes(&mut s).unwrap();
s
}

View File

@ -1,201 +1,10 @@
#![allow(dead_code)]
use crate::xx::*;
use rand::prelude::*;
use std::time::{Duration, SystemTime, UNIX_EPOCH};
pub fn get_timestamp() -> u64 {
match SystemTime::now().duration_since(UNIX_EPOCH) {
Ok(n) => n.as_micros() as u64,
Err(_) => panic!("SystemTime before UNIX_EPOCH!"),
}
}
// pub fn get_timestamp_string() -> String {
// let dt = chrono::Utc::now();
// dt.time().format("%H:%M:%S.3f").to_string()
// }
pub fn random_bytes(dest: &mut [u8]) -> EyreResult<()> {
let mut rng = rand::thread_rng();
rng.try_fill_bytes(dest).wrap_err("failed to fill bytes")
}
pub fn get_random_u32() -> u32 {
let mut rng = rand::thread_rng();
rng.next_u32()
}
pub fn get_random_u64() -> u64 {
let mut rng = rand::thread_rng();
rng.next_u64()
}
pub async fn sleep(millis: u32) {
if millis == 0 {
cfg_if! {
if #[cfg(feature="rt-async-std")] {
async_std::task::yield_now().await;
} else if #[cfg(feature="rt-tokio")] {
tokio::task::yield_now().await;
}
}
} else {
cfg_if! {
if #[cfg(feature="rt-async-std")] {
async_std::task::sleep(Duration::from_millis(u64::from(millis))).await;
} else if #[cfg(feature="rt-tokio")] {
tokio::time::sleep(Duration::from_millis(u64::from(millis))).await;
}
}
}
}
pub fn system_boxed<'a, Out>(
future: impl Future<Output = Out> + Send + 'a,
) -> SendPinBoxFutureLifetime<'a, Out> {
Box::pin(future)
}
pub fn spawn<Out>(future: impl Future<Output = Out> + Send + 'static) -> MustJoinHandle<Out>
where
Out: Send + 'static,
{
cfg_if! {
if #[cfg(feature="rt-async-std")] {
MustJoinHandle::new(async_std::task::spawn(future))
} else if #[cfg(feature="rt-tokio")] {
MustJoinHandle::new(tokio::task::spawn(future))
}
}
}
pub fn spawn_local<Out>(future: impl Future<Output = Out> + 'static) -> MustJoinHandle<Out>
where
Out: 'static,
{
cfg_if! {
if #[cfg(feature="rt-async-std")] {
MustJoinHandle::new(async_std::task::spawn_local(future))
} else if #[cfg(feature="rt-tokio")] {
MustJoinHandle::new(tokio::task::spawn_local(future))
}
}
}
// pub fn spawn_with_local_set<Out>(
// future: impl Future<Output = Out> + Send + 'static,
// ) -> MustJoinHandle<Out>
// where
// Out: Send + 'static,
// {
// cfg_if! {
// if #[cfg(feature="rt-async-std")] {
// spawn(future)
// } else if #[cfg(feature="rt-tokio")] {
// MustJoinHandle::new(tokio::task::spawn_blocking(move || {
// let rt = tokio::runtime::Handle::current();
// rt.block_on(async {
// let local = tokio::task::LocalSet::new();
// local.run_until(future).await
// })
// }))
// }
// }
// }
pub fn spawn_detached<Out>(future: impl Future<Output = Out> + Send + 'static)
where
Out: Send + 'static,
{
cfg_if! {
if #[cfg(feature="rt-async-std")] {
drop(async_std::task::spawn(future));
} else if #[cfg(feature="rt-tokio")] {
drop(tokio::task::spawn(future));
}
}
}
pub fn interval<F, FUT>(freq_ms: u32, callback: F) -> SendPinBoxFuture<()>
where
F: Fn() -> FUT + Send + Sync + 'static,
FUT: Future<Output = ()> + Send,
{
let e = Eventual::new();
let ie = e.clone();
let jh = spawn(async move {
while timeout(freq_ms, ie.instance_clone(())).await.is_err() {
callback().await;
}
});
Box::pin(async move {
e.resolve().await;
jh.await;
})
}
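The boxed future returned by interval acts as a stopper: the worker loops until the Eventual resolves, and awaiting the returned future resolves it and then joins the worker. A usage sketch, mirroring the interval test later in this diff; interval_demo is hypothetical and not part of this commit.
// Sketch only: run a ticker for about a second, then stop it by awaiting the returned future.
async fn interval_demo() {
    let stopper = interval(250, || async {
        info!("tick");
    });
    sleep(1000).await;
    stopper.await; // resolves the Eventual, ends the loop, joins the worker task
}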
pub async fn timeout<F, T>(dur_ms: u32, f: F) -> Result<T, TimeoutError>
where
F: Future<Output = T>,
{
cfg_if! {
if #[cfg(feature="rt-async-std")] {
async_std::future::timeout(Duration::from_millis(dur_ms as u64), f).await.map_err(|e| e.into())
} else if #[cfg(feature="rt-tokio")] {
tokio::time::timeout(Duration::from_millis(dur_ms as u64), f).await.map_err(|e| e.into())
}
}
}
pub async fn blocking_wrapper<F, R>(blocking_task: F, err_result: R) -> R
where
F: FnOnce() -> R + Send + 'static,
R: Send + 'static,
{
// run blocking stuff in blocking thread
cfg_if! {
if #[cfg(feature="rt-async-std")] {
async_std::task::spawn_blocking(blocking_task).await
} else if #[cfg(feature="rt-tokio")] {
tokio::task::spawn_blocking(blocking_task).await.unwrap_or(err_result)
} else {
#[compile_error("must use an executor")]
}
}
}
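blocking_wrapper moves a synchronous closure onto the runtime's blocking thread pool; err_result is only consumed on the tokio path, where a panicked task surfaces as a JoinError and the fallback is returned instead. A usage sketch, not part of this commit; the checksum closure is made up for illustration.
// Sketch only: run a CPU-bound computation off the async executor threads.
async fn checksum(data: Vec<u8>) -> u64 {
    blocking_wrapper(
        move || data.iter().map(|b| *b as u64).sum(),
        0u64, // fallback if the blocking task panics (tokio path only)
    )
    .await
}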
pub fn get_concurrency() -> u32 {
std::thread::available_parallelism()
.map(|x| x.get())
.unwrap_or_else(|e| {
warn!("unable to get concurrency defaulting to single core: {}", e);
1
}) as u32
}
pub async fn get_outbound_relay_peer() -> Option<crate::veilid_api::PeerInfo> {
panic!("Native Veilid should never require an outbound relay");
}
/*
pub fn async_callback<F, OF, EF, T, E>(fut: F, ok_fn: OF, err_fn: EF)
where
F: Future<Output = Result<T, E>> + Send + 'static,
OF: FnOnce(T) + Send + 'static,
EF: FnOnce(E) + Send + 'static,
{
spawn(Box::pin(async move {
match fut.await {
Ok(v) => ok_fn(v),
Err(e) => err_fn(e),
};
}));
}
*/
/////////////////////////////////////////////////////////////////////////////////
// Resolver
//

View File

@ -322,7 +322,7 @@ impl PlatformSupportNetlink {
.wrap_err("failed to create rtnetlink socket")?;
// Spawn a connection handler
let connection_jh = intf::spawn(connection);
let connection_jh = spawn(connection);
// Save the connection
self.connection_jh = Some(connection_jh);

View File

@ -1,159 +1,8 @@
use super::utils;
use crate::xx::*;
use crate::*;
use async_executors::{Bindgen, LocalSpawnHandleExt, SpawnHandleExt, Timer};
use futures_util::future::{select, Either};
use js_sys::*;
//use wasm_bindgen_futures::*;
//use web_sys::*;
#[wasm_bindgen]
extern "C" {
#[wasm_bindgen(catch, structural, js_namespace = global, js_name = setTimeout)]
fn nodejs_global_set_timeout_with_callback_and_timeout_and_arguments_0(
handler: &::js_sys::Function,
timeout: u32,
) -> Result<JsValue, JsValue>;
}
pub fn get_timestamp() -> u64 {
if utils::is_browser() {
return (Date::now() * 1000.0f64) as u64;
} else {
panic!("WASM requires browser environment");
}
}
// pub fn get_timestamp_string() -> String {
// let date = Date::new_0();
// let hours = Date::get_utc_hours(&date);
// let minutes = Date::get_utc_minutes(&date);
// let seconds = Date::get_utc_seconds(&date);
// let milliseconds = Date::get_utc_milliseconds(&date);
// format!(
// "{:02}:{:02}:{:02}.{}",
// hours, minutes, seconds, milliseconds
// )
// }
pub fn random_bytes(dest: &mut [u8]) -> EyreResult<()> {
let len = dest.len();
let u32len = len / 4;
let remlen = len % 4;
for n in 0..u32len {
let r = (Math::random() * (u32::max_value() as f64)) as u32;
dest[n * 4 + 0] = (r & 0xFF) as u8;
dest[n * 4 + 1] = ((r >> 8) & 0xFF) as u8;
dest[n * 4 + 2] = ((r >> 16) & 0xFF) as u8;
dest[n * 4 + 3] = ((r >> 24) & 0xFF) as u8;
}
if remlen > 0 {
let r = (Math::random() * (u32::max_value() as f64)) as u32;
for n in 0..remlen {
dest[u32len * 4 + n] = ((r >> (n * 8)) & 0xFF) as u8;
}
}
Ok(())
}
pub fn get_random_u32() -> u32 {
(Math::random() * (u32::max_value() as f64)) as u32
}
pub fn get_random_u64() -> u64 {
let v1: u32 = get_random_u32();
let v2: u32 = get_random_u32();
((v1 as u64) << 32) | ((v2 as u32) as u64)
}
pub async fn sleep(millis: u32) {
Bindgen.sleep(Duration::from_millis(millis.into())).await
}
pub fn system_boxed<'a, Out>(
future: impl Future<Output = Out> + Send + 'a,
) -> SendPinBoxFutureLifetime<'a, Out> {
Box::pin(future)
}
pub fn spawn<Out>(future: impl Future<Output = Out> + Send + 'static) -> MustJoinHandle<Out>
where
Out: Send + 'static,
{
MustJoinHandle::new(
Bindgen
.spawn_handle(future)
.expect("wasm-bindgen-futures spawn should never error out"),
)
}
pub fn spawn_local<Out>(future: impl Future<Output = Out> + 'static) -> MustJoinHandle<Out>
where
Out: 'static,
{
MustJoinHandle::new(
Bindgen
.spawn_handle_local(future)
.expect("wasm-bindgen-futures spawn_local should never error out"),
)
}
// pub fn spawn_with_local_set<Out>(
// future: impl Future<Output = Out> + Send + 'static,
// ) -> MustJoinHandle<Out>
// where
// Out: Send + 'static,
// {
// spawn(future)
// }
pub fn spawn_detached<Out>(future: impl Future<Output = Out> + Send + 'static)
where
Out: Send + 'static,
{
Bindgen
.spawn_handle_local(future)
.expect("wasm-bindgen-futures spawn_local should never error out")
.detach()
}
pub fn interval<F, FUT>(freq_ms: u32, callback: F) -> SendPinBoxFuture<()>
where
F: Fn() -> FUT + Send + Sync + 'static,
FUT: Future<Output = ()> + Send,
{
let e = Eventual::new();
let ie = e.clone();
let jh = spawn(Box::pin(async move {
while timeout(freq_ms, ie.instance_clone(())).await.is_err() {
callback().await;
}
}));
Box::pin(async move {
e.resolve().await;
jh.await;
})
}
pub async fn timeout<F, T>(dur_ms: u32, f: F) -> Result<T, TimeoutError>
where
F: Future<Output = T>,
{
match select(Box::pin(intf::sleep(dur_ms)), Box::pin(f)).await {
Either::Left((_x, _b)) => Err(TimeoutError()),
Either::Right((y, _a)) => Ok(y),
}
}
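With no runtime-provided timeout on wasm, the deadline is emulated by racing the future against a sleep via futures_util's select; whichever side completes first decides the result. A usage sketch, not part of this commit; fetch_with_deadline is hypothetical.
// Sketch only: bound an await with the select-based timeout above.
async fn fetch_with_deadline() -> Result<String, TimeoutError> {
    timeout(2000, async {
        sleep(100).await; // stand-in for a real async operation
        "done".to_string()
    })
    .await
}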
// xxx: for now until wasm threads are more stable, and/or we bother with web workers
pub fn get_concurrency() -> u32 {
1
}
pub async fn get_outbound_relay_peer() -> Option<crate::veilid_api::PeerInfo> {
// unimplemented!

View File

@ -1,54 +1 @@
#![cfg(target_arch = "wasm32")]
use crate::xx::*;
use core::sync::atomic::{AtomicI8, Ordering};
use js_sys::{global, Reflect};
#[wasm_bindgen]
extern "C" {
// Use `js_namespace` here to bind `console.log(..)` instead of just
// `log(..)`
#[wasm_bindgen(js_namespace = console, js_name = log)]
pub fn console_log(s: &str);
#[wasm_bindgen]
pub fn alert(s: &str);
}
pub fn is_browser() -> bool {
static CACHE: AtomicI8 = AtomicI8::new(-1);
let cache = CACHE.load(Ordering::Relaxed);
if cache != -1 {
return cache != 0;
}
let res = Reflect::has(&global().as_ref(), &"window".into()).unwrap_or_default();
CACHE.store(res as i8, Ordering::Relaxed);
res
}
// pub fn is_browser_https() -> bool {
// static CACHE: AtomicI8 = AtomicI8::new(-1);
// let cache = CACHE.load(Ordering::Relaxed);
// if cache != -1 {
// return cache != 0;
// }
// let res = js_sys::eval("window.location.protocol === 'https'")
// .map(|res| res.is_truthy())
// .unwrap_or_default();
// CACHE.store(res as i8, Ordering::Relaxed);
// res
// }
#[derive(ThisError, Debug, Clone, Eq, PartialEq)]
#[error("JsValue error")]
pub struct JsValueError(String);
pub fn map_jsvalue_error(x: JsValue) -> JsValueError {
JsValueError(x.as_string().unwrap_or_default())
}

View File

@ -32,10 +32,6 @@ mod veilid_api;
#[macro_use]
mod veilid_config;
mod veilid_layer_filter;
mod veilid_rng;
#[macro_use]
pub mod xx;
pub use self::api_tracing_layer::ApiTracingLayer;
pub use self::attachment_manager::AttachmentState;
@ -43,6 +39,7 @@ pub use self::core_context::{api_startup, api_startup_json, UpdateCallback};
pub use self::veilid_api::*;
pub use self::veilid_config::*;
pub use self::veilid_layer_filter::*;
pub use veilid_tools as tools;
pub mod veilid_capnp {
include!(concat!(env!("OUT_DIR"), "/proto/veilid_capnp.rs"));

View File

@ -78,7 +78,7 @@ impl ConnectionLimits {
pub fn add(&mut self, addr: IpAddr) -> Result<(), AddressFilterError> {
let ipblock = ip_to_ipblock(self.max_connections_per_ip6_prefix_size, addr);
let ts = intf::get_timestamp();
let ts = get_timestamp();
self.purge_old_timestamps(ts);
@ -134,7 +134,7 @@ impl ConnectionLimits {
pub fn remove(&mut self, addr: IpAddr) -> Result<(), AddressNotInTableError> {
let ipblock = ip_to_ipblock(self.max_connections_per_ip6_prefix_size, addr);
let ts = intf::get_timestamp();
let ts = get_timestamp();
self.purge_old_timestamps(ts);
match ipblock {

View File

@ -319,7 +319,7 @@ impl ConnectionManager {
};
log_net!(debug "get_or_create_connection retries left: {}", retry_count);
retry_count -= 1;
intf::sleep(500).await;
sleep(500).await;
});
// Add to the connection table

View File

@ -1,5 +1,5 @@
use crate::*;
use crate::xx::*;
use crate::*;
#[cfg(not(target_arch = "wasm32"))]
mod native;
@ -403,11 +403,11 @@ impl NetworkManager {
let mut inner = self.inner.lock();
match inner.client_whitelist.entry(client) {
hashlink::lru_cache::Entry::Occupied(mut entry) => {
entry.get_mut().last_seen_ts = intf::get_timestamp()
entry.get_mut().last_seen_ts = get_timestamp()
}
hashlink::lru_cache::Entry::Vacant(entry) => {
entry.insert(ClientWhitelistEntry {
last_seen_ts: intf::get_timestamp(),
last_seen_ts: get_timestamp(),
});
}
}
@ -419,7 +419,7 @@ impl NetworkManager {
match inner.client_whitelist.entry(client) {
hashlink::lru_cache::Entry::Occupied(mut entry) => {
entry.get_mut().last_seen_ts = intf::get_timestamp();
entry.get_mut().last_seen_ts = get_timestamp();
true
}
hashlink::lru_cache::Entry::Vacant(_) => false,
@ -429,7 +429,7 @@ impl NetworkManager {
pub fn purge_client_whitelist(&self) {
let timeout_ms = self.with_config(|c| c.network.client_whitelist_timeout_ms);
let mut inner = self.inner.lock();
let cutoff_timestamp = intf::get_timestamp() - ((timeout_ms as u64) * 1000u64);
let cutoff_timestamp = get_timestamp() - ((timeout_ms as u64) * 1000u64);
// Remove clients from the whitelist that haven't been seen since our whitelist timeout
while inner
.client_whitelist
@ -516,7 +516,7 @@ impl NetworkManager {
.wrap_err("failed to generate signed receipt")?;
// Record the receipt for later
let exp_ts = intf::get_timestamp() + expiration_us;
let exp_ts = get_timestamp() + expiration_us;
receipt_manager.record_receipt(receipt, exp_ts, expected_returns, callback);
Ok(out)
@ -540,7 +540,7 @@ impl NetworkManager {
.wrap_err("failed to generate signed receipt")?;
// Record the receipt for later
let exp_ts = intf::get_timestamp() + expiration_us;
let exp_ts = get_timestamp() + expiration_us;
let eventual = SingleShotEventual::new(Some(ReceiptEvent::Cancelled));
let instance = eventual.instance();
receipt_manager.record_single_shot_receipt(receipt, exp_ts, eventual);
@ -707,7 +707,7 @@ impl NetworkManager {
// XXX: do we need a delay here? or another hole punch packet?
// Set the hole punch as our 'last connection' to ensure we return the receipt over the direct hole punch
peer_nr.set_last_connection(connection_descriptor, intf::get_timestamp());
peer_nr.set_last_connection(connection_descriptor, get_timestamp());
// Return the receipt using the same dial info send the receipt to it
rpc.rpc_call_return_receipt(Destination::direct(peer_nr), receipt)
@ -731,7 +731,7 @@ impl NetworkManager {
let node_id_secret = routing_table.node_id_secret();
// Get timestamp, nonce
let ts = intf::get_timestamp();
let ts = get_timestamp();
let nonce = Crypto::get_random_nonce();
// Encode envelope
@ -1116,8 +1116,7 @@ impl NetworkManager {
// );
// Update timestamp for this last connection since we just sent to it
node_ref
.set_last_connection(connection_descriptor, intf::get_timestamp());
node_ref.set_last_connection(connection_descriptor, get_timestamp());
return Ok(NetworkResult::value(SendDataKind::Existing(
connection_descriptor,
@ -1149,7 +1148,7 @@ impl NetworkManager {
this.net().send_data_to_dial_info(dial_info, data).await?
);
// If we connected to this node directly, save off the last connection so we can use it again
node_ref.set_last_connection(connection_descriptor, intf::get_timestamp());
node_ref.set_last_connection(connection_descriptor, get_timestamp());
Ok(NetworkResult::value(SendDataKind::Direct(
connection_descriptor,
@ -1324,7 +1323,7 @@ impl NetworkManager {
});
// Validate timestamp isn't too old
let ts = intf::get_timestamp();
let ts = get_timestamp();
let ets = envelope.get_timestamp();
if let Some(tsbehind) = tsbehind {
if tsbehind > 0 && (ts > ets && ts - ets > tsbehind) {
@ -1631,7 +1630,7 @@ impl NetworkManager {
// public dialinfo
let inconsistent = if inconsistencies.len() >= PUBLIC_ADDRESS_CHANGE_DETECTION_COUNT
{
let exp_ts = intf::get_timestamp() + PUBLIC_ADDRESS_INCONSISTENCY_TIMEOUT_US;
let exp_ts = get_timestamp() + PUBLIC_ADDRESS_INCONSISTENCY_TIMEOUT_US;
for i in &inconsistencies {
pait.insert(*i, exp_ts);
}
@ -1644,8 +1643,8 @@ impl NetworkManager {
.public_address_inconsistencies_table
.entry(key)
.or_insert_with(|| HashMap::new());
let exp_ts = intf::get_timestamp()
+ PUBLIC_ADDRESS_INCONSISTENCY_PUNISHMENT_TIMEOUT_US;
let exp_ts =
get_timestamp() + PUBLIC_ADDRESS_INCONSISTENCY_PUNISHMENT_TIMEOUT_US;
for i in inconsistencies {
pait.insert(i, exp_ts);
}
@ -1733,7 +1732,7 @@ impl NetworkManager {
}
// Get the list of refs to all nodes to update
let cur_ts = intf::get_timestamp();
let cur_ts = get_timestamp();
let node_refs =
this.routing_table()
.get_nodes_needing_updates(routing_domain, cur_ts, all);

View File

@ -176,7 +176,7 @@ impl IGDManager {
mapped_port: u16,
) -> Option<()> {
let this = self.clone();
intf::blocking_wrapper(move || {
blocking_wrapper(move || {
let mut inner = this.inner.lock();
// If we already have this port mapped, just return the existing portmap
@ -215,7 +215,7 @@ impl IGDManager {
expected_external_address: Option<IpAddr>,
) -> Option<SocketAddr> {
let this = self.clone();
intf::blocking_wrapper(move || {
blocking_wrapper(move || {
let mut inner = this.inner.lock();
// If we already have this port mapped, just return the existing portmap
@ -275,7 +275,7 @@ impl IGDManager {
};
// Add to mapping list to keep alive
let timestamp = intf::get_timestamp();
let timestamp = get_timestamp();
inner.port_maps.insert(PortMapKey {
llpt,
at,
@ -301,7 +301,7 @@ impl IGDManager {
let mut renews: Vec<(PortMapKey, PortMapValue)> = Vec::new();
{
let inner = self.inner.lock();
let now = intf::get_timestamp();
let now = get_timestamp();
for (k, v) in &inner.port_maps {
let mapping_lifetime = now.saturating_sub(v.timestamp);
@ -323,7 +323,7 @@ impl IGDManager {
}
let this = self.clone();
intf::blocking_wrapper(move || {
blocking_wrapper(move || {
let mut inner = this.inner.lock();
// Process full renewals
@ -356,7 +356,7 @@ impl IGDManager {
inner.port_maps.insert(k, PortMapValue {
ext_ip: v.ext_ip,
mapped_port,
timestamp: intf::get_timestamp(),
timestamp: get_timestamp(),
renewal_lifetime: (UPNP_MAPPING_LIFETIME_MS / 2) as u64 * 1000u64,
renewal_attempts: 0,
});
@ -397,7 +397,7 @@ impl IGDManager {
inner.port_maps.insert(k, PortMapValue {
ext_ip: v.ext_ip,
mapped_port: v.mapped_port,
timestamp: intf::get_timestamp(),
timestamp: get_timestamp(),
renewal_lifetime: (UPNP_MAPPING_LIFETIME_MS / 2) as u64 * 1000u64,
renewal_attempts: 0,
});

View File

@ -1,5 +1,4 @@
mod igd_manager;
mod natpmp_manager;
mod network_class_discovery;
mod network_tcp;
mod network_udp;
@ -94,11 +93,9 @@ struct NetworkUnlockedInner {
update_network_class_task: TickTask<EyreReport>,
network_interfaces_task: TickTask<EyreReport>,
upnp_task: TickTask<EyreReport>,
natpmp_task: TickTask<EyreReport>,
// Managers
igd_manager: igd_manager::IGDManager,
natpmp_manager: natpmp_manager::NATPMPManager,
}
#[derive(Clone)]
@ -150,9 +147,7 @@ impl Network {
update_network_class_task: TickTask::new(1),
network_interfaces_task: TickTask::new(5),
upnp_task: TickTask::new(1),
natpmp_task: TickTask::new(1),
igd_manager: igd_manager::IGDManager::new(config.clone()),
natpmp_manager: natpmp_manager::NATPMPManager::new(config),
}
}
@ -196,13 +191,6 @@ impl Network {
.upnp_task
.set_routine(move |s, l, t| Box::pin(this2.clone().upnp_task_routine(s, l, t)));
}
// Set natpmp tick task
{
let this2 = this.clone();
this.unlocked_inner
.natpmp_task
.set_routine(move |s, l, t| Box::pin(this2.clone().natpmp_task_routine(s, l, t)));
}
this
}
@ -904,31 +892,11 @@ impl Network {
Ok(())
}
#[instrument(level = "trace", skip(self), err)]
pub async fn natpmp_task_routine(
self,
stop_token: StopToken,
_l: u64,
_t: u64,
) -> EyreResult<()> {
if !self.unlocked_inner.natpmp_manager.tick().await? {
info!("natpmp failed, restarting local network");
let mut inner = self.inner.lock();
inner.network_needs_restart = true;
}
Ok(())
}
pub async fn tick(&self) -> EyreResult<()> {
let (detect_address_changes, upnp, natpmp) = {
let (detect_address_changes, upnp) = {
let config = self.network_manager().config();
let c = config.get();
(
c.network.detect_address_changes,
c.network.upnp,
c.network.natpmp,
)
(c.network.detect_address_changes, c.network.upnp)
};
// If we need to figure out our network class, tick the task for it
@ -962,11 +930,6 @@ impl Network {
self.unlocked_inner.upnp_task.tick().await?;
}
// If we need to tick natpmp, do it
if natpmp && !self.needs_restart() {
self.unlocked_inner.natpmp_task.tick().await?;
}
Ok(())
}
}

View File

@ -1,18 +0,0 @@
use super::*;
pub struct NATPMPManager {
config: VeilidConfig,
}
impl NATPMPManager {
//
pub fn new(config: VeilidConfig) -> Self {
Self { config }
}
pub async fn tick(&self) -> EyreResult<bool> {
// xxx
Ok(true)
}
}

View File

@ -275,7 +275,7 @@ impl DiscoveryContext {
LowLevelProtocolType::UDP => "udp",
LowLevelProtocolType::TCP => "tcp",
});
intf::sleep(PORT_MAP_VALIDATE_DELAY_MS).await
sleep(PORT_MAP_VALIDATE_DELAY_MS).await
} else {
break;
}
@ -304,9 +304,9 @@ impl DiscoveryContext {
#[instrument(level = "trace", skip(self), ret)]
async fn try_port_mapping(&self) -> Option<DialInfo> {
let (enable_upnp, _enable_natpmp) = {
let enable_upnp = {
let c = self.net.config.get();
(c.network.upnp, c.network.natpmp)
c.network.upnp
};
if enable_upnp {

View File

@ -58,7 +58,7 @@ impl Network {
// Don't waste more than N seconds getting it though, in case someone
// is trying to DoS us with a bunch of connections or something
// read a chunk of the stream
intf::timeout(
timeout(
tls_connection_initial_timeout_ms,
ps.peek_exact(&mut first_packet),
)

View File

@ -10,7 +10,7 @@ impl Network {
c.network.protocol.udp.socket_pool_size
};
if task_count == 0 {
task_count = intf::get_concurrency() / 2;
task_count = get_concurrency() / 2;
if task_count == 0 {
task_count = 1;
}

View File

@ -196,7 +196,7 @@ pub async fn nonblocking_connect(
let async_stream = Async::new(std::net::TcpStream::from(socket))?;
// The stream becomes writable when connected
timeout_or_try!(intf::timeout(timeout_ms, async_stream.writable())
timeout_or_try!(timeout(timeout_ms, async_stream.writable())
.await
.into_timeout_or()
.into_result()?);

View File

@ -99,13 +99,13 @@ pub struct NetworkConnection {
impl NetworkConnection {
pub(super) fn dummy(id: NetworkConnectionId, descriptor: ConnectionDescriptor) -> Self {
// Create handle for sending (dummy is immediately disconnected)
let (sender, _receiver) = flume::bounded(intf::get_concurrency() as usize);
let (sender, _receiver) = flume::bounded(get_concurrency() as usize);
Self {
connection_id: id,
descriptor,
processor: None,
established_time: intf::get_timestamp(),
established_time: get_timestamp(),
stats: Arc::new(Mutex::new(NetworkConnectionStats {
last_message_sent_time: None,
last_message_recv_time: None,
@ -125,7 +125,7 @@ impl NetworkConnection {
let descriptor = protocol_connection.descriptor();
// Create handle for sending
let (sender, receiver) = flume::bounded(intf::get_concurrency() as usize);
let (sender, receiver) = flume::bounded(get_concurrency() as usize);
// Create stats
let stats = Arc::new(Mutex::new(NetworkConnectionStats {
@ -137,7 +137,7 @@ impl NetworkConnection {
let local_stop_token = stop_source.token();
// Spawn connection processor and pass in protocol connection
let processor = intf::spawn(Self::process_connection(
let processor = spawn(Self::process_connection(
connection_manager,
local_stop_token,
manager_stop_token,
@ -153,7 +153,7 @@ impl NetworkConnection {
connection_id,
descriptor,
processor: Some(processor),
established_time: intf::get_timestamp(),
established_time: get_timestamp(),
stats,
sender,
stop_source: Some(stop_source),
@ -185,7 +185,7 @@ impl NetworkConnection {
stats: Arc<Mutex<NetworkConnectionStats>>,
message: Vec<u8>,
) -> io::Result<NetworkResult<()>> {
let ts = intf::get_timestamp();
let ts = get_timestamp();
let out = network_result_try!(protocol_connection.send(message).await?);
let mut stats = stats.lock();
@ -199,7 +199,7 @@ impl NetworkConnection {
protocol_connection: &ProtocolNetworkConnection,
stats: Arc<Mutex<NetworkConnectionStats>>,
) -> io::Result<NetworkResult<Vec<u8>>> {
let ts = intf::get_timestamp();
let ts = get_timestamp();
let out = network_result_try!(protocol_connection.recv().await?);
let mut stats = stats.lock();
@ -246,7 +246,7 @@ impl NetworkConnection {
// Push mutable timer so we can reset it
// Normally we would use an io::timeout here, but WASM won't support that, so we use a mutable sleep future
let new_timer = || {
intf::sleep(connection_manager.connection_inactivity_timeout_ms()).then(|_| async {
sleep(connection_manager.connection_inactivity_timeout_ms()).then(|_| async {
// timeout
log_net!("== Connection timeout on {:?}", descriptor.green());
RecvLoopAction::Timeout

View File

@ -281,7 +281,7 @@ impl ReceiptManager {
};
(inner.next_oldest_ts, inner.timeout_task.clone(), stop_token)
};
let now = intf::get_timestamp();
let now = get_timestamp();
// If we have at least one timestamp to expire, lets do it
if let Some(next_oldest_ts) = next_oldest_ts {
if now >= next_oldest_ts {

View File

@ -120,7 +120,7 @@ impl Bucket {
.iter()
.map(|(k, v)| (k.clone(), v.clone()))
.collect();
let cur_ts = intf::get_timestamp();
let cur_ts = get_timestamp();
sorted_entries.sort_by(|a, b| -> core::cmp::Ordering {
if a.0 == b.0 {
return core::cmp::Ordering::Equal;

View File

@ -231,7 +231,7 @@ impl BucketEntryInner {
// No need to update the signednodeinfo though since the timestamp is the same
// Touch the node and let it try to live again
self.updated_since_last_network_change = true;
self.touch_last_seen(intf::get_timestamp());
self.touch_last_seen(get_timestamp());
}
return;
}
@ -258,7 +258,7 @@ impl BucketEntryInner {
// Update the signed node info
*opt_current_sni = Some(Box::new(signed_node_info));
self.updated_since_last_network_change = true;
self.touch_last_seen(intf::get_timestamp());
self.touch_last_seen(get_timestamp());
}
pub fn has_node_info(&self, routing_domain_set: RoutingDomainSet) -> bool {
@ -672,7 +672,7 @@ pub struct BucketEntry {
impl BucketEntry {
pub(super) fn new() -> Self {
let now = intf::get_timestamp();
let now = get_timestamp();
Self {
ref_count: AtomicU32::new(0),
inner: RwLock::new(BucketEntryInner {

View File

@ -104,7 +104,7 @@ impl RoutingTable {
pub(crate) fn debug_info_entries(&self, limit: usize, min_state: BucketEntryState) -> String {
let inner = self.inner.read();
let inner = &*inner;
let cur_ts = intf::get_timestamp();
let cur_ts = get_timestamp();
let mut out = String::new();
@ -164,7 +164,7 @@ impl RoutingTable {
pub(crate) fn debug_info_buckets(&self, min_state: BucketEntryState) -> String {
let inner = self.inner.read();
let inner = &*inner;
let cur_ts = intf::get_timestamp();
let cur_ts = get_timestamp();
let mut out = String::new();
const COLS: usize = 16;

View File

@ -275,7 +275,7 @@ pub trait NodeRefBase: Sized {
} else {
// If this is not connection oriented, then we check our last seen time
// to see if this mapping has expired (beyond our timeout)
let cur_ts = intf::get_timestamp();
let cur_ts = get_timestamp();
if (last_seen + (CONNECTIONLESS_TIMEOUT_SECS as u64 * 1_000_000u64)) >= cur_ts {
return Some(last_connection);
}

View File

@ -624,7 +624,7 @@ impl RouteSpecStore {
.map(|nr| nr.node_id());
// Get list of all nodes, and sort them for selection
let cur_ts = intf::get_timestamp();
let cur_ts = get_timestamp();
let filter = Box::new(
move |rti: &RoutingTableInner, k: DHTKey, v: Option<Arc<BucketEntry>>| -> bool {
// Exclude our own node from routes
@ -994,7 +994,7 @@ impl RouteSpecStore {
pub async fn test_route(&self, key: &DHTKey) -> EyreResult<bool> {
let is_remote = {
let inner = &mut *self.inner.lock();
let cur_ts = intf::get_timestamp();
let cur_ts = get_timestamp();
Self::with_peek_remote_private_route(inner, cur_ts, key, |_| {}).is_some()
};
if is_remote {
@ -1058,7 +1058,7 @@ impl RouteSpecStore {
pub fn release_route(&self, key: &DHTKey) -> bool {
let is_remote = {
let inner = &mut *self.inner.lock();
let cur_ts = intf::get_timestamp();
let cur_ts = get_timestamp();
Self::with_peek_remote_private_route(inner, cur_ts, key, |_| {}).is_some()
};
if is_remote {
@ -1079,7 +1079,7 @@ impl RouteSpecStore {
directions: DirectionSet,
avoid_node_ids: &[DHTKey],
) -> Option<DHTKey> {
let cur_ts = intf::get_timestamp();
let cur_ts = get_timestamp();
for detail in &inner.content.details {
if detail.1.stability >= stability
&& detail.1.sequencing >= sequencing
@ -1137,7 +1137,7 @@ impl RouteSpecStore {
/// Get the debug description of a route
pub fn debug_route(&self, key: &DHTKey) -> Option<String> {
let inner = &mut *self.inner.lock();
let cur_ts = intf::get_timestamp();
let cur_ts = get_timestamp();
// If this is a remote route, print it
if let Some(s) =
Self::with_peek_remote_private_route(inner, cur_ts, key, |rpi| format!("{:#?}", rpi))
@ -1534,7 +1534,7 @@ impl RouteSpecStore {
}
// store the private route in our cache
let cur_ts = intf::get_timestamp();
let cur_ts = get_timestamp();
let key = Self::with_create_remote_private_route(inner, cur_ts, private_route, |r| {
r.private_route.as_ref().unwrap().public_key.clone()
});
@ -1557,7 +1557,7 @@ impl RouteSpecStore {
/// Retrieve an imported remote private route by its public key
pub fn get_remote_private_route(&self, key: &DHTKey) -> Option<PrivateRoute> {
let inner = &mut *self.inner.lock();
let cur_ts = intf::get_timestamp();
let cur_ts = get_timestamp();
Self::with_get_remote_private_route(inner, cur_ts, key, |r| {
r.private_route.as_ref().unwrap().clone()
})
@ -1566,7 +1566,7 @@ impl RouteSpecStore {
/// Retrieve an imported remote private route by its public key but don't 'touch' it
pub fn peek_remote_private_route(&self, key: &DHTKey) -> Option<PrivateRoute> {
let inner = &mut *self.inner.lock();
let cur_ts = intf::get_timestamp();
let cur_ts = get_timestamp();
Self::with_peek_remote_private_route(inner, cur_ts, key, |r| {
r.private_route.as_ref().unwrap().clone()
})
@ -1670,7 +1670,7 @@ impl RouteSpecStore {
/// private route and gotten a response before
pub fn has_remote_private_route_seen_our_node_info(&self, key: &DHTKey) -> bool {
let inner = &mut *self.inner.lock();
let cur_ts = intf::get_timestamp();
let cur_ts = get_timestamp();
Self::with_peek_remote_private_route(inner, cur_ts, key, |rpr| rpr.seen_our_node_info)
.unwrap_or_default()
}

View File

@ -227,7 +227,7 @@ impl RoutingTableInner {
}
pub fn reset_all_seen_our_node_info(&mut self, routing_domain: RoutingDomain) {
let cur_ts = intf::get_timestamp();
let cur_ts = get_timestamp();
self.with_entries_mut(cur_ts, BucketEntryState::Dead, |rti, _, v| {
v.with_mut(rti, |_rti, e| {
e.set_seen_our_node_info(routing_domain, false);
@ -237,7 +237,7 @@ impl RoutingTableInner {
}
pub fn reset_all_updated_since_last_network_change(&mut self) {
let cur_ts = intf::get_timestamp();
let cur_ts = get_timestamp();
self.with_entries_mut(cur_ts, BucketEntryState::Dead, |rti, _, v| {
v.with_mut(rti, |_rti, e| {
e.set_updated_since_last_network_change(false)
@ -330,7 +330,7 @@ impl RoutingTableInner {
// If the local network topology has changed, nuke the existing local node info and let new local discovery happen
if changed {
let cur_ts = intf::get_timestamp();
let cur_ts = get_timestamp();
self.with_entries_mut(cur_ts, BucketEntryState::Dead, |rti, _, e| {
e.with_mut(rti, |_rti, e| {
e.clear_signed_node_info(RoutingDomain::LocalNetwork);
@ -410,7 +410,7 @@ impl RoutingTableInner {
min_state: BucketEntryState,
) -> usize {
let mut count = 0usize;
let cur_ts = intf::get_timestamp();
let cur_ts = get_timestamp();
self.with_entries(cur_ts, min_state, |rti, _, e| {
if e.with(rti, |_rti, e| e.best_routing_domain(routing_domain_set))
.is_some()
@ -712,7 +712,7 @@ impl RoutingTableInner {
pub fn get_routing_table_health(&self) -> RoutingTableHealth {
let mut health = RoutingTableHealth::default();
let cur_ts = intf::get_timestamp();
let cur_ts = get_timestamp();
for bucket in &self.buckets {
for (_, v) in bucket.entries() {
match v.with(self, |_rti, e| e.state(cur_ts)) {
@ -869,7 +869,7 @@ impl RoutingTableInner {
where
T: for<'r> FnMut(&'r RoutingTableInner, DHTKey, Option<Arc<BucketEntry>>) -> O,
{
let cur_ts = intf::get_timestamp();
let cur_ts = get_timestamp();
// Add filter to remove dead nodes always
let filter_dead = Box::new(
@ -954,7 +954,7 @@ impl RoutingTableInner {
where
T: for<'r> FnMut(&'r RoutingTableInner, DHTKey, Option<Arc<BucketEntry>>) -> O,
{
let cur_ts = intf::get_timestamp();
let cur_ts = get_timestamp();
let node_count = {
let config = self.config();
let c = config.get();

View File

@ -64,7 +64,7 @@ pub struct RPCOperation {
impl RPCOperation {
pub fn new_question(question: RPCQuestion, sender_node_info: Option<SignedNodeInfo>) -> Self {
Self {
op_id: intf::get_random_u64(),
op_id: get_random_u64(),
sender_node_info,
kind: RPCOperationKind::Question(question),
}
@ -74,7 +74,7 @@ impl RPCOperation {
sender_node_info: Option<SignedNodeInfo>,
) -> Self {
Self {
op_id: intf::get_random_u64(),
op_id: get_random_u64(),
sender_node_info,
kind: RPCOperationKind::Statement(statement),
}

View File

@ -28,6 +28,7 @@ pub use rpc_error::*;
pub use rpc_status::*;
use super::*;
use crate::crypto::*;
use crate::xx::*;
use futures_util::StreamExt;
@ -256,7 +257,7 @@ impl RPCProcessor {
let timeout = ms_to_us(c.network.rpc.timeout_ms);
let max_route_hop_count = c.network.rpc.max_route_hop_count as usize;
if concurrency == 0 {
concurrency = intf::get_concurrency() / 2;
concurrency = get_concurrency() / 2;
if concurrency == 0 {
concurrency = 1;
}
@ -313,7 +314,7 @@ impl RPCProcessor {
for _ in 0..self.unlocked_inner.concurrency {
let this = self.clone();
let receiver = channel.1.clone();
let jh = intf::spawn(Self::rpc_worker(
let jh = spawn(Self::rpc_worker(
this,
inner.stop_source.as_ref().unwrap().token(),
receiver,
@ -460,7 +461,7 @@ impl RPCProcessor {
}
Ok(TimeoutOr::Value((rpcreader, _))) => {
// Reply received
let recv_ts = intf::get_timestamp();
let recv_ts = get_timestamp();
// Record answer received
self.record_answer_received(
@ -1011,7 +1012,7 @@ impl RPCProcessor {
// Send question
let bytes = message.len() as u64;
let send_ts = intf::get_timestamp();
let send_ts = get_timestamp();
let send_data_kind = network_result_try!(self
.network_manager()
.send_envelope(node_ref.clone(), Some(node_id), message)
@ -1078,7 +1079,7 @@ impl RPCProcessor {
// Send statement
let bytes = message.len() as u64;
let send_ts = intf::get_timestamp();
let send_ts = get_timestamp();
let _send_data_kind = network_result_try!(self
.network_manager()
.send_envelope(node_ref.clone(), Some(node_id), message)
@ -1139,7 +1140,7 @@ impl RPCProcessor {
// Send the reply
let bytes = message.len() as u64;
let send_ts = intf::get_timestamp();
let send_ts = get_timestamp();
network_result_try!(self.network_manager()
.send_envelope(node_ref.clone(), Some(node_id), message)
.await
@ -1357,7 +1358,7 @@ impl RPCProcessor {
connection_descriptor,
routing_domain,
}),
timestamp: intf::get_timestamp(),
timestamp: get_timestamp(),
body_len: body.len() as u64,
},
data: RPCMessageData { contents: body },
@ -1386,7 +1387,7 @@ impl RPCProcessor {
remote_safety_route,
sequencing,
}),
timestamp: intf::get_timestamp(),
timestamp: get_timestamp(),
body_len: body.len() as u64,
},
data: RPCMessageData { contents: body },
@ -1419,7 +1420,7 @@ impl RPCProcessor {
safety_spec,
},
),
timestamp: intf::get_timestamp(),
timestamp: get_timestamp(),
body_len: body.len() as u64,
},
data: RPCMessageData { contents: body },

View File

@ -104,9 +104,9 @@ where
pub async fn wait_for_op(
&self,
mut handle: OperationWaitHandle<T>,
timeout: u64,
timeout_us: u64,
) -> Result<TimeoutOr<(T, u64)>, RPCError> {
let timeout_ms = u32::try_from(timeout / 1000u64)
let timeout_ms = u32::try_from(timeout_us / 1000u64)
.map_err(|e| RPCError::map_internal("invalid timeout")(e))?;
// Take the instance
@ -114,8 +114,8 @@ where
let eventual_instance = handle.eventual_instance.take().unwrap();
// wait for eventualvalue
let start_ts = intf::get_timestamp();
let res = intf::timeout(timeout_ms, eventual_instance)
let start_ts = get_timestamp();
let res = timeout(timeout_ms, eventual_instance)
.await
.into_timeout_or();
Ok(res
@ -125,7 +125,7 @@ where
})
.map(|res| {
let (_span_id, ret) = res.take_value().unwrap();
let end_ts = intf::get_timestamp();
let end_ts = get_timestamp();
//xxx: causes crash (Missing otel data span extensions)
// Span::current().follows_from(span_id);

View File

@ -1,5 +1,4 @@
use crate::xx::*;
use crate::*;
pub async fn test_simple_no_contention() {
info!("test_simple_no_contention");
@ -36,12 +35,12 @@ pub async fn test_simple_single_contention() {
let g1 = table.lock_tag(a1).await;
info!("locked");
let t1 = intf::spawn(async move {
let t1 = spawn(async move {
// move the guard into the task
let _g1_take = g1;
// hold the guard for a bit
info!("waiting");
intf::sleep(1000).await;
sleep(1000).await;
// release the guard
info!("released");
});
@ -68,21 +67,21 @@ pub async fn test_simple_double_contention() {
let g2 = table.lock_tag(a2).await;
info!("locked");
let t1 = intf::spawn(async move {
let t1 = spawn(async move {
// move the guard into the task
let _g1_take = g1;
// hold the guard for a bit
info!("waiting");
intf::sleep(1000).await;
sleep(1000).await;
// release the guard
info!("released");
});
let t2 = intf::spawn(async move {
let t2 = spawn(async move {
// move the guard into the task
let _g2_take = g2;
// hold the guard for a bit
info!("waiting");
intf::sleep(500).await;
sleep(500).await;
// release the guard
info!("released");
});
@ -109,37 +108,37 @@ pub async fn test_parallel_single_contention() {
let a1 = SocketAddr::new("1.2.3.4".parse().unwrap(), 1234);
let table1 = table.clone();
let t1 = intf::spawn(async move {
let t1 = spawn(async move {
// lock the tag
let _g = table1.lock_tag(a1).await;
info!("locked t1");
// hold the guard for a bit
info!("waiting t1");
intf::sleep(500).await;
sleep(500).await;
// release the guard
info!("released t1");
});
let table2 = table.clone();
let t2 = intf::spawn(async move {
let t2 = spawn(async move {
// lock the tag
let _g = table2.lock_tag(a1).await;
info!("locked t2");
// hold the guard for a bit
info!("waiting t2");
intf::sleep(500).await;
sleep(500).await;
// release the guard
info!("released t2");
});
let table3 = table.clone();
let t3 = intf::spawn(async move {
let t3 = spawn(async move {
// lock the tag
let _g = table3.lock_tag(a1).await;
info!("locked t3");
// hold the guard for a bit
info!("waiting t3");
intf::sleep(500).await;
sleep(500).await;
// release the guard
info!("released t3");
});

View File

@ -15,8 +15,8 @@ pub async fn test_log() {
pub async fn test_get_timestamp() {
info!("testing get_timestamp");
let t1 = intf::get_timestamp();
let t2 = intf::get_timestamp();
let t1 = get_timestamp();
let t2 = get_timestamp();
assert!(t2 >= t1);
}
@ -31,8 +31,8 @@ pub async fn test_eventual() {
let i4 = e1.instance_clone(4u32);
drop(i2);
let jh = intf::spawn(async move {
intf::sleep(1000).await;
let jh = spawn(async move {
sleep(1000).await;
e1.resolve();
});
@ -48,14 +48,14 @@ pub async fn test_eventual() {
let i3 = e1.instance_clone(3u32);
let i4 = e1.instance_clone(4u32);
let e1_c1 = e1.clone();
let jh = intf::spawn(async move {
let jh = spawn(async move {
let i5 = e1.instance_clone(5u32);
let i6 = e1.instance_clone(6u32);
assert_eq!(i1.await, 1u32);
assert_eq!(i5.await, 5u32);
assert_eq!(i6.await, 6u32);
});
intf::sleep(1000).await;
sleep(1000).await;
let resolved = e1_c1.resolve();
drop(i2);
drop(i3);
@ -68,11 +68,11 @@ pub async fn test_eventual() {
let i1 = e1.instance_clone(1u32);
let i2 = e1.instance_clone(2u32);
let e1_c1 = e1.clone();
let jh = intf::spawn(async move {
let jh = spawn(async move {
assert_eq!(i1.await, 1u32);
assert_eq!(i2.await, 2u32);
});
intf::sleep(1000).await;
sleep(1000).await;
e1_c1.resolve().await;
jh.await;
@ -81,11 +81,11 @@ pub async fn test_eventual() {
//
let j1 = e1.instance_clone(1u32);
let j2 = e1.instance_clone(2u32);
let jh = intf::spawn(async move {
let jh = spawn(async move {
assert_eq!(j1.await, 1u32);
assert_eq!(j2.await, 2u32);
});
intf::sleep(1000).await;
sleep(1000).await;
e1_c1.resolve().await;
jh.await;
@ -106,8 +106,8 @@ pub async fn test_eventual_value() {
drop(i2);
let e1_c1 = e1.clone();
let jh = intf::spawn(async move {
intf::sleep(1000).await;
let jh = spawn(async move {
sleep(1000).await;
e1_c1.resolve(3u32);
});
@ -123,14 +123,14 @@ pub async fn test_eventual_value() {
let i3 = e1.instance();
let i4 = e1.instance();
let e1_c1 = e1.clone();
let jh = intf::spawn(async move {
let jh = spawn(async move {
let i5 = e1.instance();
let i6 = e1.instance();
i1.await;
i5.await;
i6.await;
});
intf::sleep(1000).await;
sleep(1000).await;
let resolved = e1_c1.resolve(4u16);
drop(i2);
drop(i3);
@ -145,11 +145,11 @@ pub async fn test_eventual_value() {
let i1 = e1.instance();
let i2 = e1.instance();
let e1_c1 = e1.clone();
let jh = intf::spawn(async move {
let jh = spawn(async move {
i1.await;
i2.await;
});
intf::sleep(1000).await;
sleep(1000).await;
e1_c1.resolve(5u32).await;
jh.await;
assert_eq!(e1_c1.take_value(), Some(5u32));
@ -158,11 +158,11 @@ pub async fn test_eventual_value() {
//
let j1 = e1.instance();
let j2 = e1.instance();
let jh = intf::spawn(async move {
let jh = spawn(async move {
j1.await;
j2.await;
});
intf::sleep(1000).await;
sleep(1000).await;
e1_c1.resolve(6u32).await;
jh.await;
assert_eq!(e1_c1.take_value(), Some(6u32));
@ -182,8 +182,8 @@ pub async fn test_eventual_value_clone() {
let i4 = e1.instance();
drop(i2);
let jh = intf::spawn(async move {
intf::sleep(1000).await;
let jh = spawn(async move {
sleep(1000).await;
e1.resolve(3u32);
});
@ -200,14 +200,14 @@ pub async fn test_eventual_value_clone() {
let i3 = e1.instance();
let i4 = e1.instance();
let e1_c1 = e1.clone();
let jh = intf::spawn(async move {
let jh = spawn(async move {
let i5 = e1.instance();
let i6 = e1.instance();
assert_eq!(i1.await, 4);
assert_eq!(i5.await, 4);
assert_eq!(i6.await, 4);
});
intf::sleep(1000).await;
sleep(1000).await;
let resolved = e1_c1.resolve(4u16);
drop(i2);
drop(i3);
@ -221,22 +221,22 @@ pub async fn test_eventual_value_clone() {
let i1 = e1.instance();
let i2 = e1.instance();
let e1_c1 = e1.clone();
let jh = intf::spawn(async move {
let jh = spawn(async move {
assert_eq!(i1.await, 5);
assert_eq!(i2.await, 5);
});
intf::sleep(1000).await;
sleep(1000).await;
e1_c1.resolve(5u32).await;
jh.await;
e1_c1.reset();
//
let j1 = e1.instance();
let j2 = e1.instance();
let jh = intf::spawn(async move {
let jh = spawn(async move {
assert_eq!(j1.await, 6);
assert_eq!(j2.await, 6);
});
intf::sleep(1000).await;
sleep(1000).await;
e1_c1.resolve(6u32).await;
jh.await;
e1_c1.reset();
@ -246,7 +246,7 @@ pub async fn test_interval() {
info!("testing interval");
let tick: Arc<Mutex<u32>> = Arc::new(Mutex::new(0u32));
let stopper = intf::interval(1000, move || {
let stopper = interval(1000, move || {
let tick = tick.clone();
async move {
let mut tick = tick.lock();
@ -255,7 +255,7 @@ pub async fn test_interval() {
}
});
intf::sleep(5500).await;
sleep(5500).await;
stopper.await;
}
@ -266,19 +266,19 @@ pub async fn test_timeout() {
let tick: Arc<Mutex<u32>> = Arc::new(Mutex::new(0u32));
let tick_1 = tick.clone();
assert!(
intf::timeout(2500, async move {
timeout(2500, async move {
let mut tick = tick_1.lock();
trace!("tick {}", tick);
intf::sleep(1000).await;
sleep(1000).await;
*tick += 1;
trace!("tick {}", tick);
intf::sleep(1000).await;
sleep(1000).await;
*tick += 1;
trace!("tick {}", tick);
intf::sleep(1000).await;
sleep(1000).await;
*tick += 1;
trace!("tick {}", tick);
intf::sleep(1000).await;
sleep(1000).await;
*tick += 1;
})
.await
@ -305,7 +305,7 @@ pub async fn test_sleep() {
let sys_time = SystemTime::now();
let one_sec = Duration::from_secs(1);
intf::sleep(1000).await;
sleep(1000).await;
assert!(sys_time.elapsed().unwrap() >= one_sec);
}
}
@ -462,7 +462,7 @@ cfg_if! {
if #[cfg(not(target_arch = "wasm32"))] {
pub async fn test_network_interfaces() {
info!("testing network interfaces");
let t1 = intf::get_timestamp();
let t1 = get_timestamp();
let interfaces = intf::utils::network_interfaces::NetworkInterfaces::new();
let count = 100;
for x in 0..count {
@ -471,7 +471,7 @@ cfg_if! {
error!("error refreshing interfaces: {}", e);
}
}
let t2 = intf::get_timestamp();
let t2 = get_timestamp();
let tdiff = ((t2 - t1) as f64)/1000000.0f64;
info!("running network interface test with {} iterations took {} seconds", count, tdiff);
info!("interfaces: {:#?}", interfaces)
@ -481,12 +481,12 @@ cfg_if! {
pub async fn test_get_random_u64() {
info!("testing random number generator for u64");
let t1 = intf::get_timestamp();
let t1 = get_timestamp();
let count = 10000;
for _ in 0..count {
let _ = intf::get_random_u64();
let _ = get_random_u64();
}
let t2 = intf::get_timestamp();
let t2 = get_timestamp();
let tdiff = ((t2 - t1) as f64) / 1000000.0f64;
info!(
"running network interface test with {} iterations took {} seconds",
@ -496,12 +496,12 @@ pub async fn test_get_random_u64() {
pub async fn test_get_random_u32() {
info!("testing random number generator for u32");
let t1 = intf::get_timestamp();
let t1 = get_timestamp();
let count = 10000;
for _ in 0..count {
let _ = intf::get_random_u32();
let _ = get_random_u32();
}
let t2 = intf::get_timestamp();
let t2 = get_timestamp();
let tdiff = ((t2 - t1) as f64) / 1000000.0f64;
info!(
"running network interface test with {} iterations took {} seconds",
@ -515,7 +515,7 @@ pub async fn test_must_join_single_future() {
assert_eq!(sf.check().await, Ok(None));
assert_eq!(
sf.single_spawn(async {
intf::sleep(2000).await;
sleep(2000).await;
69
})
.await,
@ -526,22 +526,22 @@ pub async fn test_must_join_single_future() {
assert_eq!(sf.join().await, Ok(Some(69)));
assert_eq!(
sf.single_spawn(async {
intf::sleep(1000).await;
sleep(1000).await;
37
})
.await,
Ok((None, true))
);
intf::sleep(2000).await;
sleep(2000).await;
assert_eq!(
sf.single_spawn(async {
intf::sleep(1000).await;
sleep(1000).await;
27
})
.await,
Ok((Some(37), true))
);
intf::sleep(2000).await;
sleep(2000).await;
assert_eq!(sf.join().await, Ok(Some(27)));
assert_eq!(sf.check().await, Ok(None));
}

View File

@ -222,7 +222,6 @@ fn config_callback(key: String) -> ConfigCallbackReturn {
"network.dht.min_peer_refresh_time_ms" => Ok(Box::new(2_000u32)),
"network.dht.validate_dial_info_receipt_time_ms" => Ok(Box::new(5_000u32)),
"network.upnp" => Ok(Box::new(false)),
"network.natpmp" => Ok(Box::new(false)),
"network.detect_address_changes" => Ok(Box::new(true)),
"network.restricted_nat_retries" => Ok(Box::new(3u32)),
"network.tls.certificate_path" => Ok(Box::new(get_certfile_path())),
@ -352,7 +351,6 @@ pub async fn test_config() {
);
assert_eq!(inner.network.upnp, false);
assert_eq!(inner.network.natpmp, false);
assert_eq!(inner.network.detect_address_changes, true);
assert_eq!(inner.network.restricted_nat_retries, 3u32);
assert_eq!(inner.network.tls.certificate_path, get_certfile_path());

View File

@ -20,9 +20,9 @@ pub async fn test_attach_detach() {
.await
.expect("startup failed");
api.attach().await.unwrap();
intf::sleep(5000).await;
sleep(5000).await;
api.detach().await.unwrap();
intf::sleep(2000).await;
sleep(2000).await;
api.shutdown().await;
info!("--- test auto detach ---");
@ -31,7 +31,7 @@ pub async fn test_attach_detach() {
.await
.expect("startup failed");
api.attach().await.unwrap();
intf::sleep(5000).await;
sleep(5000).await;
api.shutdown().await;
info!("--- test detach without attach ---");

View File

@ -15,7 +15,7 @@ impl fmt::Debug for VeilidAPIInner {
impl Drop for VeilidAPIInner {
fn drop(&mut self) {
if let Some(context) = self.context.take() {
intf::spawn_detached(api_shutdown(context));
spawn_detached(api_shutdown(context));
}
}
}

View File

@ -1826,7 +1826,7 @@ impl SignedDirectNodeInfo {
node_info: NodeInfo,
secret: &DHTKeySecret,
) -> Result<Self, VeilidAPIError> {
let timestamp = intf::get_timestamp();
let timestamp = get_timestamp();
let node_info_bytes = Self::make_signature_bytes(&node_info, timestamp)?;
let signature = sign(&node_id.key, secret, &node_info_bytes)?;
Ok(Self {
@ -1858,7 +1858,7 @@ impl SignedDirectNodeInfo {
Self {
node_info,
signature: None,
timestamp: intf::get_timestamp(),
timestamp: get_timestamp(),
}
}
@ -1906,7 +1906,7 @@ impl SignedRelayedNodeInfo {
relay_info: SignedDirectNodeInfo,
secret: &DHTKeySecret,
) -> Result<Self, VeilidAPIError> {
let timestamp = intf::get_timestamp();
let timestamp = get_timestamp();
let node_info_bytes =
Self::make_signature_bytes(&node_info, &relay_id, &relay_info, timestamp)?;
let signature = sign(&node_id.key, secret, &node_info_bytes)?;

View File

@ -369,7 +369,6 @@ pub struct VeilidConfigNetwork {
pub rpc: VeilidConfigRPC,
pub dht: VeilidConfigDHT,
pub upnp: bool,
pub natpmp: bool,
pub detect_address_changes: bool,
pub restricted_nat_retries: u32,
pub tls: VeilidConfigTLS,
@ -665,7 +664,6 @@ impl VeilidConfig {
get_config!(inner.network.rpc.max_route_hop_count);
get_config!(inner.network.rpc.default_route_hop_count);
get_config!(inner.network.upnp);
get_config!(inner.network.natpmp);
get_config!(inner.network.detect_address_changes);
get_config!(inner.network.restricted_nat_retries);
get_config!(inner.network.tls.certificate_path);

View File

@ -1,28 +0,0 @@
use crate::*;
use rand::{CryptoRng, Error, RngCore};
#[derive(Clone, Copy, Debug, Default)]
pub struct VeilidRng;
impl CryptoRng for VeilidRng {}
impl RngCore for VeilidRng {
fn next_u32(&mut self) -> u32 {
intf::get_random_u32()
}
fn next_u64(&mut self) -> u64 {
intf::get_random_u64()
}
fn fill_bytes(&mut self, dest: &mut [u8]) {
if let Err(e) = self.try_fill_bytes(dest) {
panic!("Error: {}", e);
}
}
fn try_fill_bytes(&mut self, dest: &mut [u8]) -> Result<(), Error> {
intf::random_bytes(dest).map_err(Error::new)
}
}

View File

@ -84,7 +84,6 @@ Future<VeilidConfig> getDefaultVeilidConfig() async {
validateDialInfoReceiptTimeMs: 2000,
),
upnp: true,
natpmp: true,
detectAddressChanges: true,
restrictedNatRetries: 0,
tls: VeilidConfigTLS(

View File

@ -745,7 +745,6 @@ class VeilidConfigNetwork {
VeilidConfigRPC rpc;
VeilidConfigDHT dht;
bool upnp;
bool natpmp;
bool detectAddressChanges;
int restrictedNatRetries;
VeilidConfigTLS tls;
@ -770,7 +769,6 @@ class VeilidConfigNetwork {
required this.rpc,
required this.dht,
required this.upnp,
required this.natpmp,
required this.detectAddressChanges,
required this.restrictedNatRetries,
required this.tls,
@ -797,7 +795,6 @@ class VeilidConfigNetwork {
'rpc': rpc.json,
'dht': dht.json,
'upnp': upnp,
'natpmp': natpmp,
'detect_address_changes': detectAddressChanges,
'restricted_nat_retries': restrictedNatRetries,
'tls': tls.json,
@ -827,7 +824,6 @@ class VeilidConfigNetwork {
rpc = VeilidConfigRPC.fromJson(json['rpc']),
dht = VeilidConfigDHT.fromJson(json['dht']),
upnp = json['upnp'],
natpmp = json['natpmp'],
detectAddressChanges = json['detect_address_changes'],
restrictedNatRetries = json['restricted_nat_retries'],
tls = VeilidConfigTLS.fromJson(json['tls']),

View File

@ -100,7 +100,6 @@ core:
min_peer_refresh_time_ms: 2000
validate_dial_info_receipt_time_ms: 2000
upnp: true
natpmp: false
detect_address_changes: true
restricted_nat_retries: 0
tls:
@ -607,7 +606,6 @@ pub struct Network {
pub rpc: Rpc,
pub dht: Dht,
pub upnp: bool,
pub natpmp: bool,
pub detect_address_changes: bool,
pub restricted_nat_retries: u32,
pub tls: Tls,
@ -1005,7 +1003,6 @@ impl Settings {
value
);
set_config_value!(inner.core.network.upnp, value);
set_config_value!(inner.core.network.natpmp, value);
set_config_value!(inner.core.network.detect_address_changes, value);
set_config_value!(inner.core.network.restricted_nat_retries, value);
set_config_value!(inner.core.network.tls.certificate_path, value);
@ -1206,7 +1203,6 @@ impl Settings {
inner.core.network.dht.validate_dial_info_receipt_time_ms,
)),
"network.upnp" => Ok(Box::new(inner.core.network.upnp)),
"network.natpmp" => Ok(Box::new(inner.core.network.natpmp)),
"network.detect_address_changes" => {
Ok(Box::new(inner.core.network.detect_address_changes))
}
@ -1530,7 +1526,6 @@ mod tests {
);
//
assert_eq!(s.core.network.upnp, true);
assert_eq!(s.core.network.natpmp, false);
assert_eq!(s.core.network.detect_address_changes, true);
assert_eq!(s.core.network.restricted_nat_retries, 0u32);
//

179
veilid-tools/Cargo.toml Normal file
View File

@ -0,0 +1,179 @@
[package]
name = "veilid-core"
version = "0.1.0"
authors = ["John Smith <nobody@example.com>"]
edition = "2021"
build = "build.rs"
license = "LGPL-2.0-or-later OR MPL-2.0 OR (MIT AND BSD-3-Clause)"
[lib]
crate-type = ["cdylib", "staticlib", "rlib"]
[features]
default = []
rt-async-std = [ "async-std", "async-std-resolver", "async_executors/async_std", "rtnetlink?/smol_socket" ]
rt-tokio = [ "tokio", "tokio-util", "tokio-stream", "trust-dns-resolver/tokio-runtime", "async_executors/tokio_tp", "async_executors/tokio_io", "async_executors/tokio_timer", "rtnetlink?/tokio_socket" ]
android_tests = []
ios_tests = [ "simplelog" ]
tracking = []
[dependencies]
tracing = { version = "^0", features = ["log", "attributes"] }
tracing-subscriber = "^0"
tracing-error = "^0"
eyre = "^0"
capnp = { version = "^0", default_features = false }
rust-fsm = "^0"
static_assertions = "^1"
cfg-if = "^1"
thiserror = "^1"
hex = "^0"
generic-array = "^0"
secrecy = "^0"
chacha20poly1305 = "^0"
chacha20 = "^0"
hashlink = { path = "../external/hashlink", features = ["serde_impl"] }
serde = { version = "^1", features = ["derive" ] }
serde_json = { version = "^1" }
serde-big-array = "^0"
futures-util = { version = "^0", default_features = false, features = ["alloc"] }
parking_lot = "^0"
lazy_static = "^1"
directories = "^4"
once_cell = "^1"
json = "^0"
owning_ref = "^0"
flume = { version = "^0", features = ["async"] }
enumset = { version= "^1", features = ["serde"] }
backtrace = { version = "^0" }
owo-colors = "^3"
stop-token = { version = "^0", default-features = false }
ed25519-dalek = { version = "^1", default_features = false, features = ["alloc", "u64_backend"] }
x25519-dalek = { package = "x25519-dalek-ng", version = "^1", default_features = false, features = ["u64_backend"] }
curve25519-dalek = { package = "curve25519-dalek-ng", version = "^4", default_features = false, features = ["alloc", "u64_backend"] }
# ed25519-dalek needs rand 0.7 until it updates itself
rand = "0.7"
# curve25519-dalek-ng is stuck on digest 0.9.0
blake3 = { version = "1.1.0", default_features = false }
digest = "0.9.0"
rtnetlink = { version = "^0", default-features = false, optional = true }
async-std-resolver = { version = "^0", optional = true }
trust-dns-resolver = { version = "^0", optional = true }
keyvaluedb = { path = "../external/keyvaluedb/keyvaluedb" }
#rkyv = { version = "^0", default_features = false, features = ["std", "alloc", "strict", "size_32", "validation"] }
rkyv = { git = "https://github.com/rkyv/rkyv.git", rev = "57e2a8d", default_features = false, features = ["std", "alloc", "strict", "size_32", "validation"] }
bytecheck = "^0"
# Dependencies for native builds only
# Linux, Windows, Mac, iOS, Android
[target.'cfg(not(target_arch = "wasm32"))'.dependencies]
async-std = { version = "^1", features = ["unstable"], optional = true}
tokio = { version = "^1", features = ["full"], optional = true}
tokio-util = { version = "^0", features = ["compat"], optional = true}
tokio-stream = { version = "^0", features = ["net"], optional = true}
async-io = { version = "^1" }
async-tungstenite = { version = "^0", features = ["async-tls"] }
maplit = "^1"
config = { version = "^0", features = ["yaml"] }
keyring-manager = { path = "../external/keyring-manager" }
async-tls = "^0.11"
igd = { path = "../external/rust-igd" }
webpki = "^0"
webpki-roots = "^0"
rustls = "^0.19"
rustls-pemfile = "^0.2"
futures-util = { version = "^0", default-features = false, features = ["async-await", "sink", "std", "io"] }
keyvaluedb-sqlite = { path = "../external/keyvaluedb/keyvaluedb-sqlite" }
data-encoding = { version = "^2" }
socket2 = "^0"
bugsalot = "^0"
chrono = "^0"
libc = "^0"
nix = "^0"
# Dependencies for WASM builds only
[target.'cfg(target_arch = "wasm32")'.dependencies]
wasm-bindgen = "^0"
js-sys = "^0"
wasm-bindgen-futures = "^0"
keyvaluedb-web = { path = "../external/keyvaluedb/keyvaluedb-web" }
data-encoding = { version = "^2", default_features = false, features = ["alloc"] }
getrandom = { version = "^0", features = ["js"] }
ws_stream_wasm = "^0"
async_executors = { version = "^0", default-features = false, features = [ "bindgen", "timer" ]}
async-lock = "^2"
send_wrapper = { version = "^0", features = ["futures"] }
wasm-logger = "^0"
tracing-wasm = "^0"
# Configuration for WASM32 'web-sys' crate
[target.'cfg(target_arch = "wasm32")'.dependencies.web-sys]
version = "^0"
features = [
'Document',
'HtmlDocument',
# 'Element',
# 'HtmlElement',
# 'Node',
'IdbFactory',
'IdbOpenDbRequest',
'Storage',
'Location',
'Window',
]
# Dependencies for Android
[target.'cfg(target_os = "android")'.dependencies]
jni = "^0"
jni-sys = "^0"
ndk = { version = "^0", features = ["trace"] }
ndk-glue = { version = "^0", features = ["logger"] }
tracing-android = { version = "^0" }
# Dependencies for all Unix (Linux, Android, macOS, iOS)
[target.'cfg(unix)'.dependencies]
ifstructs = "^0"
# Dependencies for Linux or Android
[target.'cfg(any(target_os = "android",target_os = "linux"))'.dependencies]
rtnetlink = { version = "^0", default-features = false }
# Dependencies for Windows
[target.'cfg(target_os = "windows")'.dependencies]
winapi = { version = "^0", features = [ "iptypes", "iphlpapi" ] }
windows = { version = "^0", features = [ "Win32_NetworkManagement_Dns", "Win32_Foundation", "alloc" ]}
windows-permissions = "^0"
# Dependencies for iOS
[target.'cfg(target_os = "ios")'.dependencies]
simplelog = { version = "^0", optional = true }
# Rusqlite configuration to ensure platforms that don't come with sqlite get it bundled
# Except WASM which doesn't use sqlite
[target.'cfg(all(not(target_os = "ios"),not(target_os = "android"),not(target_arch = "wasm32")))'.dependencies.rusqlite]
version = "^0"
features = ["bundled"]
### DEV DEPENDENCIES
[dev-dependencies]
serial_test = "^0"
[target.'cfg(not(target_arch = "wasm32"))'.dev-dependencies]
simplelog = { version = "^0", features=["test"] }
[target.'cfg(target_arch = "wasm32")'.dev-dependencies]
wasm-bindgen-test = "^0"
console_error_panic_hook = "^0"
wee_alloc = "^0"
wasm-logger = "^0"
### BUILD OPTIONS
[build-dependencies]
capnpc = "^0"
[package.metadata.wasm-pack.profile.release]
wasm-opt = ["-O", "--enable-mutable-globals"]

44
veilid-tools/ios_build.sh Executable file
View File

@ -0,0 +1,44 @@
#!/bin/bash
SCRIPTDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
CARGO_MANIFEST_PATH=$(python -c "import os; print(os.path.realpath(\"$SCRIPTDIR/Cargo.toml\"))")
# echo CARGO_MANIFEST_PATH: $CARGO_MANIFEST_PATH
if [ "$CONFIGURATION" == "Debug" ]; then
EXTRA_CARGO_OPTIONS="$@"
else
EXTRA_CARGO_OPTIONS="$@ --release"
fi
ARCHS=${ARCHS:=arm64}
for arch in $ARCHS
do
if [ "$arch" == "arm64" ]; then
echo arm64
CARGO_TARGET=aarch64-apple-ios
#CARGO_TOOLCHAIN=+ios-arm64-1.57.0
CARGO_TOOLCHAIN=
elif [ "$arch" == "x86_64" ]; then
echo x86_64
CARGO_TARGET=x86_64-apple-ios
CARGO_TOOLCHAIN=
else
echo Unsupported ARCH: $arch
continue
fi
CARGO=`which cargo`
CARGO=${CARGO:=~/.cargo/bin/cargo}
CARGO_DIR=$(dirname $CARGO)
# Choose arm64 brew for unit tests by default if we are on M1
if [ -f /opt/homebrew/bin/brew ]; then
HOMEBREW_DIR=/opt/homebrew/bin
elif [ -f /usr/local/bin/brew ]; then
HOMEBREW_DIR=/usr/local/bin
else
HOMEBREW_DIR=$(dirname `which brew`)
fi
env -i PATH=/usr/bin:/bin:$HOMEBREW_DIR:$CARGO_DIR HOME="$HOME" USER="$USER" cargo $CARGO_TOOLCHAIN build $EXTRA_CARGO_OPTIONS --target $CARGO_TARGET --manifest-path $CARGO_MANIFEST_PATH
done

View File

@ -1,4 +1,5 @@
use super::*;
use std::io;
use task::{Context, Poll};

View File

@ -1,4 +1,5 @@
use super::*;
use core::fmt::Debug;
use core::hash::Hash;

View File

@ -1,4 +1,5 @@
use super::*;
cfg_if! {
if #[cfg(target_arch = "wasm32")] {

View File

@ -1,4 +1,5 @@
use crate::xx::*;
use super::*;
use core::pin::Pin;
use core::task::{Context, Poll};
use futures_util::AsyncRead as Read;

View File

@ -1,4 +1,5 @@
use super::*;
use eventual_base::*;
pub struct Eventual {

View File

@ -1,4 +1,5 @@
use super::*;
use eventual_base::*;
pub struct EventualValue<T: Unpin> {

View File

@ -1,4 +1,5 @@
use super::*;
use eventual_base::*;
pub struct EventualValueClone<T: Unpin + Clone> {

View File

@ -0,0 +1,49 @@
use super::*;
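// Calls `callback` every `freq_ms` milliseconds until the returned future is
// awaited: awaiting it resolves the shared Eventual, which lets the inner
// timeout() succeed, ends the loop, and joins the ticker task.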
cfg_if! {
if #[cfg(target_arch = "wasm32")] {
pub fn interval<F, FUT>(freq_ms: u32, callback: F) -> SendPinBoxFuture<()>
where
F: Fn() -> FUT + Send + Sync + 'static,
FUT: Future<Output = ()> + Send,
{
let e = Eventual::new();
let ie = e.clone();
let jh = spawn(Box::pin(async move {
while timeout(freq_ms, ie.instance_clone(())).await.is_err() {
callback().await;
}
}));
Box::pin(async move {
e.resolve().await;
jh.await;
})
}
} else {
pub fn interval<F, FUT>(freq_ms: u32, callback: F) -> SendPinBoxFuture<()>
where
F: Fn() -> FUT + Send + Sync + 'static,
FUT: Future<Output = ()> + Send,
{
let e = Eventual::new();
let ie = e.clone();
let jh = spawn(async move {
while timeout(freq_ms, ie.instance_clone(())).await.is_err() {
callback().await;
}
});
Box::pin(async move {
e.resolve().await;
jh.await;
})
}
}
}
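// Usage sketch (illustrative only, not part of the file above; assumes the
// sleep() helper and the re-exported log macros from this crate):
//
//     async fn run_ticker() {
//         let stop = interval(250, || async { info!("tick"); });
//         sleep(1000).await;  // let it tick a few times
//         stop.await;         // resolve the Eventual and join the ticker task
//     }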

View File

@ -1,4 +1,5 @@
use super::*;
use core::fmt;
#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug, PartialOrd, Ord)]
pub struct IpAddrPort {

View File

@ -2,7 +2,8 @@
// This file really shouldn't be necessary, but 'ip' isn't a stable feature
//
use crate::xx::*;
use super::*;
use core::hash::*;
#[derive(Copy, PartialEq, Eq, Clone, Hash, Debug)]

View File

@ -2,7 +2,7 @@
// Pass errors through and log them simultaneously via map_err()
// Also contains common log facilities (net, rpc, rtab, pstore, crypto, etc.)
pub use alloc::string::{String, ToString};
use alloc::string::{String, ToString};
pub fn map_to_string<X: ToString>(arg: X) -> String {
arg.to_string()

View File

@ -6,6 +6,7 @@ mod eventual;
mod eventual_base;
mod eventual_value;
mod eventual_value_clone;
mod interval;
mod ip_addr_port;
mod ip_extra;
mod log_thru;
@ -13,11 +14,18 @@ mod must_join_handle;
mod must_join_single_future;
mod mutable_future;
mod network_result;
mod random;
mod single_shot_eventual;
mod sleep;
mod spawn;
mod split_url;
mod tick_task;
mod timeout;
mod timeout_or;
mod timestamp;
mod tools;
#[cfg(target_arch = "wasm32")]
mod wasm;
pub use cfg_if::*;
#[allow(unused_imports)]
@ -33,8 +41,13 @@ pub use split_url::*;
pub use static_assertions::*;
pub use stop_token::*;
pub use thiserror::Error as ThisError;
cfg_if! {
if #[cfg(feature = "tracing")] {
pub use tracing::*;
} else {
pub use log::*;
}
}
pub type PinBox<T> = Pin<Box<T>>;
pub type PinBoxFuture<T> = PinBox<dyn Future<Output = T> + 'static>;
pub type PinBoxFutureLifetime<'a, T> = PinBox<dyn Future<Output = T> + 'a>;
@ -70,8 +83,6 @@ pub use std::vec::Vec;
cfg_if! {
if #[cfg(target_arch = "wasm32")] {
pub use wasm_bindgen::prelude::*;
pub use async_lock::Mutex as AsyncMutex;
pub use async_lock::MutexGuard as AsyncMutexGuard;
pub use async_lock::MutexGuardArc as AsyncMutexGuardArc;
@ -103,13 +114,21 @@ pub use eventual::*;
pub use eventual_base::{EventualCommon, EventualResolvedFuture};
pub use eventual_value::*;
pub use eventual_value_clone::*;
pub use interval::*;
pub use ip_addr_port::*;
pub use ip_extra::*;
pub use must_join_handle::*;
pub use must_join_single_future::*;
pub use mutable_future::*;
pub use network_result::*;
pub use random::*;
pub use single_shot_eventual::*;
pub use sleep::*;
pub use spawn::*;
pub use tick_task::*;
pub use timeout::*;
pub use timeout_or::*;
pub use timestamp::*;
pub use tools::*;
#[cfg(target_arch = "wasm32")]
pub use wasm::*;

View File

@ -1,6 +1,5 @@
use super::*;
use core::future::Future;
use core::pin::Pin;
use core::task::{Context, Poll};
#[derive(Debug)]

View File

@ -1,5 +1,5 @@
use super::*;
use crate::*;
use core::task::Poll;
use futures_util::poll;
@ -157,7 +157,7 @@ where
// Run if we should do that
if run {
self.unlock(Some(intf::spawn_local(future)));
self.unlock(Some(spawn_local(future)));
}
// Return the prior result if we have one
@ -197,7 +197,7 @@ where
}
// Run if we should do that
if run {
self.unlock(Some(intf::spawn(future)));
self.unlock(Some(spawn(future)));
}
// Return the prior result if we have one
Ok((out, run))

View File

@ -1,4 +1,5 @@
use super::*;
use core::fmt::{Debug, Display};
use core::result::Result;
use std::error::Error;

View File

@ -0,0 +1,81 @@
use super::*;
use rand::prelude::*;
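// Adapter that exposes the platform randomness helpers below through rand's
// RngCore and CryptoRng traits.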
#[derive(Clone, Copy, Debug, Default)]
pub struct VeilidRng;
impl CryptoRng for VeilidRng {}
impl RngCore for VeilidRng {
fn next_u32(&mut self) -> u32 {
get_random_u32()
}
fn next_u64(&mut self) -> u64 {
get_random_u64()
}
fn fill_bytes(&mut self, dest: &mut [u8]) {
if let Err(e) = self.try_fill_bytes(dest) {
panic!("Error: {}", e);
}
}
fn try_fill_bytes(&mut self, dest: &mut [u8]) -> Result<(), rand::Error> {
random_bytes(dest).map_err(rand::Error::new)
}
}
cfg_if! {
if #[cfg(target_arch = "wasm32")] {
use js_sys::Math;
pub fn random_bytes(dest: &mut [u8]) -> EyreResult<()> {
let len = dest.len();
let u32len = len / 4;
let remlen = len % 4;
for n in 0..u32len {
let r = (Math::random() * (u32::max_value() as f64)) as u32;
dest[n * 4 + 0] = (r & 0xFF) as u8;
dest[n * 4 + 1] = ((r >> 8) & 0xFF) as u8;
dest[n * 4 + 2] = ((r >> 16) & 0xFF) as u8;
dest[n * 4 + 3] = ((r >> 24) & 0xFF) as u8;
}
if remlen > 0 {
let r = (Math::random() * (u32::max_value() as f64)) as u32;
for n in 0..remlen {
dest[u32len * 4 + n] = ((r >> (n * 8)) & 0xFF) as u8;
}
}
Ok(())
}
pub fn get_random_u32() -> u32 {
(Math::random() * (u32::max_value() as f64)) as u32
}
pub fn get_random_u64() -> u64 {
let v1: u32 = get_random_u32();
let v2: u32 = get_random_u32();
((v1 as u64) << 32) | ((v2 as u32) as u64)
}
} else {
pub fn random_bytes(dest: &mut [u8]) -> EyreResult<()> {
let mut rng = rand::thread_rng();
rng.try_fill_bytes(dest).wrap_err("failed to fill bytes")
}
pub fn get_random_u32() -> u32 {
let mut rng = rand::thread_rng();
rng.next_u32()
}
pub fn get_random_u64() -> u64 {
let mut rng = rand::thread_rng();
rng.next_u64()
}
}
}

34
veilid-tools/src/sleep.rs Normal file
View File

@ -0,0 +1,34 @@
use super::*;
use std::time::Duration;
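// Runtime-agnostic millisecond sleep. On native executors a zero-millisecond
// sleep yields to the scheduler instead of arming a timer; on wasm it always
// delegates to the Bindgen timer.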
cfg_if! {
if #[cfg(target_arch = "wasm32")] {
use async_executors::Bindgen;
pub async fn sleep(millis: u32) {
Bindgen.sleep(Duration::from_millis(millis.into())).await
}
} else {
pub async fn sleep(millis: u32) {
if millis == 0 {
cfg_if! {
if #[cfg(feature="rt-async-std")] {
async_std::task::yield_now().await;
} else if #[cfg(feature="rt-tokio")] {
tokio::task::yield_now().await;
}
}
} else {
cfg_if! {
if #[cfg(feature="rt-async-std")] {
async_std::task::sleep(Duration::from_millis(u64::from(millis))).await;
} else if #[cfg(feature="rt-tokio")] {
tokio::time::sleep(Duration::from_millis(u64::from(millis))).await;
}
}
}
}
}
}

119
veilid-tools/src/spawn.rs Normal file
View File

@ -0,0 +1,119 @@
use super::*;
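// Platform- and runtime-agnostic task spawning. spawn/spawn_local return a
// MustJoinHandle, signalling that the handle should be joined rather than
// silently dropped; the *_detached variants deliberately discard the handle.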
cfg_if! {
if #[cfg(target_arch = "wasm32")] {
use async_executors::{Bindgen, LocalSpawnHandleExt, SpawnHandleExt};
pub fn spawn<Out>(future: impl Future<Output = Out> + Send + 'static) -> MustJoinHandle<Out>
where
Out: Send + 'static,
{
MustJoinHandle::new(
Bindgen
.spawn_handle(future)
.expect("wasm-bindgen-futures spawn_handle_local should never error out"),
)
}
pub fn spawn_local<Out>(future: impl Future<Output = Out> + 'static) -> MustJoinHandle<Out>
where
Out: 'static,
{
MustJoinHandle::new(
Bindgen
.spawn_handle_local(future)
.expect("wasm-bindgen-futures spawn_handle_local should never error out"),
)
}
pub fn spawn_detached<Out>(future: impl Future<Output = Out> + Send + 'static)
where
Out: Send + 'static,
{
Bindgen
.spawn_handle_local(future)
.expect("wasm-bindgen-futures spawn_handle_local should never error out")
.detach()
}
pub fn spawn_detached_local<Out>(future: impl Future<Output = Out> + 'static)
where
Out: 'static,
{
Bindgen
.spawn_handle_local(future)
.expect("wasm-bindgen-futures spawn_handle_local should never error out")
.detach()
}
} else {
pub fn spawn<Out>(future: impl Future<Output = Out> + Send + 'static) -> MustJoinHandle<Out>
where
Out: Send + 'static,
{
cfg_if! {
if #[cfg(feature="rt-async-std")] {
MustJoinHandle::new(async_std::task::spawn(future))
} else if #[cfg(feature="rt-tokio")] {
MustJoinHandle::new(tokio::task::spawn(future))
}
}
}
pub fn spawn_local<Out>(future: impl Future<Output = Out> + 'static) -> MustJoinHandle<Out>
where
Out: 'static,
{
cfg_if! {
if #[cfg(feature="rt-async-std")] {
MustJoinHandle::new(async_std::task::spawn_local(future))
} else if #[cfg(feature="rt-tokio")] {
MustJoinHandle::new(tokio::task::spawn_local(future))
}
}
}
pub fn spawn_detached<Out>(future: impl Future<Output = Out> + Send + 'static)
where
Out: Send + 'static,
{
cfg_if! {
if #[cfg(feature="rt-async-std")] {
drop(async_std::task::spawn(future));
} else if #[cfg(feature="rt-tokio")] {
drop(tokio::task::spawn(future));
}
}
}
pub fn spawn_detached_local<Out>(future: impl Future<Output = Out> + 'static)
where
Out: 'static,
{
cfg_if! {
if #[cfg(feature="rt-async-std")] {
drop(async_std::task::spawn_local(future));
} else if #[cfg(feature="rt-tokio")] {
drop(tokio::task::spawn_local(future));
}
}
}
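// Runs a blocking closure on the executor's blocking thread pool; under tokio
// a failed join (panic or cancellation) yields `err_result` instead of
// propagating the join error.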
pub async fn blocking_wrapper<F, R>(blocking_task: F, err_result: R) -> R
where
F: FnOnce() -> R + Send + 'static,
R: Send + 'static,
{
// run blocking stuff in blocking thread
cfg_if! {
if #[cfg(feature="rt-async-std")] {
async_std::task::spawn_blocking(blocking_task).await
} else if #[cfg(feature="rt-tokio")] {
tokio::task::spawn_blocking(blocking_task).await.unwrap_or(err_result)
} else {
compile_error!("must use an executor");
}
}
}
}
}

View File

@ -8,10 +8,7 @@
// Only IP address and DNS hostname host fields are supported
use super::*;
use alloc::borrow::ToOwned;
use alloc::string::String;
use alloc::vec::Vec;
use core::fmt;
use core::str::FromStr;
fn is_alphanum(c: u8) -> bool {

View File

@ -1,5 +1,5 @@
use super::*;
use crate::*;
use core::sync::atomic::{AtomicU64, Ordering};
use once_cell::sync::OnceCell;
@ -80,7 +80,7 @@ impl<E: Send + 'static> TickTask<E> {
}
pub async fn tick(&self) -> Result<(), E> {
let now = intf::get_timestamp();
let now = get_timestamp();
let last_timestamp_us = self.last_timestamp_us.load(Ordering::Acquire);
if last_timestamp_us != 0u64 && now.saturating_sub(last_timestamp_us) < self.tick_period_us

View File

@ -0,0 +1,32 @@
use super::*;
cfg_if! {
if #[cfg(target_arch = "wasm32")] {
pub async fn timeout<F, T>(dur_ms: u32, f: F) -> Result<T, TimeoutError>
where
F: Future<Output = T>,
{
match select(Box::pin(sleep(dur_ms)), Box::pin(f)).await {
Either::Left((_x, _b)) => Err(TimeoutError()),
Either::Right((y, _a)) => Ok(y),
}
}
} else {
pub async fn timeout<F, T>(dur_ms: u32, f: F) -> Result<T, TimeoutError>
where
F: Future<Output = T>,
{
cfg_if! {
if #[cfg(feature="rt-async-std")] {
async_std::future::timeout(Duration::from_millis(dur_ms as u64), f).await.map_err(|e| e.into())
} else if #[cfg(feature="rt-tokio")] {
tokio::time::timeout(Duration::from_millis(dur_ms as u64), f).await.map_err(|e| e.into())
}
}
}
}
}

View File

@ -1,5 +1,5 @@
use super::*;
use cfg_if::*;
use core::fmt::{Debug, Display};
use core::result::Result;
use std::error::Error;

View File

@ -0,0 +1,25 @@
use super::*;
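// Wall-clock time in microseconds since the UNIX epoch; the browser path
// converts Date.now() milliseconds to microseconds.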
cfg_if! {
if #[cfg(target_arch = "wasm32")] {
use js_sys::Date;
pub fn get_timestamp() -> u64 {
if utils::is_browser() {
return (Date::now() * 1000.0f64) as u64;
} else {
panic!("WASM requires browser environment");
}
}
} else {
use std::time::{SystemTime, UNIX_EPOCH};
pub fn get_timestamp() -> u64 {
match SystemTime::now().duration_since(UNIX_EPOCH) {
Ok(n) => n.as_micros() as u64,
Err(_) => panic!("SystemTime before UNIX_EPOCH!"),
}
}
}
}

View File

@ -1,8 +1,11 @@
use crate::xx::*;
use super::*;
use alloc::string::ToString;
use std::io;
use std::path::Path;
//////////////////////////////////////////////////////////////////////////////////////////////////////////////
#[macro_export]
macro_rules! assert_err {
($ex:expr) => {
@ -30,6 +33,40 @@ macro_rules! bail_io_error_other {
};
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////
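// Pins a future on the heap as a SendPinBoxFutureLifetime, convenient when
// futures of different concrete types must be stored behind a single type.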
pub fn system_boxed<'a, Out>(
future: impl Future<Output = Out> + Send + 'a,
) -> SendPinBoxFutureLifetime<'a, Out> {
Box::pin(future)
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////
cfg_if! {
if #[cfg(target_arch = "wasm32")] {
// xxx: for now until wasm threads are more stable, and/or we bother with web workers
pub fn get_concurrency() -> u32 {
1
}
} else {
pub fn get_concurrency() -> u32 {
std::thread::available_parallelism()
.map(|x| x.get())
.unwrap_or_else(|e| {
warn!("unable to get concurrency defaulting to single core: {}", e);
1
}) as u32
}
}
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////
pub fn split_port(name: &str) -> EyreResult<(String, Option<u16>)> {
if let Some(split) = name.rfind(':') {
let hoststr = &name[0..split];

52
veilid-tools/src/wasm.rs Normal file
View File

@ -0,0 +1,52 @@
use super::*;
use core::sync::atomic::{AtomicI8, Ordering};
use js_sys::{global, Reflect};
#[wasm_bindgen]
extern "C" {
// Use `js_namespace` here to bind `console.log(..)` instead of just
// `log(..)`
#[wasm_bindgen(js_namespace = console, js_name = log)]
pub fn console_log(s: &str);
#[wasm_bindgen]
pub fn alert(s: &str);
}
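// Detects a browser environment by probing for a global `window` object; the
// result is cached in an AtomicI8 (-1 unknown, 0 no, 1 yes) so the Reflect
// lookup only runs once.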
pub fn is_browser() -> bool {
static CACHE: AtomicI8 = AtomicI8::new(-1);
let cache = CACHE.load(Ordering::Relaxed);
if cache != -1 {
return cache != 0;
}
let res = Reflect::has(&global().as_ref(), &"window".into()).unwrap_or_default();
CACHE.store(res as i8, Ordering::Relaxed);
res
}
// pub fn is_browser_https() -> bool {
// static CACHE: AtomicI8 = AtomicI8::new(-1);
// let cache = CACHE.load(Ordering::Relaxed);
// if cache != -1 {
// return cache != 0;
// }
// let res = js_sys::eval("window.location.protocol === 'https'")
// .map(|res| res.is_truthy())
// .unwrap_or_default();
// CACHE.store(res as i8, Ordering::Relaxed);
// res
// }
#[derive(ThisError, Debug, Clone, Eq, PartialEq)]
#[error("JsValue error")]
pub struct JsValueError(String);
pub fn map_jsvalue_error(x: JsValue) -> JsValueError {
JsValueError(x.as_string().unwrap_or_default())
}

View File

@ -61,7 +61,6 @@ fn init_callbacks() {
case "network.dht.min_peer_refresh_time": return 2000000;
case "network.dht.validate_dial_info_receipt_time": return 5000000;
case "network.upnp": return false;
case "network.natpmp": return false;
case "network.detect_address_changes": return true;
case "network.address_filter": return true;
case "network.restricted_nat_retries": return 3;