John Smith 2022-11-26 21:37:23 -05:00
parent 5df46aecae
commit b1bdf76ae8
80 changed files with 865 additions and 700 deletions

.gitmodules vendored
View File

@@ -16,9 +16,6 @@
[submodule "external/netlink"]
path = external/netlink
url = ../netlink.git
[submodule "external/no-std-net"]
path = external/no-std-net
url = ../no-std-net.git
[submodule "external/libmdns"]
path = external/libmdns
url = ../libmdns.git

View File

@@ -1,6 +1,7 @@
[workspace]
members = [
"veilid-tools",
"veilid-core",
"veilid-server",
"veilid-cli",

View File

@@ -81,7 +81,6 @@ core:
min_peer_refresh_time_ms: 2000
validate_dial_info_receipt_time_ms: 2000
upnp: true
natpmp: false
detect_address_changes: true
enable_local_peer_scope: false
restricted_nat_retries: 0

View File

@@ -193,7 +193,6 @@ network:
bootstrap: ['bootstrap.dev.veilid.net']
bootstrap_nodes: []
upnp: true
natpmp: false
detect_address_changes: true
enable_local_peer_scope: false
restricted_nat_retries: 0

View File

@@ -229,8 +229,8 @@ impl ClientApiConnection {
// Wait until rpc system completion or disconnect was requested
let res = rpc_jh.await;
#[cfg(feature = "rt-tokio")]
let res = res.map_err(|e| format!("join error: {}", e))?;
// #[cfg(feature = "rt-tokio")]
// let res = res.map_err(|e| format!("join error: {}", e))?;
res.map_err(|e| format!("client RPC system error: {}", e))
}

View File

@@ -7,7 +7,7 @@ use std::cell::*;
use std::net::SocketAddr;
use std::rc::Rc;
use std::time::{Duration, SystemTime};
use veilid_core::xx::{Eventual, EventualCommon};
use veilid_core::xx::*;
use veilid_core::*;
pub fn convert_loglevel(s: &str) -> Result<VeilidConfigLogLevel, String> {

View File

@@ -8,7 +8,6 @@ use flexi_logger::*;
use std::ffi::OsStr;
use std::net::ToSocketAddrs;
use std::path::Path;
use tools::*;
mod client_api_connection;
mod command_processor;

View File

@@ -6,12 +6,7 @@ cfg_if! {
pub use async_std::task::JoinHandle;
pub use async_std::net::TcpStream;
pub use async_std::future::TimeoutError;
pub fn spawn_local<F: Future<Output = T> + 'static, T: 'static>(f: F) -> JoinHandle<T> {
async_std::task::spawn_local(f)
}
pub fn spawn_detached_local<F: Future<Output = T> + 'static, T: 'static>(f: F) {
let _ = async_std::task::spawn_local(f);
}
pub use async_std::task::sleep;
pub use async_std::future::timeout;
pub fn block_on<F: Future<Output = T>, T>(f: F) -> T {
@@ -21,12 +16,7 @@ cfg_if! {
pub use tokio::task::JoinHandle;
pub use tokio::net::TcpStream;
pub use tokio::time::error::Elapsed as TimeoutError;
pub fn spawn_local<F: Future<Output = T> + 'static, T: 'static>(f: F) -> JoinHandle<T> {
tokio::task::spawn_local(f)
}
pub fn spawn_detached_local<F: Future<Output = T> + 'static, T: 'static>(f: F) {
let _ = tokio::task::spawn_local(f);
}
pub use tokio::time::sleep;
pub use tokio::time::timeout;
pub fn block_on<F: Future<Output = T>, T>(f: F) -> T {

View File

@@ -11,14 +11,15 @@ crate-type = ["cdylib", "staticlib", "rlib"]
[features]
default = []
rt-async-std = [ "async-std", "async-std-resolver", "async_executors/async_std", "rtnetlink?/smol_socket" ]
rt-tokio = [ "tokio", "tokio-util", "tokio-stream", "trust-dns-resolver/tokio-runtime", "async_executors/tokio_tp", "async_executors/tokio_io", "async_executors/tokio_timer", "rtnetlink?/tokio_socket" ]
rt-async-std = [ "async-std", "async-std-resolver", "async_executors/async_std", "rtnetlink?/smol_socket", "veilid-tools/rt-async-std" ]
rt-tokio = [ "tokio", "tokio-util", "tokio-stream", "trust-dns-resolver/tokio-runtime", "async_executors/tokio_tp", "async_executors/tokio_io", "async_executors/tokio_timer", "rtnetlink?/tokio_socket", "veilid-tools/rt-tokio" ]
android_tests = []
ios_tests = [ "simplelog" ]
tracking = []
[dependencies]
veilid-tools = { path = "../veilid-tools", features = [ "tracing" ] }
tracing = { version = "^0", features = ["log", "attributes"] }
tracing-subscriber = "^0"
tracing-error = "^0"

View File

@@ -254,7 +254,7 @@ impl AttachmentManager {
#[instrument(level = "debug", skip(self))]
async fn attachment_maintainer(self) {
debug!("attachment starting");
self.inner.lock().attach_timestamp = Some(intf::get_timestamp());
self.inner.lock().attach_timestamp = Some(get_timestamp());
let netman = self.network_manager();
let mut restart;
@@ -286,7 +286,7 @@ impl AttachmentManager {
self.update_attachment().await;
// sleep should be at the end in case maintain_peers changes state
intf::sleep(1000).await;
sleep(1000).await;
}
debug!("stopped maintaining peers");
@@ -299,7 +299,7 @@ impl AttachmentManager {
debug!("completely restarting attachment");
// chill out for a second first, give network stack time to settle out
intf::sleep(1000).await;
sleep(1000).await;
}
trace!("stopping attachment");
@@ -348,7 +348,7 @@ impl AttachmentManager {
return;
}
inner.maintain_peers = true;
inner.attachment_maintainer_jh = Some(intf::spawn(self.clone().attachment_maintainer()));
inner.attachment_maintainer_jh = Some(spawn(self.clone().attachment_maintainer()));
}
#[instrument(level = "trace", skip(self))]

View File

@@ -1,4 +1,3 @@
use crate::veilid_rng::*;
use crate::xx::*;
use crate::*;

View File

@@ -137,7 +137,7 @@ impl Crypto {
// Schedule flushing
let this = self.clone();
let flush_future = intf::interval(60000, move || {
let flush_future = interval(60000, move || {
let this = this.clone();
async move {
if let Err(e) = this.flush().await {
@@ -229,13 +229,13 @@ impl Crypto {
pub fn get_random_nonce() -> Nonce {
let mut nonce = [0u8; 24];
intf::random_bytes(&mut nonce).unwrap();
random_bytes(&mut nonce).unwrap();
nonce
}
pub fn get_random_secret() -> SharedSecret {
let mut s = [0u8; 32];
intf::random_bytes(&mut s).unwrap();
random_bytes(&mut s).unwrap();
s
}

View File

@@ -1,201 +1,10 @@
#![allow(dead_code)]
use crate::xx::*;
use rand::prelude::*;
use std::time::{Duration, SystemTime, UNIX_EPOCH};
pub fn get_timestamp() -> u64 {
match SystemTime::now().duration_since(UNIX_EPOCH) {
Ok(n) => n.as_micros() as u64,
Err(_) => panic!("SystemTime before UNIX_EPOCH!"),
}
}
// pub fn get_timestamp_string() -> String {
// let dt = chrono::Utc::now();
// dt.time().format("%H:%M:%S.3f").to_string()
// }
pub fn random_bytes(dest: &mut [u8]) -> EyreResult<()> {
let mut rng = rand::thread_rng();
rng.try_fill_bytes(dest).wrap_err("failed to fill bytes")
}
pub fn get_random_u32() -> u32 {
let mut rng = rand::thread_rng();
rng.next_u32()
}
pub fn get_random_u64() -> u64 {
let mut rng = rand::thread_rng();
rng.next_u64()
}
pub async fn sleep(millis: u32) {
if millis == 0 {
cfg_if! {
if #[cfg(feature="rt-async-std")] {
async_std::task::yield_now().await;
} else if #[cfg(feature="rt-tokio")] {
tokio::task::yield_now().await;
}
}
} else {
cfg_if! {
if #[cfg(feature="rt-async-std")] {
async_std::task::sleep(Duration::from_millis(u64::from(millis))).await;
} else if #[cfg(feature="rt-tokio")] {
tokio::time::sleep(Duration::from_millis(u64::from(millis))).await;
}
}
}
}
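Note that sleep(0) is special-cased as a cooperative yield rather than a timer wait on both runtimes. A minimal caller sketch (hypothetical loop, not part of this commit):

    // let other tasks run between iterations of a hot loop
    for item in work_items {
        process(item).await; // hypothetical work
        sleep(0).await;      // yields via yield_now(), no timer involved
    }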
pub fn system_boxed<'a, Out>(
future: impl Future<Output = Out> + Send + 'a,
) -> SendPinBoxFutureLifetime<'a, Out> {
Box::pin(future)
}
pub fn spawn<Out>(future: impl Future<Output = Out> + Send + 'static) -> MustJoinHandle<Out>
where
Out: Send + 'static,
{
cfg_if! {
if #[cfg(feature="rt-async-std")] {
MustJoinHandle::new(async_std::task::spawn(future))
} else if #[cfg(feature="rt-tokio")] {
MustJoinHandle::new(tokio::task::spawn(future))
}
}
}
pub fn spawn_local<Out>(future: impl Future<Output = Out> + 'static) -> MustJoinHandle<Out>
where
Out: 'static,
{
cfg_if! {
if #[cfg(feature="rt-async-std")] {
MustJoinHandle::new(async_std::task::spawn_local(future))
} else if #[cfg(feature="rt-tokio")] {
MustJoinHandle::new(tokio::task::spawn_local(future))
}
}
}
// pub fn spawn_with_local_set<Out>(
// future: impl Future<Output = Out> + Send + 'static,
// ) -> MustJoinHandle<Out>
// where
// Out: Send + 'static,
// {
// cfg_if! {
// if #[cfg(feature="rt-async-std")] {
// spawn(future)
// } else if #[cfg(feature="rt-tokio")] {
// MustJoinHandle::new(tokio::task::spawn_blocking(move || {
// let rt = tokio::runtime::Handle::current();
// rt.block_on(async {
// let local = tokio::task::LocalSet::new();
// local.run_until(future).await
// })
// }))
// }
// }
// }
pub fn spawn_detached<Out>(future: impl Future<Output = Out> + Send + 'static)
where
Out: Send + 'static,
{
cfg_if! {
if #[cfg(feature="rt-async-std")] {
drop(async_std::task::spawn(future));
} else if #[cfg(feature="rt-tokio")] {
drop(tokio::task::spawn(future));
}
}
}
pub fn interval<F, FUT>(freq_ms: u32, callback: F) -> SendPinBoxFuture<()>
where
F: Fn() -> FUT + Send + Sync + 'static,
FUT: Future<Output = ()> + Send,
{
let e = Eventual::new();
let ie = e.clone();
let jh = spawn(async move {
while timeout(freq_ms, ie.instance_clone(())).await.is_err() {
callback().await;
}
});
Box::pin(async move {
e.resolve().await;
jh.await;
})
}
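The future returned by interval() is its stop handle: the spawned worker runs callback every freq_ms until the Eventual resolves, and awaiting the returned future resolves it and joins the worker. A usage sketch (hypothetical body):

    let stop_flusher = interval(60_000, || async {
        // hypothetical periodic work, e.g. flushing a cache once a minute
    });
    // ... later, shut the interval down cleanly:
    stop_flusher.await;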
pub async fn timeout<F, T>(dur_ms: u32, f: F) -> Result<T, TimeoutError>
where
F: Future<Output = T>,
{
cfg_if! {
if #[cfg(feature="rt-async-std")] {
async_std::future::timeout(Duration::from_millis(dur_ms as u64), f).await.map_err(|e| e.into())
} else if #[cfg(feature="rt-tokio")] {
tokio::time::timeout(Duration::from_millis(dur_ms as u64), f).await.map_err(|e| e.into())
}
}
}
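Both runtime-specific timeout errors are mapped into the common TimeoutError, so call sites stay executor-agnostic. For example (a sketch, with a hypothetical future):

    match timeout(5_000, fetch_peer_info()).await {
        Ok(info) => { /* completed within 5 seconds */ }
        Err(_) => { /* TimeoutError: the future was dropped */ }
    }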
pub async fn blocking_wrapper<F, R>(blocking_task: F, err_result: R) -> R
where
F: FnOnce() -> R + Send + 'static,
R: Send + 'static,
{
// run blocking stuff in blocking thread
cfg_if! {
if #[cfg(feature="rt-async-std")] {
async_std::task::spawn_blocking(blocking_task).await
} else if #[cfg(feature="rt-tokio")] {
tokio::task::spawn_blocking(blocking_task).await.unwrap_or(err_result)
} else {
compile_error!("must use an executor");
}
}
}
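The err_result parameter exists for the tokio path only: tokio's spawn_blocking returns a JoinHandle whose await yields a Result (the task may panic or be cancelled), while async-std's returns the value directly. A usage sketch (hypothetical workload):

    let digest = blocking_wrapper(
        move || expensive_hash(&big_buffer), // hypothetical CPU-bound work, kept off the async threads
        Vec::new(),                          // fallback if the tokio task fails to join
    )
    .await;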
pub fn get_concurrency() -> u32 {
std::thread::available_parallelism()
.map(|x| x.get())
.unwrap_or_else(|e| {
warn!("unable to get concurrency, defaulting to single core: {}", e);
1
}) as u32
}
pub async fn get_outbound_relay_peer() -> Option<crate::veilid_api::PeerInfo> {
panic!("Native Veilid should never require an outbound relay");
}
/*
pub fn async_callback<F, OF, EF, T, E>(fut: F, ok_fn: OF, err_fn: EF)
where
F: Future<Output = Result<T, E>> + Send + 'static,
OF: FnOnce(T) + Send + 'static,
EF: FnOnce(E) + Send + 'static,
{
spawn(Box::pin(async move {
match fut.await {
Ok(v) => ok_fn(v),
Err(e) => err_fn(e),
};
}));
}
*/
/////////////////////////////////////////////////////////////////////////////////
// Resolver
//

View File

@@ -322,7 +322,7 @@ impl PlatformSupportNetlink {
.wrap_err("failed to create rtnetlink socket")?;
// Spawn a connection handler
let connection_jh = intf::spawn(connection);
let connection_jh = spawn(connection);
// Save the connection
self.connection_jh = Some(connection_jh);

View File

@@ -1,159 +1,8 @@
use super::utils;
use crate::xx::*;
use crate::*;
use async_executors::{Bindgen, LocalSpawnHandleExt, SpawnHandleExt, Timer};
use futures_util::future::{select, Either};
use js_sys::*;
//use wasm_bindgen_futures::*;
//use web_sys::*;
#[wasm_bindgen]
extern "C" {
#[wasm_bindgen(catch, structural, js_namespace = global, js_name = setTimeout)]
fn nodejs_global_set_timeout_with_callback_and_timeout_and_arguments_0(
handler: &::js_sys::Function,
timeout: u32,
) -> Result<JsValue, JsValue>;
}
pub fn get_timestamp() -> u64 {
if utils::is_browser() {
return (Date::now() * 1000.0f64) as u64;
} else {
panic!("WASM requires browser environment");
}
}
// pub fn get_timestamp_string() -> String {
// let date = Date::new_0();
// let hours = Date::get_utc_hours(&date);
// let minutes = Date::get_utc_minutes(&date);
// let seconds = Date::get_utc_seconds(&date);
// let milliseconds = Date::get_utc_milliseconds(&date);
// format!(
// "{:02}:{:02}:{:02}.{}",
// hours, minutes, seconds, milliseconds
// )
// }
pub fn random_bytes(dest: &mut [u8]) -> EyreResult<()> {
let len = dest.len();
let u32len = len / 4;
let remlen = len % 4;
for n in 0..u32len {
let r = (Math::random() * (u32::max_value() as f64)) as u32;
dest[n * 4 + 0] = (r & 0xFF) as u8;
dest[n * 4 + 1] = ((r >> 8) & 0xFF) as u8;
dest[n * 4 + 2] = ((r >> 16) & 0xFF) as u8;
dest[n * 4 + 3] = ((r >> 24) & 0xFF) as u8;
}
if remlen > 0 {
let r = (Math::random() * (u32::max_value() as f64)) as u32;
for n in 0..remlen {
dest[u32len * 4 + n] = ((r >> (n * 8)) & 0xFF) as u8;
}
}
Ok(())
}
pub fn get_random_u32() -> u32 {
(Math::random() * (u32::max_value() as f64)) as u32
}
pub fn get_random_u64() -> u64 {
let v1: u32 = get_random_u32();
let v2: u32 = get_random_u32();
((v1 as u64) << 32) | ((v2 as u32) as u64)
}
pub async fn sleep(millis: u32) {
Bindgen.sleep(Duration::from_millis(millis.into())).await
}
pub fn system_boxed<'a, Out>(
future: impl Future<Output = Out> + Send + 'a,
) -> SendPinBoxFutureLifetime<'a, Out> {
Box::pin(future)
}
pub fn spawn<Out>(future: impl Future<Output = Out> + Send + 'static) -> MustJoinHandle<Out>
where
Out: Send + 'static,
{
MustJoinHandle::new(
Bindgen
.spawn_handle(future)
.expect("wasm-bindgen-futures spawn should never error out"),
)
}
pub fn spawn_local<Out>(future: impl Future<Output = Out> + 'static) -> MustJoinHandle<Out>
where
Out: 'static,
{
MustJoinHandle::new(
Bindgen
.spawn_handle_local(future)
.expect("wasm-bindgen-futures spawn_local should never error out"),
)
}
// pub fn spawn_with_local_set<Out>(
// future: impl Future<Output = Out> + Send + 'static,
// ) -> MustJoinHandle<Out>
// where
// Out: Send + 'static,
// {
// spawn(future)
// }
pub fn spawn_detached<Out>(future: impl Future<Output = Out> + Send + 'static)
where
Out: Send + 'static,
{
Bindgen
.spawn_handle_local(future)
.expect("wasm-bindgen-futures spawn_local should never error out")
.detach()
}
pub fn interval<F, FUT>(freq_ms: u32, callback: F) -> SendPinBoxFuture<()>
where
F: Fn() -> FUT + Send + Sync + 'static,
FUT: Future<Output = ()> + Send,
{
let e = Eventual::new();
let ie = e.clone();
let jh = spawn(Box::pin(async move {
while timeout(freq_ms, ie.instance_clone(())).await.is_err() {
callback().await;
}
}));
Box::pin(async move {
e.resolve().await;
jh.await;
})
}
pub async fn timeout<F, T>(dur_ms: u32, f: F) -> Result<T, TimeoutError>
where
F: Future<Output = T>,
{
match select(Box::pin(intf::sleep(dur_ms)), Box::pin(f)).await {
Either::Left((_x, _b)) => Err(TimeoutError()),
Either::Right((y, _a)) => Ok(y),
}
}
// xxx: for now until wasm threads are more stable, and/or we bother with web workers
pub fn get_concurrency() -> u32 {
1
}
pub async fn get_outbound_relay_peer() -> Option<crate::veilid_api::PeerInfo> {
// unimplemented!

View File

@@ -1,54 +1 @@
#![cfg(target_arch = "wasm32")]
use crate::xx::*;
use core::sync::atomic::{AtomicI8, Ordering};
use js_sys::{global, Reflect};
#[wasm_bindgen]
extern "C" {
// Use `js_namespace` here to bind `console.log(..)` instead of just
// `log(..)`
#[wasm_bindgen(js_namespace = console, js_name = log)]
pub fn console_log(s: &str);
#[wasm_bindgen]
pub fn alert(s: &str);
}
pub fn is_browser() -> bool {
static CACHE: AtomicI8 = AtomicI8::new(-1);
let cache = CACHE.load(Ordering::Relaxed);
if cache != -1 {
return cache != 0;
}
let res = Reflect::has(global().as_ref(), &"window".into()).unwrap_or_default();
CACHE.store(res as i8, Ordering::Relaxed);
res
}
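The AtomicI8 acts as a lock-free memo cell: -1 means not yet computed, 0 and 1 cache the boolean, and Relaxed ordering is sufficient because the computed answer is deterministic, so a racing duplicate computation is harmless. The pattern generalizes (a sketch, assuming a hypothetical helper):

    use core::sync::atomic::{AtomicI8, Ordering};

    fn memoized_check(cache: &AtomicI8, compute: impl FnOnce() -> bool) -> bool {
        match cache.load(Ordering::Relaxed) {
            -1 => {
                let res = compute();
                cache.store(res as i8, Ordering::Relaxed); // worst case: computed twice, same value
                res
            }
            v => v != 0,
        }
    }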
// pub fn is_browser_https() -> bool {
// static CACHE: AtomicI8 = AtomicI8::new(-1);
// let cache = CACHE.load(Ordering::Relaxed);
// if cache != -1 {
// return cache != 0;
// }
// let res = js_sys::eval("window.location.protocol === 'https'")
// .map(|res| res.is_truthy())
// .unwrap_or_default();
// CACHE.store(res as i8, Ordering::Relaxed);
// res
// }
#[derive(ThisError, Debug, Clone, Eq, PartialEq)]
#[error("JsValue error")]
pub struct JsValueError(String);
pub fn map_jsvalue_error(x: JsValue) -> JsValueError {
JsValueError(x.as_string().unwrap_or_default())
}

View File

@@ -32,10 +32,6 @@ mod veilid_api;
#[macro_use]
mod veilid_config;
mod veilid_layer_filter;
mod veilid_rng;
#[macro_use]
pub mod xx;
pub use self::api_tracing_layer::ApiTracingLayer;
pub use self::attachment_manager::AttachmentState;
@@ -43,6 +39,7 @@ pub use self::core_context::{api_startup, api_startup_json, UpdateCallback};
pub use self::veilid_api::*;
pub use self::veilid_config::*;
pub use self::veilid_layer_filter::*;
pub use veilid_tools as tools;
pub mod veilid_capnp {
include!(concat!(env!("OUT_DIR"), "/proto/veilid_capnp.rs"));

View File

@@ -78,7 +78,7 @@ impl ConnectionLimits {
pub fn add(&mut self, addr: IpAddr) -> Result<(), AddressFilterError> {
let ipblock = ip_to_ipblock(self.max_connections_per_ip6_prefix_size, addr);
let ts = intf::get_timestamp();
let ts = get_timestamp();
self.purge_old_timestamps(ts);
@@ -134,7 +134,7 @@ impl ConnectionLimits {
pub fn remove(&mut self, addr: IpAddr) -> Result<(), AddressNotInTableError> {
let ipblock = ip_to_ipblock(self.max_connections_per_ip6_prefix_size, addr);
let ts = intf::get_timestamp();
let ts = get_timestamp();
self.purge_old_timestamps(ts);
match ipblock {

View File

@@ -319,7 +319,7 @@ impl ConnectionManager {
};
log_net!(debug "get_or_create_connection retries left: {}", retry_count);
retry_count -= 1;
intf::sleep(500).await;
sleep(500).await;
});
// Add to the connection table

View File

@@ -1,5 +1,5 @@
use crate::*;
use crate::xx::*;
use crate::*;
#[cfg(not(target_arch = "wasm32"))]
mod native;
@@ -403,11 +403,11 @@ impl NetworkManager {
let mut inner = self.inner.lock();
match inner.client_whitelist.entry(client) {
hashlink::lru_cache::Entry::Occupied(mut entry) => {
entry.get_mut().last_seen_ts = intf::get_timestamp()
entry.get_mut().last_seen_ts = get_timestamp()
}
hashlink::lru_cache::Entry::Vacant(entry) => {
entry.insert(ClientWhitelistEntry {
last_seen_ts: intf::get_timestamp(),
last_seen_ts: get_timestamp(),
});
}
}
@@ -419,7 +419,7 @@
match inner.client_whitelist.entry(client) {
hashlink::lru_cache::Entry::Occupied(mut entry) => {
entry.get_mut().last_seen_ts = intf::get_timestamp();
entry.get_mut().last_seen_ts = get_timestamp();
true
}
hashlink::lru_cache::Entry::Vacant(_) => false,
@@ -429,7 +429,7 @@
pub fn purge_client_whitelist(&self) {
let timeout_ms = self.with_config(|c| c.network.client_whitelist_timeout_ms);
let mut inner = self.inner.lock();
let cutoff_timestamp = intf::get_timestamp() - ((timeout_ms as u64) * 1000u64);
let cutoff_timestamp = get_timestamp() - ((timeout_ms as u64) * 1000u64);
// Remove clients from the whitelist that haven't been seen since our whitelist timeout
while inner
.client_whitelist
@@ -516,7 +516,7 @@ impl NetworkManager {
.wrap_err("failed to generate signed receipt")?;
// Record the receipt for later
let exp_ts = intf::get_timestamp() + expiration_us;
let exp_ts = get_timestamp() + expiration_us;
receipt_manager.record_receipt(receipt, exp_ts, expected_returns, callback);
Ok(out)
@@ -540,7 +540,7 @@
.wrap_err("failed to generate signed receipt")?;
// Record the receipt for later
let exp_ts = intf::get_timestamp() + expiration_us;
let exp_ts = get_timestamp() + expiration_us;
let eventual = SingleShotEventual::new(Some(ReceiptEvent::Cancelled));
let instance = eventual.instance();
receipt_manager.record_single_shot_receipt(receipt, exp_ts, eventual);
@@ -707,7 +707,7 @@ impl NetworkManager {
// XXX: do we need a delay here? or another hole punch packet?
// Set the hole punch as our 'last connection' to ensure we return the receipt over the direct hole punch
peer_nr.set_last_connection(connection_descriptor, intf::get_timestamp());
peer_nr.set_last_connection(connection_descriptor, get_timestamp());
// Return the receipt using the same dial info; send the receipt to it
rpc.rpc_call_return_receipt(Destination::direct(peer_nr), receipt)
@@ -731,7 +731,7 @@
let node_id_secret = routing_table.node_id_secret();
// Get timestamp, nonce
let ts = intf::get_timestamp();
let ts = get_timestamp();
let nonce = Crypto::get_random_nonce();
// Encode envelope
@@ -1116,8 +1116,7 @@ impl NetworkManager {
// );
// Update timestamp for this last connection since we just sent to it
node_ref
.set_last_connection(connection_descriptor, intf::get_timestamp());
node_ref.set_last_connection(connection_descriptor, get_timestamp());
return Ok(NetworkResult::value(SendDataKind::Existing(
connection_descriptor,
@@ -1149,7 +1148,7 @@
this.net().send_data_to_dial_info(dial_info, data).await?
);
// If we connected to this node directly, save off the last connection so we can use it again
node_ref.set_last_connection(connection_descriptor, intf::get_timestamp());
node_ref.set_last_connection(connection_descriptor, get_timestamp());
Ok(NetworkResult::value(SendDataKind::Direct(
connection_descriptor,
@@ -1324,7 +1323,7 @@ impl NetworkManager {
});
// Validate timestamp isn't too old
let ts = intf::get_timestamp();
let ts = get_timestamp();
let ets = envelope.get_timestamp();
if let Some(tsbehind) = tsbehind {
if tsbehind > 0 && (ts > ets && ts - ets > tsbehind) {
@@ -1631,7 +1630,7 @@
// public dialinfo
let inconsistent = if inconsistencies.len() >= PUBLIC_ADDRESS_CHANGE_DETECTION_COUNT
{
let exp_ts = intf::get_timestamp() + PUBLIC_ADDRESS_INCONSISTENCY_TIMEOUT_US;
let exp_ts = get_timestamp() + PUBLIC_ADDRESS_INCONSISTENCY_TIMEOUT_US;
for i in &inconsistencies {
pait.insert(*i, exp_ts);
}
@@ -1644,8 +1643,8 @@
.public_address_inconsistencies_table
.entry(key)
.or_insert_with(|| HashMap::new());
let exp_ts = intf::get_timestamp()
+ PUBLIC_ADDRESS_INCONSISTENCY_PUNISHMENT_TIMEOUT_US;
let exp_ts =
get_timestamp() + PUBLIC_ADDRESS_INCONSISTENCY_PUNISHMENT_TIMEOUT_US;
for i in inconsistencies {
pait.insert(i, exp_ts);
}
@@ -1733,7 +1732,7 @@ impl NetworkManager {
}
// Get the list of refs to all nodes to update
let cur_ts = intf::get_timestamp();
let cur_ts = get_timestamp();
let node_refs =
this.routing_table()
.get_nodes_needing_updates(routing_domain, cur_ts, all);

View File

@@ -176,7 +176,7 @@ impl IGDManager {
mapped_port: u16,
) -> Option<()> {
let this = self.clone();
intf::blocking_wrapper(move || {
blocking_wrapper(move || {
let mut inner = this.inner.lock();
// If we already have this port mapped, just return the existing portmap
@@ -215,7 +215,7 @@
expected_external_address: Option<IpAddr>,
) -> Option<SocketAddr> {
let this = self.clone();
intf::blocking_wrapper(move || {
blocking_wrapper(move || {
let mut inner = this.inner.lock();
// If we already have this port mapped, just return the existing portmap
@@ -275,7 +275,7 @@
};
// Add to mapping list to keep alive
let timestamp = intf::get_timestamp();
let timestamp = get_timestamp();
inner.port_maps.insert(PortMapKey {
llpt,
at,
@@ -301,7 +301,7 @@
let mut renews: Vec<(PortMapKey, PortMapValue)> = Vec::new();
{
let inner = self.inner.lock();
let now = intf::get_timestamp();
let now = get_timestamp();
for (k, v) in &inner.port_maps {
let mapping_lifetime = now.saturating_sub(v.timestamp);
@@ -323,7 +323,7 @@
}
let this = self.clone();
intf::blocking_wrapper(move || {
blocking_wrapper(move || {
let mut inner = this.inner.lock();
// Process full renewals
@@ -356,7 +356,7 @@
inner.port_maps.insert(k, PortMapValue {
ext_ip: v.ext_ip,
mapped_port,
timestamp: intf::get_timestamp(),
timestamp: get_timestamp(),
renewal_lifetime: (UPNP_MAPPING_LIFETIME_MS / 2) as u64 * 1000u64,
renewal_attempts: 0,
});
@@ -397,7 +397,7 @@
inner.port_maps.insert(k, PortMapValue {
ext_ip: v.ext_ip,
mapped_port: v.mapped_port,
timestamp: intf::get_timestamp(),
timestamp: get_timestamp(),
renewal_lifetime: (UPNP_MAPPING_LIFETIME_MS / 2) as u64 * 1000u64,
renewal_attempts: 0,
});

View File

@@ -1,5 +1,4 @@
mod igd_manager;
mod natpmp_manager;
mod network_class_discovery;
mod network_tcp;
mod network_udp;
@@ -94,11 +93,9 @@ struct NetworkUnlockedInner {
update_network_class_task: TickTask<EyreReport>,
network_interfaces_task: TickTask<EyreReport>,
upnp_task: TickTask<EyreReport>,
natpmp_task: TickTask<EyreReport>,
// Managers
igd_manager: igd_manager::IGDManager,
natpmp_manager: natpmp_manager::NATPMPManager,
}
#[derive(Clone)]
@@ -150,9 +147,7 @@ impl Network {
update_network_class_task: TickTask::new(1),
network_interfaces_task: TickTask::new(5),
upnp_task: TickTask::new(1),
natpmp_task: TickTask::new(1),
igd_manager: igd_manager::IGDManager::new(config.clone()),
natpmp_manager: natpmp_manager::NATPMPManager::new(config),
}
}
@@ -196,13 +191,6 @@
.upnp_task
.set_routine(move |s, l, t| Box::pin(this2.clone().upnp_task_routine(s, l, t)));
}
// Set natpmp tick task
{
let this2 = this.clone();
this.unlocked_inner
.natpmp_task
.set_routine(move |s, l, t| Box::pin(this2.clone().natpmp_task_routine(s, l, t)));
}
this
}
@@ -904,31 +892,11 @@ impl Network {
Ok(())
}
#[instrument(level = "trace", skip(self), err)]
pub async fn natpmp_task_routine(
self,
stop_token: StopToken,
_l: u64,
_t: u64,
) -> EyreResult<()> {
if !self.unlocked_inner.natpmp_manager.tick().await? {
info!("natpmp failed, restarting local network");
let mut inner = self.inner.lock();
inner.network_needs_restart = true;
}
Ok(())
}
pub async fn tick(&self) -> EyreResult<()> {
let (detect_address_changes, upnp, natpmp) = {
let (detect_address_changes, upnp) = {
let config = self.network_manager().config();
let c = config.get();
(
c.network.detect_address_changes,
c.network.upnp,
c.network.natpmp,
)
(c.network.detect_address_changes, c.network.upnp)
};
// If we need to figure out our network class, tick the task for it
@@ -962,11 +930,6 @@
self.unlocked_inner.upnp_task.tick().await?;
}
// If we need to tick natpmp, do it
if natpmp && !self.needs_restart() {
self.unlocked_inner.natpmp_task.tick().await?;
}
Ok(())
}
}

View File

@@ -1,18 +0,0 @@
use super::*;
pub struct NATPMPManager {
config: VeilidConfig,
}
impl NATPMPManager {
//
pub fn new(config: VeilidConfig) -> Self {
Self { config }
}
pub async fn tick(&self) -> EyreResult<bool> {
// xxx
Ok(true)
}
}

View File

@@ -275,7 +275,7 @@ impl DiscoveryContext {
LowLevelProtocolType::UDP => "udp",
LowLevelProtocolType::TCP => "tcp",
});
intf::sleep(PORT_MAP_VALIDATE_DELAY_MS).await
sleep(PORT_MAP_VALIDATE_DELAY_MS).await
} else {
break;
}
@@ -304,9 +304,9 @@
#[instrument(level = "trace", skip(self), ret)]
async fn try_port_mapping(&self) -> Option<DialInfo> {
let (enable_upnp, _enable_natpmp) = {
let enable_upnp = {
let c = self.net.config.get();
(c.network.upnp, c.network.natpmp)
c.network.upnp
};
if enable_upnp {

View File

@@ -58,7 +58,7 @@ impl Network {
// Don't waste more than N seconds getting it though, in case someone
// is trying to DoS us with a bunch of connections or something
// read a chunk of the stream
intf::timeout(
timeout(
tls_connection_initial_timeout_ms,
ps.peek_exact(&mut first_packet),
)

View File

@@ -10,7 +10,7 @@ impl Network {
c.network.protocol.udp.socket_pool_size
};
if task_count == 0 {
task_count = intf::get_concurrency() / 2;
task_count = get_concurrency() / 2;
if task_count == 0 {
task_count = 1;
}

View File

@@ -196,7 +196,7 @@ pub async fn nonblocking_connect(
let async_stream = Async::new(std::net::TcpStream::from(socket))?;
// The stream becomes writable when connected
timeout_or_try!(intf::timeout(timeout_ms, async_stream.writable())
timeout_or_try!(timeout(timeout_ms, async_stream.writable())
.await
.into_timeout_or()
.into_result()?);

View File

@@ -99,13 +99,13 @@ pub struct NetworkConnection {
impl NetworkConnection {
pub(super) fn dummy(id: NetworkConnectionId, descriptor: ConnectionDescriptor) -> Self {
// Create handle for sending (dummy is immediately disconnected)
let (sender, _receiver) = flume::bounded(intf::get_concurrency() as usize);
let (sender, _receiver) = flume::bounded(get_concurrency() as usize);
Self {
connection_id: id,
descriptor,
processor: None,
established_time: intf::get_timestamp(),
established_time: get_timestamp(),
stats: Arc::new(Mutex::new(NetworkConnectionStats {
last_message_sent_time: None,
last_message_recv_time: None,
@@ -125,7 +125,7 @@ impl NetworkConnection {
let descriptor = protocol_connection.descriptor();
// Create handle for sending
let (sender, receiver) = flume::bounded(intf::get_concurrency() as usize);
let (sender, receiver) = flume::bounded(get_concurrency() as usize);
// Create stats
let stats = Arc::new(Mutex::new(NetworkConnectionStats {
@@ -137,7 +137,7 @@
let local_stop_token = stop_source.token();
// Spawn connection processor and pass in protocol connection
let processor = intf::spawn(Self::process_connection(
let processor = spawn(Self::process_connection(
connection_manager,
local_stop_token,
manager_stop_token,
@@ -153,7 +153,7 @@
connection_id,
descriptor,
processor: Some(processor),
established_time: intf::get_timestamp(),
established_time: get_timestamp(),
stats,
sender,
stop_source: Some(stop_source),
@@ -185,7 +185,7 @@
stats: Arc<Mutex<NetworkConnectionStats>>,
message: Vec<u8>,
) -> io::Result<NetworkResult<()>> {
let ts = intf::get_timestamp();
let ts = get_timestamp();
let out = network_result_try!(protocol_connection.send(message).await?);
let mut stats = stats.lock();
@@ -199,7 +199,7 @@
protocol_connection: &ProtocolNetworkConnection,
stats: Arc<Mutex<NetworkConnectionStats>>,
) -> io::Result<NetworkResult<Vec<u8>>> {
let ts = intf::get_timestamp();
let ts = get_timestamp();
let out = network_result_try!(protocol_connection.recv().await?);
let mut stats = stats.lock();
@@ -246,7 +246,7 @@
// Push mutable timer so we can reset it
// Normally we would use an io::timeout here, but WASM won't support that, so we use a mutable sleep future
let new_timer = || {
intf::sleep(connection_manager.connection_inactivity_timeout_ms()).then(|_| async {
sleep(connection_manager.connection_inactivity_timeout_ms()).then(|_| async {
// timeout
log_net!("== Connection timeout on {:?}", descriptor.green());
RecvLoopAction::Timeout

View File

@@ -281,7 +281,7 @@ impl ReceiptManager {
};
(inner.next_oldest_ts, inner.timeout_task.clone(), stop_token)
};
let now = intf::get_timestamp();
let now = get_timestamp();
// If we have at least one timestamp to expire, let's do it
if let Some(next_oldest_ts) = next_oldest_ts {
if now >= next_oldest_ts {

View File

@@ -120,7 +120,7 @@ impl Bucket {
.iter()
.map(|(k, v)| (k.clone(), v.clone()))
.collect();
let cur_ts = intf::get_timestamp();
let cur_ts = get_timestamp();
sorted_entries.sort_by(|a, b| -> core::cmp::Ordering {
if a.0 == b.0 {
return core::cmp::Ordering::Equal;