Mirror of https://gitlab.com/veilid/veilid.git

Commit 24f5755d76 (parent 62eaedcaf8): [skip ci] more refactor
@@ -14,10 +14,13 @@ pub type UpdateCallback = Arc<dyn Fn(VeilidUpdate) + Send + Sync>;
type InitKey = (String, String);

/////////////////////////////////////////////////////////////////////////////
#[derive(Clone, Debug)]
pub(crate) struct VeilidCoreContext {
registry: VeilidComponentRegistry,
}

impl_veilid_component_registry_accessor!(VeilidCoreContext);

impl VeilidCoreContext {
#[instrument(level = "trace", target = "core_context", err, skip_all)]
async fn new_with_config_callback(

@@ -127,10 +130,6 @@ impl VeilidCoreContext {
// send final shutdown update
update_callback(VeilidUpdate::Shutdown);
}

pub fn registry(&self) -> VeilidComponentRegistry {
self.registry.clone()
}
}

/////////////////////////////////////////////////////////////////////////////
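Note: the hunks above collapse `VeilidCoreContext` down to a single `VeilidComponentRegistry` field plus the `impl_veilid_component_registry_accessor!` macro. The macro's expansion is not shown in this diff; the sketch below only illustrates the general shape of such a registry-accessor pattern with invented names (`Registry`, `RegistryAccessor`, `impl_registry_accessor!`), not Veilid's actual macro output.

```rust
// Hypothetical sketch of a registry-accessor pattern, assuming the type stores
// the registry in a `registry` field (as VeilidCoreContext does in this commit).
use std::sync::Arc;

#[derive(Clone)]
struct Registry {
    // A real registry would hold handles to every component.
    name: Arc<String>,
}

trait RegistryAccessor {
    fn registry(&self) -> Registry;
}

// A macro like `impl_veilid_component_registry_accessor!(T)` could plausibly
// expand to an impl of this kind.
macro_rules! impl_registry_accessor {
    ($t:ty) => {
        impl RegistryAccessor for $t {
            fn registry(&self) -> Registry {
                self.registry.clone()
            }
        }
    };
}

struct CoreContext {
    registry: Registry,
}
impl_registry_accessor!(CoreContext);

fn main() {
    let ctx = CoreContext {
        registry: Registry { name: Arc::new("example".into()) },
    };
    // Components are then looked up through the shared registry handle.
    println!("registry: {}", ctx.registry().name);
}
```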
@@ -192,7 +192,7 @@ impl Crypto {
// Schedule flushing
let registry = self.registry();
let flush_future = interval("crypto flush", 60000, move || {
let crypto = registry.lookup::<Crypto>().unwrap();
let crypto = registry.crypto();
async move {
if let Err(e) = crypto.flush().await {
warn!("flush failed: {}", e);
@@ -57,7 +57,6 @@ struct ConnectionManagerInner {
async_processor_jh: Option<MustJoinHandle<()>>,
stop_source: Option<StopSource>,
protected_addresses: HashMap<SocketAddress, ProtectedAddress>,
reconnection_processor: DeferredStreamProcessor,
}

struct ConnectionManagerArc {

@@ -67,6 +66,7 @@ struct ConnectionManagerArc {
address_lock_table: AsyncTagLockTable<SocketAddr>,
startup_lock: StartupLock,
inner: Mutex<Option<ConnectionManagerInner>>,
reconnection_processor: DeferredStreamProcessor,
}
impl core::fmt::Debug for ConnectionManagerArc {
fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {

@@ -89,7 +89,6 @@ impl ConnectionManager {
stop_source: StopSource,
sender: flume::Sender<ConnectionManagerEvent>,
async_processor_jh: MustJoinHandle<()>,
reconnection_processor: DeferredStreamProcessor,
) -> ConnectionManagerInner {
ConnectionManagerInner {
next_id: 0.into(),

@@ -97,7 +96,6 @@ impl ConnectionManager {
sender,
async_processor_jh: Some(async_processor_jh),
protected_addresses: HashMap::new(),
reconnection_processor,
}
}
fn new_arc(registry: VeilidComponentRegistry) -> ConnectionManagerArc {

@@ -111,6 +109,7 @@ impl ConnectionManager {
};

ConnectionManagerArc {
reconnection_processor: DeferredStreamProcessor::new(),
connection_initial_timeout_ms,
connection_inactivity_timeout_ms,
connection_table: ConnectionTable::new(registry),

@@ -147,21 +146,17 @@ impl ConnectionManager {
self.clone().async_processor(stop_source.token(), receiver),
);

// Spawn the reconnection processor
let mut reconnection_processor = DeferredStreamProcessor::new();
reconnection_processor.init().await;

// Store in the inner object
let mut inner = self.arc.inner.lock();
if inner.is_some() {
panic!("shouldn't start connection manager twice without shutting it down first");
{
let mut inner = self.arc.inner.lock();
if inner.is_some() {
panic!("shouldn't start connection manager twice without shutting it down first");
}
*inner = Some(Self::new_inner(stop_source, sender, async_processor));
}
*inner = Some(Self::new_inner(
stop_source,
sender,
async_processor,
reconnection_processor,
));

// Spawn the reconnection processor
self.arc.reconnection_processor.init().await;

guard.success();

@@ -175,6 +170,10 @@ impl ConnectionManager {
return;
};

// Stop the reconnection processor
log_net!(debug "stopping reconnection processor task");
self.arc.reconnection_processor.terminate().await;

// Remove the inner from the lock
let mut inner = {
let mut inner_lock = self.arc.inner.lock();

@@ -185,9 +184,6 @@ impl ConnectionManager {
}
}
};
// Stop the reconnection processor
log_net!(debug "stopping reconnection processor task");
inner.reconnection_processor.terminate().await;
// Stop all the connections and the async processor
log_net!(debug "stopping async processor task");
drop(inner.stop_source.take());

@@ -658,7 +654,7 @@ impl ConnectionManager {
// Reconnect the protected connection immediately
if reconnect {
if let Some(dial_info) = conn.dial_info() {
self.spawn_reconnector_inner(inner, dial_info);
self.spawn_reconnector(dial_info);
} else {
log_net!(debug "Can't reconnect to accepted protected connection: {} -> {} for node {}", conn.connection_id(), conn.debug_print(Timestamp::now()), protect_nr);
}

@@ -673,9 +669,9 @@ impl ConnectionManager {
}
}

fn spawn_reconnector_inner(&self, inner: &mut ConnectionManagerInner, dial_info: DialInfo) {
fn spawn_reconnector(&self, dial_info: DialInfo) {
let this = self.clone();
inner.reconnection_processor.add(
self.arc.reconnection_processor.add(
Box::pin(futures_util::stream::once(async { dial_info })),
move |dial_info| {
let this = this.clone();
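Note: in the `ConnectionManager` hunks the `reconnection_processor` moves from `ConnectionManagerInner` into the shared `ConnectionManagerArc`, so `spawn_reconnector` no longer needs a `&mut ConnectionManagerInner`. The diff feeds it a one-item stream built with `futures_util::stream::once`. The sketch below is not Veilid's `DeferredStreamProcessor`; it only shows, with a stand-in `DialInfo` and an inline driver, how a one-shot stream of work can be handed to a handler.

```rust
// Minimal sketch: driving a one-item stream through a reconnect handler,
// the way spawn_reconnector() hands a single DialInfo to the shared processor.
use futures_util::{stream, StreamExt};

#[derive(Clone, Debug)]
struct DialInfo(String); // stand-in for the real DialInfo type

async fn reconnect(dial_info: DialInfo) {
    // Real code would re-establish the protected connection here.
    println!("reconnecting to {:?}", dial_info);
}

#[tokio::main]
async fn main() {
    let dial_info = DialInfo("tcp/203.0.113.1:5150".into());

    // Mirrors Box::pin(futures_util::stream::once(async { dial_info })) in the diff.
    let work = stream::once(async { dial_info });

    // A deferred processor would drive each item through its handler later;
    // here we just drive the stream inline.
    work.for_each(reconnect).await;
}
```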
@@ -59,6 +59,14 @@ pub(super) struct DiscoveryContext {

impl_veilid_component_registry_accessor!(DiscoveryContext);

impl core::ops::Deref for DiscoveryContext {
type Target = DiscoveryContextUnlockedInner;

fn deref(&self) -> &Self::Target {
&self.unlocked_inner
}
}

impl DiscoveryContext {
pub fn new(registry: VeilidComponentRegistry, config: DiscoveryContextConfig) -> Self {
let routing_table = registry.routing_table();

@@ -137,9 +145,9 @@ impl DiscoveryContext {
.with(|c| c.network.dht.max_find_node_count as usize);
let routing_domain = RoutingDomain::PublicInternet;

let protocol_type = self.unlocked_inner.config.protocol_type;
let address_type = self.unlocked_inner.config.address_type;
let port = self.unlocked_inner.config.port;
let protocol_type = self.config.protocol_type;
let address_type = self.config.address_type;
let port = self.config.port;

// Build an filter that matches our protocol and address type
// and excludes relayed nodes so we can get an accurate external address

@@ -308,9 +316,9 @@ impl DiscoveryContext {

#[instrument(level = "trace", skip(self), ret)]
async fn try_upnp_port_mapping(&self) -> Option<DialInfo> {
let protocol_type = self.unlocked_inner.config.protocol_type;
let address_type = self.unlocked_inner.config.address_type;
let local_port = self.unlocked_inner.config.port;
let protocol_type = self.config.protocol_type;
let address_type = self.config.address_type;
let local_port = self.config.port;

let igd_protocol_type = match protocol_type.low_level_protocol_type() {
LowLevelProtocolType::UDP => IGDProtocolType::UDP,

@@ -323,12 +331,7 @@ impl DiscoveryContext {

let external_1 = self.inner.lock().external_info.first().unwrap().clone();

let igd_manager = self
.network_manager()
.net()
.unlocked_inner
.igd_manager
.clone();
let igd_manager = self.network_manager().net().igd_manager.clone();
let mut tries = 0;
loop {
tries += 1;

@@ -414,7 +417,7 @@ impl DiscoveryContext {
{
// Add public dial info with Direct dialinfo class
Some(DetectionResult {
config: this.unlocked_inner.config,
config: this.config,
ddi: DetectedDialInfo::Detected(DialInfoDetail {
dial_info: external_1.dial_info.clone(),
class: DialInfoClass::Direct,

@@ -424,7 +427,7 @@ impl DiscoveryContext {
} else {
// Add public dial info with Blocked dialinfo class
Some(DetectionResult {
config: this.unlocked_inner.config,
config: this.config,
ddi: DetectedDialInfo::Detected(DialInfoDetail {
dial_info: external_1.dial_info.clone(),
class: DialInfoClass::Blocked,

@@ -446,7 +449,7 @@ impl DiscoveryContext {
let inner = self.inner.lock();
inner.external_info.clone()
};
let local_port = self.unlocked_inner.config.port;
let local_port = self.config.port;

// Get the external dial info histogram for our use here
let mut external_info_addr_port_hist = HashMap::<SocketAddress, usize>::new();

@@ -502,7 +505,7 @@ impl DiscoveryContext {
let do_symmetric_nat_fut: SendPinBoxFuture<Option<DetectionResult>> =
Box::pin(async move {
Some(DetectionResult {
config: this.unlocked_inner.config,
config: this.config,
ddi: DetectedDialInfo::SymmetricNAT,
external_address_types,
})

@@ -536,7 +539,7 @@ impl DiscoveryContext {
{
// Add public dial info with Direct dialinfo class
return Some(DetectionResult {
config: c_this.unlocked_inner.config,
config: c_this.config,
ddi: DetectedDialInfo::Detected(DialInfoDetail {
dial_info: external_1_dial_info_with_local_port,
class: DialInfoClass::Direct,

@@ -583,7 +586,7 @@ impl DiscoveryContext {
// Add public dial info with full cone NAT network class

return Some(DetectionResult {
config: c_this.unlocked_inner.config,
config: c_this.config,
ddi: DetectedDialInfo::Detected(DialInfoDetail {
dial_info: c_external_1.dial_info,
class: DialInfoClass::FullConeNAT,

@@ -618,7 +621,7 @@ impl DiscoveryContext {
{
// Got a reply from a non-default port, which means we're only address restricted
return Some(DetectionResult {
config: c_this.unlocked_inner.config,
config: c_this.config,
ddi: DetectedDialInfo::Detected(DialInfoDetail {
dial_info: c_external_1.dial_info.clone(),
class: DialInfoClass::AddressRestrictedNAT,

@@ -630,7 +633,7 @@ impl DiscoveryContext {
}
// Didn't get a reply from a non-default port, which means we are also port restricted
Some(DetectionResult {
config: c_this.unlocked_inner.config,
config: c_this.config,
ddi: DetectedDialInfo::Detected(DialInfoDetail {
dial_info: c_external_1.dial_info.clone(),
class: DialInfoClass::PortRestrictedNAT,

@@ -696,7 +699,7 @@ impl DiscoveryContext {
if let Some(external_mapped_dial_info) = this.try_upnp_port_mapping().await {
// Got a port mapping, let's use it
return Some(DetectionResult {
config: this.unlocked_inner.config,
config: this.config,
ddi: DetectedDialInfo::Detected(DialInfoDetail {
dial_info: external_mapped_dial_info.clone(),
class: DialInfoClass::Mapped,

@@ -720,12 +723,7 @@ impl DiscoveryContext {
.lock()
.external_info
.iter()
.find_map(|ei| {
self.unlocked_inner
.intf_addrs
.contains(&ei.address)
.then_some(true)
})
.find_map(|ei| self.intf_addrs.contains(&ei.address).then_some(true))
.unwrap_or_default();

if local_address_in_external_info {
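Note: most of the `DiscoveryContext` churn above (and the `Network` hunks below) is the same shortening of `self.unlocked_inner.config.*` to `self.config.*`, enabled by the new `impl core::ops::Deref for DiscoveryContext` with `Target = DiscoveryContextUnlockedInner`. A minimal standalone illustration of that pattern, using made-up types rather than the real Veilid structs:

```rust
use std::sync::Arc;

struct UnlockedInner {
    port: u16,
}

struct Context {
    unlocked_inner: Arc<UnlockedInner>,
}

impl core::ops::Deref for Context {
    type Target = UnlockedInner;
    fn deref(&self) -> &Self::Target {
        &self.unlocked_inner
    }
}

fn main() {
    let ctx = Context {
        unlocked_inner: Arc::new(UnlockedInner { port: 5150 }),
    };
    // Deref coercion lets field access skip the `unlocked_inner` hop, which is
    // exactly what turns `self.unlocked_inner.config.port` into `self.config.port`.
    assert_eq!(ctx.port, 5150);
    println!("port = {}", ctx.port);
}
```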
@@ -119,6 +119,7 @@ struct NetworkUnlockedInner {

// Network
interfaces: NetworkInterfaces,

// Background processes
update_network_class_task: TickTask<EyreReport>,
network_interfaces_task: TickTask<EyreReport>,

@@ -138,6 +139,14 @@ pub(super) struct Network {

impl_veilid_component_registry_accessor!(Network);

impl core::ops::Deref for Network {
type Target = NetworkUnlockedInner;

fn deref(&self) -> &Self::Target {
&self.unlocked_inner
}
}

impl Network {
fn new_inner() -> NetworkInner {
NetworkInner {

@@ -322,7 +331,7 @@ impl Network {
dial_info: DialInfo,
data: Vec<u8>,
) -> EyreResult<NetworkResult<()>> {
let _guard = self.unlocked_inner.startup_lock.enter()?;
let _guard = self.startup_lock.enter()?;

self.record_dial_info_failure(
dial_info.clone(),

@@ -400,7 +409,7 @@ impl Network {
data: Vec<u8>,
timeout_ms: u32,
) -> EyreResult<NetworkResult<Vec<u8>>> {
let _guard = self.unlocked_inner.startup_lock.enter()?;
let _guard = self.startup_lock.enter()?;

self.record_dial_info_failure(
dial_info.clone(),

@@ -517,7 +526,7 @@ impl Network {
flow: Flow,
data: Vec<u8>,
) -> EyreResult<SendDataToExistingFlowResult> {
let _guard = self.unlocked_inner.startup_lock.enter()?;
let _guard = self.startup_lock.enter()?;

let data_len = data.len();

@@ -588,7 +597,7 @@ impl Network {
dial_info: DialInfo,
data: Vec<u8>,
) -> EyreResult<NetworkResult<UniqueFlow>> {
let _guard = self.unlocked_inner.startup_lock.enter()?;
let _guard = self.startup_lock.enter()?;

self.record_dial_info_failure(
dial_info.clone(),

@@ -776,7 +785,7 @@ impl Network {

#[instrument(level = "debug", err, skip_all)]
pub async fn startup(&self) -> EyreResult<StartupDisposition> {
let guard = self.unlocked_inner.startup_lock.startup()?;
let guard = self.startup_lock.startup()?;

match self.startup_internal().await {
Ok(StartupDisposition::Success) => {

@@ -802,7 +811,7 @@ impl Network {
}

pub fn is_started(&self) -> bool {
self.unlocked_inner.startup_lock.is_started()
self.startup_lock.is_started()
}

#[instrument(level = "debug", skip_all)]

@@ -816,7 +825,7 @@ impl Network {

// Stop all tasks
log_net!(debug "stopping update network class task");
if let Err(e) = self.unlocked_inner.update_network_class_task.stop().await {
if let Err(e) = self.update_network_class_task.stop().await {
error!("update_network_class_task not cancelled: {}", e);
}

@@ -854,7 +863,7 @@ impl Network {
#[instrument(level = "debug", skip_all)]
pub async fn shutdown(&self) {
log_net!(debug "starting low level network shutdown");
let Ok(guard) = self.unlocked_inner.startup_lock.shutdown().await else {
let Ok(guard) = self.startup_lock.shutdown().await else {
log_net!(debug "low level network is already shut down");
return;
};

@@ -870,7 +879,7 @@ impl Network {
&self,
punishment: Option<Box<dyn FnOnce() + Send + 'static>>,
) {
let Ok(_guard) = self.unlocked_inner.startup_lock.enter() else {
let Ok(_guard) = self.startup_lock.enter() else {
log_net!(debug "ignoring due to not started up");
return;
};

@@ -880,7 +889,7 @@ impl Network {
}

pub fn needs_public_dial_info_check(&self) -> bool {
let Ok(_guard) = self.unlocked_inner.startup_lock.enter() else {
let Ok(_guard) = self.startup_lock.enter() else {
log_net!(debug "ignoring due to not started up");
return false;
};
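Note: besides shortening `self.unlocked_inner.startup_lock` to `self.startup_lock`, these hunks show the StartupLock call order this code relies on: `startup()?` yields a guard that must be confirmed with `guard.success()`, `enter()?` gates normal calls, and `shutdown().await` gates teardown. The toy type below is not Veilid's `StartupLock`; it is only a rough, synchronous sketch of that call order.

```rust
// Toy stand-in for a StartupLock-style guard, illustrating the call order used in the diff.
use std::sync::atomic::{AtomicBool, Ordering};

struct StartupLock {
    started: AtomicBool,
}

struct StartupGuard<'a> {
    lock: &'a StartupLock,
}

impl<'a> StartupGuard<'a> {
    fn success(self) {
        // Only a successful startup flips the flag; dropping the guard without
        // calling success() leaves the component "not started".
        self.lock.started.store(true, Ordering::SeqCst);
    }
}

impl StartupLock {
    fn new() -> Self {
        Self { started: AtomicBool::new(false) }
    }
    fn startup(&self) -> Result<StartupGuard<'_>, &'static str> {
        if self.started.load(Ordering::SeqCst) {
            return Err("already started");
        }
        Ok(StartupGuard { lock: self })
    }
    fn enter(&self) -> Result<(), &'static str> {
        if self.started.load(Ordering::SeqCst) { Ok(()) } else { Err("not started up") }
    }
    fn shutdown(&self) -> Result<(), &'static str> {
        if self.started.swap(false, Ordering::SeqCst) { Ok(()) } else { Err("already shut down") }
    }
}

fn main() {
    let lock = StartupLock::new();
    assert!(lock.enter().is_err()); // calls are rejected before startup

    let guard = lock.startup().unwrap();
    guard.success(); // mark startup as complete
    assert!(lock.enter().is_ok()); // now normal operations may proceed

    lock.shutdown().unwrap();
    assert!(lock.enter().is_err()); // and are rejected again after shutdown
}
```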
@@ -28,7 +28,7 @@ pub(super) struct NetworkState {

impl Network {
fn make_stable_interface_addresses(&self) -> Vec<IpAddr> {
let addrs = self.unlocked_inner.interfaces.stable_addresses();
let addrs = self.interfaces.stable_addresses();
let mut addrs: Vec<IpAddr> = addrs
.into_iter()
.filter(|addr| {

@@ -57,8 +57,7 @@ impl Network {

pub(super) async fn make_network_state(&self) -> EyreResult<NetworkState> {
// refresh network interfaces
self.unlocked_inner
.interfaces
self.interfaces
.refresh()
.await
.wrap_err("failed to refresh network interfaces")?;

@@ -66,7 +65,7 @@ impl Network {
// build the set of networks we should consider for the 'LocalNetwork' routing domain
let mut local_networks: HashSet<(IpAddr, IpAddr)> = HashSet::new();

self.unlocked_inner
self
.interfaces
.with_interfaces(|interfaces| {
for intf in interfaces.values() {
@@ -7,46 +7,41 @@ use super::*;
impl Network {
pub fn setup_tasks(&self) {
// Set update network class tick task
{
let this = self.clone();
self.unlocked_inner
.update_network_class_task
.set_routine(move |s, l, t| {
Box::pin(this.clone().update_network_class_task_routine(
s,
Timestamp::new(l),
Timestamp::new(t),
))
});
}
let this = self.clone();
self.update_network_class_task.set_routine(move |s, l, t| {
let this = this.clone();
Box::pin(async move {
this.update_network_class_task_routine(s, Timestamp::new(l), Timestamp::new(t))
.await
})
});

// Set network interfaces tick task
{
let this = self.clone();
self.unlocked_inner
.network_interfaces_task
.set_routine(move |s, l, t| {
Box::pin(this.clone().network_interfaces_task_routine(
s,
Timestamp::new(l),
Timestamp::new(t),
))
});
}
let this = self.clone();
self.network_interfaces_task.set_routine(move |s, l, t| {
let this = this.clone();
Box::pin(async move {
this.network_interfaces_task_routine(s, Timestamp::new(l), Timestamp::new(t))
.await
})
});

// Set upnp tick task
{
let this = self.clone();
self.unlocked_inner.upnp_task.set_routine(move |s, l, t| {
Box::pin(
this.clone()
.upnp_task_routine(s, Timestamp::new(l), Timestamp::new(t)),
)
self.upnp_task.set_routine(move |s, l, t| {
let this = this.clone();
Box::pin(async move {
this.upnp_task_routine(s, Timestamp::new(l), Timestamp::new(t))
.await
})
});
}
}

#[instrument(level = "trace", target = "net", name = "Network::tick", skip_all, err)]
pub async fn tick(&self) -> EyreResult<()> {
let Ok(_guard) = self.unlocked_inner.startup_lock.enter() else {
let Ok(_guard) = self.startup_lock.enter() else {
log_net!(debug "ignoring due to not started up");
return Ok(());
};

@@ -65,7 +60,7 @@ impl Network {
// If we need to figure out our network class, tick the task for it
if detect_address_changes {
// Check our network interfaces to see if they have changed
self.unlocked_inner.network_interfaces_task.tick().await?;
self.network_interfaces_task.tick().await?;

// Check our public dial info to see if it has changed
let public_internet_network_class = self

@@ -95,14 +90,14 @@ impl Network {
}

if has_at_least_two {
self.unlocked_inner.update_network_class_task.tick().await?;
self.update_network_class_task.tick().await?;
}
}
}

// If we need to tick upnp, do it
if upnp {
self.unlocked_inner.upnp_task.tick().await?;
self.upnp_task.tick().await?;
}

Ok(())
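Note: the `setup_tasks()` hunk rewrites each tick-task routine so the closure clones `this` on every invocation and wraps the call in `Box::pin(async move { ... })`; cloning inside the closure keeps it `Fn` (callable repeatedly) while each returned future owns its own handle. A generic, self-contained sketch of that shape; the `TickTask`-like type here is invented for illustration, not Veilid's `TickTask`.

```rust
use std::future::Future;
use std::pin::Pin;
use std::sync::Arc;

type BoxFuture = Pin<Box<dyn Future<Output = ()> + Send>>;

// Invented stand-in for a tick task that stores a routine and can run it many times.
struct TickTask {
    routine: Box<dyn Fn(u64) -> BoxFuture + Send + Sync>,
}

impl TickTask {
    fn new_with_routine(routine: impl Fn(u64) -> BoxFuture + Send + Sync + 'static) -> Self {
        Self { routine: Box::new(routine) }
    }
    async fn tick(&self, now: u64) {
        (self.routine)(now).await;
    }
}

#[derive(Clone)]
struct Network {
    name: Arc<String>,
}

impl Network {
    async fn update_network_class_task_routine(&self, now: u64) {
        println!("{}: updating network class at t={}", self.name, now);
    }
}

#[tokio::main]
async fn main() {
    let network = Network { name: Arc::new("net0".into()) };

    // Same shape as the diff: clone once to move into the closure, clone again per
    // call, and wrap the &self method call in an owned `async move` block.
    let this = network.clone();
    let task = TickTask::new_with_routine(move |t| -> BoxFuture {
        let this = this.clone();
        Box::pin(async move { this.update_network_class_task_routine(t).await })
    });

    task.tick(1).await;
    task.tick(2).await;
}
```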
@@ -8,7 +8,7 @@ impl Network {
_l: Timestamp,
_t: Timestamp,
) -> EyreResult<()> {
let _guard = self.unlocked_inner.network_task_lock.lock().await;
let _guard = self.network_task_lock.lock().await;

self.update_network_state().await?;
@@ -13,7 +13,7 @@ impl Network {
l: Timestamp,
t: Timestamp,
) -> EyreResult<()> {
let _guard = self.unlocked_inner.network_task_lock.lock().await;
let _guard = self.network_task_lock.lock().await;

// Do the public dial info check
let finished = self.do_public_dial_info_check(stop_token, l, t).await?;
@@ -3,12 +3,12 @@ use super::*;
impl Network {
#[instrument(parent = None, level = "trace", target = "net", skip_all, err)]
pub(super) async fn upnp_task_routine(
self,
&self,
_stop_token: StopToken,
_l: Timestamp,
_t: Timestamp,
) -> EyreResult<()> {
if !self.unlocked_inner.igd_manager.tick().await? {
if !self.igd_manager.tick().await? {
info!("upnp failed, restarting local network");
let mut inner = self.inner.lock();
inner.network_needs_restart = true;
@@ -67,20 +67,25 @@ struct NetworkInner {
struct NetworkUnlockedInner {
// Startup lock
startup_lock: StartupLock,

// Accessors
routing_table: RoutingTable,
network_manager: NetworkManager,
connection_manager: ConnectionManager,
}

#[derive(Clone)]
pub(super) struct Network {
config: VeilidConfig,
registry: VeilidComponentRegistry,
inner: Arc<Mutex<NetworkInner>>,
unlocked_inner: Arc<NetworkUnlockedInner>,
}

impl_veilid_component_registry_accessor!(Network);

impl core::ops::Deref for Network {
type Target = NetworkUnlockedInner;

fn deref(&self) -> &Self::Target {
&self.unlocked_inner
}
}

impl Network {
fn new_inner() -> NetworkInner {
NetworkInner {

@@ -89,45 +94,20 @@ impl Network {
}
}

fn new_unlocked_inner(
network_manager: NetworkManager,
routing_table: RoutingTable,
connection_manager: ConnectionManager,
) -> NetworkUnlockedInner {
fn new_unlocked_inner() -> NetworkUnlockedInner {
NetworkUnlockedInner {
startup_lock: StartupLock::new(),
network_manager,
routing_table,
connection_manager,
}
}

pub fn new(
network_manager: NetworkManager,
routing_table: RoutingTable,
connection_manager: ConnectionManager,
) -> Self {
pub fn new(registry: VeilidComponentRegistry) -> Self {
Self {
config: network_manager.config(),
inner: Arc::new(Mutex::new(Self::new_inner())),
unlocked_inner: Arc::new(Self::new_unlocked_inner(
network_manager,
routing_table,
connection_manager,
)),
unlocked_inner: Arc::new(Self::new_unlocked_inner(registry.clone())),
registry,
}
}

fn network_manager(&self) -> NetworkManager {
self.unlocked_inner.network_manager.clone()
}
fn routing_table(&self) -> RoutingTable {
self.unlocked_inner.routing_table.clone()
}
fn connection_manager(&self) -> ConnectionManager {
self.unlocked_inner.connection_manager.clone()
}

/////////////////////////////////////////////////////////////////

// Record DialInfo failures

@@ -210,7 +190,7 @@ impl Network {
data: Vec<u8>,
timeout_ms: u32,
) -> EyreResult<NetworkResult<Vec<u8>>> {
let _guard = self.unlocked_inner.startup_lock.enter()?;
let _guard = self.startup_lock.enter()?;

self.record_dial_info_failure(dial_info.clone(), async move {
let data_len = data.len();

@@ -271,7 +251,7 @@ impl Network {
flow: Flow,
data: Vec<u8>,
) -> EyreResult<SendDataToExistingFlowResult> {
let _guard = self.unlocked_inner.startup_lock.enter()?;
let _guard = self.startup_lock.enter()?;

let data_len = data.len();
match flow.protocol_type() {

@@ -320,7 +300,7 @@ impl Network {
dial_info: DialInfo,
data: Vec<u8>,
) -> EyreResult<NetworkResult<UniqueFlow>> {
let _guard = self.unlocked_inner.startup_lock.enter()?;
let _guard = self.startup_lock.enter()?;

self.record_dial_info_failure(dial_info.clone(), async move {
let data_len = data.len();

@@ -398,10 +378,7 @@ impl Network {
self.inner.lock().protocol_config = protocol_config.clone();

// Start editing routing table
let mut editor_public_internet = self
.unlocked_inner
.routing_table
.edit_public_internet_routing_domain();
let mut editor_public_internet = self.routing_table.edit_public_internet_routing_domain();

// set up the routing table's network config
editor_public_internet.setup_network(

@@ -421,7 +398,7 @@ impl Network {

#[instrument(level = "debug", err, skip_all)]
pub async fn startup(&self) -> EyreResult<StartupDisposition> {
let guard = self.unlocked_inner.startup_lock.startup()?;
let guard = self.startup_lock.startup()?;

match self.startup_internal().await {
Ok(StartupDisposition::Success) => {

@@ -445,7 +422,7 @@ impl Network {
}

pub fn is_started(&self) -> bool {
self.unlocked_inner.startup_lock.is_started()
self.startup_lock.is_started()
}

#[instrument(level = "debug", skip_all)]

@@ -456,7 +433,7 @@ impl Network {
#[instrument(level = "debug", skip_all)]
pub async fn shutdown(&self) {
log_net!(debug "starting low level network shutdown");
let Ok(guard) = self.unlocked_inner.startup_lock.shutdown().await else {
let Ok(guard) = self.startup_lock.shutdown().await else {
log_net!(debug "low level network is already shut down");
return;
};

@@ -493,14 +470,14 @@ impl Network {
&self,
_punishment: Option<Box<dyn FnOnce() + Send + 'static>>,
) {
let Ok(_guard) = self.unlocked_inner.startup_lock.enter() else {
let Ok(_guard) = self.startup_lock.enter() else {
log_net!(debug "ignoring due to not started up");
return;
};
}

pub fn needs_public_dial_info_check(&self) -> bool {
let Ok(_guard) = self.unlocked_inner.startup_lock.enter() else {
let Ok(_guard) = self.startup_lock.enter() else {
log_net!(debug "ignoring due to not started up");
return false;
};

@@ -511,7 +488,7 @@ impl Network {
//////////////////////////////////////////
#[instrument(level = "trace", target = "net", name = "Network::tick", skip_all, err)]
pub async fn tick(&self) -> EyreResult<()> {
let Ok(_guard) = self.unlocked_inner.startup_lock.enter() else {
let Ok(_guard) = self.startup_lock.enter() else {
log_net!(debug "ignoring due to not started up");
return Ok(());
};
@@ -85,7 +85,7 @@ impl RoutingTable {
let Some(vcrypto) = crypto.get(crypto_kind) else {
return NetworkResult::invalid_message("unsupported cryptosystem");
};
let vcrypto = &*vcrypto;
let vcrypto = &vcrypto;

let own_distance = vcrypto.distance(&own_node_id.value, &key.value);

@@ -165,8 +165,8 @@ impl RoutingTable {

/// Determine if set of peers is closer to key_near than key_far is to key_near
#[instrument(level = "trace", target = "rtab", skip_all, err)]
pub fn verify_peers_closer<'a>(
vcrypto: &'a (dyn CryptoSystem + Send + Sync),
pub fn verify_peers_closer(
vcrypto: &crypto::CryptoSystemGuard<'_>,
key_far: TypedKey,
key_near: TypedKey,
peers: &[Arc<PeerInfo>],
@@ -40,7 +40,7 @@ impl RouteNode {

pub fn node_ref(
&self,
routing_table: RoutingTable,
routing_table: &RoutingTable,
crypto_kind: CryptoKind,
) -> Option<NodeRef> {
match self {
@@ -105,7 +105,6 @@ impl RoutingTable {
return Ok(());
};

let rpc = self.rpc_processor();
// Get our publicinternet dial info
let dids = self.all_filtered_dial_info_details(
RoutingDomain::PublicInternet.into(),

@@ -183,8 +182,8 @@ impl RoutingTable {
futurequeue.push_back(
async move {
log_rtab!("--> PublicInternet Relay ping to {:?}", relay_nr_filtered);
let _ = self
.rpc_processor()
let rpc_processor = relay_nr_filtered.rpc_processor();
let _ = rpc_processor
.rpc_call_status(Destination::direct(relay_nr_filtered))
.await?;
Ok(())

@@ -225,13 +224,12 @@ impl RoutingTable {
let watch_destinations = self.storage_manager().get_active_watch_nodes().await;

for watch_destination in watch_destinations {
let registry = self.registry();
futurequeue.push_back(
async move {
log_rtab!("--> Watch Keepalive ping to {:?}", watch_destination);
let _ = self
.rpc_processor()
.rpc_call_status(watch_destination)
.await?;
let rpc_processor = registry.rpc_processor();
let _ = rpc_processor.rpc_call_status(watch_destination).await?;
Ok(())
}
.boxed(),

@@ -259,8 +257,8 @@ impl RoutingTable {
async move {
#[cfg(feature = "verbose-tracing")]
log_rtab!(debug "--> PublicInternet Validator ping to {:?}", nr);
let _ = self
.rpc_processor()
let rpc_processor = nr.rpc_processor();
let _ = rpc_processor
.rpc_call_status(Destination::direct(nr))
.await?;
Ok(())

@@ -280,8 +278,6 @@ impl RoutingTable {
cur_ts: Timestamp,
futurequeue: &mut VecDeque<PingValidatorFuture>,
) -> EyreResult<()> {
let rpc = self.rpc_processor();

// Get all nodes needing pings in the LocalNetwork routing domain
let node_refs = self.get_nodes_needing_ping(RoutingDomain::LocalNetwork, cur_ts);

@@ -292,10 +288,12 @@ impl RoutingTable {
// Just do a single ping with the best protocol for all the nodes
futurequeue.push_back(
async move {
let rpc = nr.rpc_processor();
#[cfg(feature = "verbose-tracing")]
log_rtab!(debug "--> LocalNetwork Validator ping to {:?}", nr);
let _ = rpc.rpc_call_status(Destination::direct(nr)).await?;
let rpc_processor = nr.rpc_processor();
let _ = rpc_processor
.rpc_call_status(Destination::direct(nr))
.await?;
Ok(())
}
.boxed(),
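Note: in these ping-validator hunks the queued futures stop capturing `self` (which would tie them to the `RoutingTable` borrow) and instead take an owned handle up front, either `nr.rpc_processor()` or a cloned `self.registry()`, resolving the `RPCProcessor` inside the `async move` block. The sketch below uses invented `Registry`/`RpcProcessor` types, not Veilid's, to show why that helps: the owned handle makes each queued future `'static`, so it can sit in a `VecDeque` and be driven later.

```rust
use futures_util::future::{BoxFuture, FutureExt};
use std::collections::VecDeque;
use std::sync::Arc;

// Invented stand-ins for the registry and the component it hands out.
#[derive(Clone)]
struct Registry {
    rpc: Arc<RpcProcessor>,
}
struct RpcProcessor;

impl Registry {
    fn rpc_processor(&self) -> Arc<RpcProcessor> {
        self.rpc.clone()
    }
}

impl RpcProcessor {
    async fn rpc_call_status(&self, destination: &str) -> Result<(), String> {
        println!("--> status ping to {destination}");
        Ok(())
    }
}

#[tokio::main]
async fn main() -> Result<(), String> {
    let registry = Registry { rpc: Arc::new(RpcProcessor) };
    let mut futurequeue: VecDeque<BoxFuture<'static, Result<(), String>>> = VecDeque::new();

    for destination in ["node-a", "node-b"] {
        // Clone the handle outside, resolve the component inside the future,
        // mirroring `let registry = self.registry();` + `registry.rpc_processor()`.
        let registry = registry.clone();
        futurequeue.push_back(
            async move {
                let rpc_processor = registry.rpc_processor();
                rpc_processor.rpc_call_status(destination).await?;
                Ok(())
            }
            .boxed(),
        );
    }

    while let Some(fut) = futurequeue.pop_front() {
        fut.await?;
    }
    Ok(())
}
```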
@@ -7,7 +7,7 @@ const MAX_GET_VALUE_A_PEERS_LEN: usize = 20;
pub(in crate::rpc_processor) struct ValidateGetValueContext {
pub last_descriptor: Option<SignedValueDescriptor>,
pub subkey: ValueSubkey,
pub vcrypto: CryptoSystemVersion,
pub crypto_kind: CryptoKind,
}

impl fmt::Debug for ValidateGetValueContext {

@@ -15,7 +15,7 @@ impl fmt::Debug for ValidateGetValueContext {
f.debug_struct("ValidateGetValueContext")
.field("last_descriptor", &self.last_descriptor)
.field("subkey", &self.subkey)
.field("vcrypto", &self.vcrypto.kind().to_string())
.field("crypto_kind", &self.crypto_kind)
.finish()
}
}

@@ -106,8 +106,6 @@ impl RPCOperationGetValueA {
}

pub fn validate(&mut self, validate_context: &RPCValidateContext) -> Result<(), RPCError> {
let crypto = validate_context.crypto();

let question_context = validate_context
.question_context
.as_ref()

@@ -116,12 +114,15 @@ impl RPCOperationGetValueA {
panic!("Wrong context type for GetValueA");
};

let crypto = validate_context.crypto();
let Some(vcrypto) = crypto.get(get_value_context.crypto_kind) else {
return Err(RPCError::protocol("unsupported cryptosystem"));
};

// Validate descriptor
if let Some(descriptor) = &self.descriptor {
// Ensure the descriptor itself validates
descriptor
.validate(get_value_context.vcrypto.clone())
.map_err(RPCError::protocol)?;
descriptor.validate(&vcrypto).map_err(RPCError::protocol)?;

// Ensure descriptor matches last one
if let Some(last_descriptor) = &get_value_context.last_descriptor {

@@ -148,11 +149,7 @@ impl RPCOperationGetValueA {

// And the signed value data
if !value
.validate(
descriptor.owner(),
get_value_context.subkey,
get_value_context.vcrypto.clone(),
)
.validate(descriptor.owner(), get_value_context.subkey, &vcrypto)
.map_err(RPCError::protocol)?
{
return Err(RPCError::protocol("signed value data did not validate"));
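Note: the GetValueA hunks (and the matching InspectValueA/SetValueA/WatchValue ones below) drop the owned `vcrypto: CryptoSystemVersion` that was cloned into every call. The validation context now stores only a `crypto_kind`, resolves a crypto-system handle from `validate_context.crypto()` when needed, and passes it by reference (`&vcrypto`). The miniature below is invented (plain references instead of Veilid's `CryptoSystemGuard`) and only illustrates that resolve-by-kind, pass-by-reference shape.

```rust
// Invented miniature of the pattern: keep only a `crypto_kind` in the validation
// context, resolve the crypto system when validating, and borrow it instead of cloning.
use std::collections::HashMap;

type CryptoKind = &'static str;

struct CryptoSystem {
    kind: CryptoKind,
}

impl CryptoSystem {
    fn verify(&self, _payload: &[u8]) -> bool {
        true // real code would check a signature here
    }
}

struct Crypto {
    systems: HashMap<CryptoKind, CryptoSystem>,
}

impl Crypto {
    fn get(&self, kind: CryptoKind) -> Option<&CryptoSystem> {
        self.systems.get(kind)
    }
}

struct ValidateContext {
    crypto_kind: CryptoKind, // the context now stores the kind, not an owned crypto system
}

struct Descriptor(Vec<u8>);

impl Descriptor {
    // Borrowing the crypto system avoids the `vcrypto.clone()` calls removed by the diff.
    fn validate(&self, vcrypto: &CryptoSystem) -> Result<(), String> {
        if vcrypto.verify(&self.0) {
            Ok(())
        } else {
            Err(format!("descriptor did not validate for {}", vcrypto.kind))
        }
    }
}

fn validate(ctx: &ValidateContext, crypto: &Crypto, descriptor: &Descriptor) -> Result<(), String> {
    let Some(vcrypto) = crypto.get(ctx.crypto_kind) else {
        return Err("unsupported cryptosystem".to_string());
    };
    descriptor.validate(vcrypto)
}

fn main() {
    let crypto = Crypto {
        systems: HashMap::from([("VLD0", CryptoSystem { kind: "VLD0" })]),
    };
    let ctx = ValidateContext { crypto_kind: "VLD0" };
    let descriptor = Descriptor(vec![1, 2, 3]);
    assert!(validate(&ctx, &crypto, &descriptor).is_ok());
    println!("descriptor validated");
}
```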
@@ -9,14 +9,14 @@ const MAX_INSPECT_VALUE_A_PEERS_LEN: usize = 20;
pub(in crate::rpc_processor) struct ValidateInspectValueContext {
pub last_descriptor: Option<SignedValueDescriptor>,
pub subkeys: ValueSubkeyRangeSet,
pub vcrypto: CryptoSystemVersion,
pub crypto_kind: CryptoKind,
}

impl fmt::Debug for ValidateInspectValueContext {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("ValidateInspectValueContext")
.field("last_descriptor", &self.last_descriptor)
.field("vcrypto", &self.vcrypto.kind().to_string())
.field("crypto_kind", &self.crypto_kind)
.finish()
}
}

@@ -155,6 +155,11 @@ impl RPCOperationInspectValueA {
panic!("Wrong context type for InspectValueA");
};

let crypto = validate_context.crypto();
let Some(vcrypto) = crypto.get(inspect_value_context.crypto_kind) else {
return Err(RPCError::protocol("unsupported cryptosystem"));
};

// Ensure seqs returned does not exceeed subkeys requested
#[allow(clippy::unnecessary_cast)]
if self.seqs.len() as u64 > inspect_value_context.subkeys.len() as u64 {

@@ -168,9 +173,7 @@ impl RPCOperationInspectValueA {
// Validate descriptor
if let Some(descriptor) = &self.descriptor {
// Ensure the descriptor itself validates
descriptor
.validate(inspect_value_context.vcrypto.clone())
.map_err(RPCError::protocol)?;
descriptor.validate(&vcrypto).map_err(RPCError::protocol)?;

// Ensure descriptor matches last one
if let Some(last_descriptor) = &inspect_value_context.last_descriptor {

@@ -182,7 +185,7 @@ impl RPCOperationInspectValueA {
}
}

PeerInfo::validate_vec(&mut self.peers, validate_context.crypto.clone());
PeerInfo::validate_vec(&mut self.peers, &crypto);
Ok(())
}
@@ -7,7 +7,7 @@ const MAX_SET_VALUE_A_PEERS_LEN: usize = 20;
pub(in crate::rpc_processor) struct ValidateSetValueContext {
pub descriptor: SignedValueDescriptor,
pub subkey: ValueSubkey,
pub vcrypto: CryptoSystemVersion,
pub crypto_kind: CryptoKind,
}

impl fmt::Debug for ValidateSetValueContext {

@@ -15,7 +15,7 @@ impl fmt::Debug for ValidateSetValueContext {
f.debug_struct("ValidateSetValueContext")
.field("descriptor", &self.descriptor)
.field("subkey", &self.subkey)
.field("vcrypto", &self.vcrypto.kind().to_string())
.field("crypto_kind", &self.crypto_kind)
.finish()
}
}

@@ -144,10 +144,15 @@ impl RPCOperationSetValueA {
panic!("Wrong context type for SetValueA");
};

let crypto = validate_context.crypto();
let Some(vcrypto) = crypto.get(set_value_context.crypto_kind) else {
return Err(RPCError::protocol("unsupported cryptosystem"));
};

// Ensure the descriptor itself validates
set_value_context
.descriptor
.validate(set_value_context.vcrypto.clone())
.validate(&vcrypto)
.map_err(RPCError::protocol)?;

if let Some(value) = &self.value {

@@ -156,7 +161,7 @@ impl RPCOperationSetValueA {
.validate(
set_value_context.descriptor.owner(),
set_value_context.subkey,
set_value_context.vcrypto.clone(),
&vcrypto,
)
.map_err(RPCError::protocol)?
{

@@ -164,7 +169,7 @@ impl RPCOperationSetValueA {
}
}

PeerInfo::validate_vec(&mut self.peers, validate_context.crypto.clone());
PeerInfo::validate_vec(&mut self.peers, &crypto);
Ok(())
}
@@ -22,7 +22,7 @@ impl RPCOperationWatchValueQ {
count: u32,
watch_id: Option<u64>,
watcher: KeyPair,
vcrypto: CryptoSystemVersion,
vcrypto: &CryptoSystemGuard<'_>,
) -> Result<Self, RPCError> {
if subkeys.ranges_len() > MAX_WATCH_VALUE_Q_SUBKEY_RANGES_LEN {
return Err(RPCError::protocol("WatchValueQ subkeys length too long"));

@@ -76,7 +76,8 @@ impl RPCOperationWatchValueQ {
}

pub fn validate(&mut self, validate_context: &RPCValidateContext) -> Result<(), RPCError> {
let Some(vcrypto) = validate_context.crypto.get(self.key.kind) else {
let crypto = validate_context.crypto();
let Some(vcrypto) = crypto.get(self.key.kind) else {
return Err(RPCError::protocol("unsupported cryptosystem"));
};

@@ -270,7 +271,8 @@ impl RPCOperationWatchValueA {
}

pub fn validate(&mut self, validate_context: &RPCValidateContext) -> Result<(), RPCError> {
PeerInfo::validate_vec(&mut self.peers, validate_context.crypto.clone());
let crypto = validate_context.crypto();
PeerInfo::validate_vec(&mut self.peers, &crypto);
Ok(())
}
@@ -123,7 +123,7 @@ impl Destination {
}
}

pub fn get_target(&self, rss: RouteSpecStore) -> Result<Target, RPCError> {
pub fn get_target(&self, routing_table: &RoutingTable) -> Result<Target, RPCError> {
match self {
Destination::Direct {
node,

@@ -139,7 +139,8 @@ impl Destination {
safety_selection: _,
} => {
// Add the remote private route if we're going to keep the id
let route_id = rss
let route_id = routing_table
.route_spec_store()
.add_remote_private_route(private_route.clone())
.map_err(RPCError::protocol)?;

@@ -150,7 +151,7 @@ impl Destination {

pub fn get_unsafe_routing_info(
&self,
routing_table: RoutingTable,
routing_table: &RoutingTable,
) -> Option<UnsafeRoutingInfo> {
// If there's a safety route in use, the safety route will be responsible for the routing
match self.get_safety_selection() {
@@ -155,40 +155,54 @@ impl RPCProcessor {
}
}

/////////////////////////////////////
/// Initialization

async fn init_async(&self) -> EyreResult<()> {
Ok(())
}

async fn post_init_async(&self) -> EyreResult<()> {
Ok(())
}

async fn pre_terminate_async(&self) {
// Ensure things have shut down
assert!(
self.startup_lock.is_shut_down(),
"should have shut down by now"
);
}

async fn terminate_async(&self) {}

//////////////////////////////////////////////////////////////////////

#[instrument(level = "debug", skip_all, err)]
pub async fn startup(&self) -> EyreResult<()> {
log_rpc!(debug "startup rpc processor");
let guard = self.unlocked_inner.startup_lock.startup()?;
let guard = self.startup_lock.startup()?;
{
let mut inner = self.inner.lock();

let channel = flume::bounded(self.unlocked_inner.queue_size as usize);
let channel = flume::bounded(self.queue_size as usize);
inner.send_channel = Some(channel.0.clone());
inner.stop_source = Some(StopSource::new());

// spin up N workers
log_rpc!(
"Spinning up {} RPC workers",
self.unlocked_inner.concurrency
);
for task_n in 0..self.unlocked_inner.concurrency {
let this = self.clone();
log_rpc!("Spinning up {} RPC workers", self.concurrency);
for task_n in 0..self.concurrency {
let registry = self.registry();
let receiver = channel.1.clone();
let jh = spawn(
&format!("rpc worker {}", task_n),
Self::rpc_worker(this, inner.stop_source.as_ref().unwrap().token(), receiver),
);
let stop_token = inner.stop_source.as_ref().unwrap().token();
let jh = spawn(&format!("rpc worker {}", task_n), async move {
let this = registry.rpc_processor();
this.rpc_worker(stop_token, receiver).await
});
inner.worker_join_handles.push(jh);
}
}

// Inform storage manager we are up
self.storage_manager()
.set_rpc_processor(Some(self.clone()))
.await;

guard.success();
Ok(())
}
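Note: the RPCProcessor startup hunk changes how the workers are spawned: instead of moving a clone of `self` into `Self::rpc_worker(this, ...)`, each task now captures a registry clone and a stop token, resolves the processor inside the task, and calls the now-`&self` `rpc_worker`. A self-contained sketch of that spawning shape with invented types (`Registry`, `RpcProcessor`, `StopToken`) and plain `tokio::spawn`, not the real `spawn`/`StopToken` API:

```rust
use std::sync::Arc;
use tokio::task::JoinHandle;

// Invented stand-ins for the registry, processor, and stop token used in the real code.
#[derive(Clone)]
struct Registry {
    rpc: Arc<RpcProcessor>,
}
struct RpcProcessor {
    concurrency: u32,
}
#[derive(Clone)]
struct StopToken;

impl Registry {
    fn rpc_processor(&self) -> Arc<RpcProcessor> {
        self.rpc.clone()
    }
}

impl RpcProcessor {
    // Taking &self (instead of consuming self) lets every worker share one processor.
    async fn rpc_worker(&self, _stop_token: StopToken, worker_n: u32) {
        println!("rpc worker {worker_n} running");
    }
}

fn spawn_workers(registry: &Registry, stop_token: StopToken) -> Vec<JoinHandle<()>> {
    let concurrency = registry.rpc_processor().concurrency;
    let mut worker_join_handles = Vec::new();
    for task_n in 0..concurrency {
        // Move a registry clone and the stop token into the task, then resolve the
        // processor inside it, mirroring `let this = registry.rpc_processor();`.
        let registry = registry.clone();
        let stop_token = stop_token.clone();
        let jh = tokio::spawn(async move {
            let this = registry.rpc_processor();
            this.rpc_worker(stop_token, task_n).await
        });
        worker_join_handles.push(jh);
    }
    worker_join_handles
}

#[tokio::main]
async fn main() {
    let registry = Registry { rpc: Arc::new(RpcProcessor { concurrency: 2 }) };
    for jh in spawn_workers(&registry, StopToken) {
        jh.await.unwrap();
    }
}
```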
@@ -196,14 +210,11 @@ impl RPCProcessor {
#[instrument(level = "debug", skip_all)]
pub async fn shutdown(&self) {
log_rpc!(debug "starting rpc processor shutdown");
let Ok(guard) = self.unlocked_inner.startup_lock.shutdown().await else {
let Ok(guard) = self.startup_lock.shutdown().await else {
log_rpc!(debug "rpc processor already shut down");
return;
};

// Stop storage manager from using us
self.storage_manager().set_rpc_processor(None).await;

// Stop the rpc workers
let mut unord = FuturesUnordered::new();
{

@@ -233,9 +244,7 @@ impl RPCProcessor {

/// Get waiting app call id for debugging purposes
pub fn get_app_call_ids(&self) -> Vec<OperationId> {
self.unlocked_inner
.waiting_app_call_table
.get_operation_ids()
self.waiting_app_call_table.get_operation_ids()
}

/// Determine if a SignedNodeInfo can be placed into the specified routing domain

@@ -264,12 +273,13 @@ impl RPCProcessor {
let Some(peer_info) = sender_peer_info.opt_peer_info.clone() else {
return Ok(NetworkResult::value(None));
};
let address_filter = self.network_manager().address_filter();

// Ensure the sender peer info is for the actual sender specified in the envelope
if !peer_info.node_ids().contains(&sender_node_id) {
// Attempted to update peer info for the wrong node id
address_filter.punish_node_id(sender_node_id, PunishmentReason::WrongSenderPeerInfo);
self.network_manager()
.address_filter()
.punish_node_id(sender_node_id, PunishmentReason::WrongSenderPeerInfo);

return Ok(NetworkResult::invalid_message(
"attempt to update peer info for non-sender node id",

@@ -282,7 +292,7 @@ impl RPCProcessor {
// Don't punish for this because in the case of hairpin NAT
// you can legally get LocalNetwork PeerInfo when you expect PublicInternet PeerInfo
//
// address_filter.punish_node_id(
// self.network_manager().address_filter().punish_node_id(
//     sender_node_id,
//     PunishmentReason::FailedToVerifySenderPeerInfo,
// );

@@ -294,7 +304,7 @@ impl RPCProcessor {
{
Ok(v) => v.unfiltered(),
Err(e) => {
address_filter.punish_node_id(
self.network_manager().address_filter().punish_node_id(
sender_node_id,
PunishmentReason::FailedToRegisterSenderPeerInfo,
);
@@ -329,17 +339,17 @@ impl RPCProcessor {

// Routine to call to generate fanout
let call_routine = |next_node: NodeRef| {
let this = self.clone();
let registry = self.registry();
async move {
let this = registry.rpc_processor();
let v = network_result_try!(
this.clone()
.rpc_call_find_node(
Destination::direct(next_node.routing_domain_filtered(routing_domain))
.with_safety(safety_selection),
node_id,
vec![],
)
.await?
this.rpc_call_find_node(
Destination::direct(next_node.routing_domain_filtered(routing_domain))
.with_safety(safety_selection),
node_id,
vec![],
)
.await?
);
Ok(NetworkResult::value(FanoutCallOutput {
peer_info_list: v.answer,

@@ -364,8 +374,9 @@ impl RPCProcessor {
};

// Call the fanout
let routing_table = self.routing_table();
let fanout_call = FanoutCall::new(
routing_table.clone(),
&routing_table,
node_id,
count,
fanout,

@@ -387,11 +398,12 @@ impl RPCProcessor {
node_id: TypedKey,
safety_selection: SafetySelection,
) -> SendPinBoxFuture<Result<Option<NodeRef>, RPCError>> {
let this = self.clone();
let registry = self.registry();
Box::pin(
async move {
let this = registry.rpc_processor();

let _guard = this
.unlocked_inner
.startup_lock
.enter()
.map_err(RPCError::map_try_again("not started up"))?;

@@ -415,7 +427,7 @@ impl RPCProcessor {
}

// If nobody knows where this node is, ask the DHT for it
let (node_count, _consensus_count, fanout, timeout) = this.with_config(|c| {
let (node_count, _consensus_count, fanout, timeout) = this.config().with(|c| {
(
c.network.dht.max_find_node_count as usize,
c.network.dht.resolve_node_count as usize,

@@ -458,7 +470,6 @@ impl RPCProcessor {
) -> Result<TimeoutOr<(Message, TimestampDuration)>, RPCError> {
let id = waitable_reply.handle.id();
let out = self
.unlocked_inner
.waiting_rpc_table
.wait_for_op(waitable_reply.handle, waitable_reply.timeout_us)
.await;

@@ -541,6 +552,7 @@ impl RPCProcessor {
message_data: Vec<u8>,
) -> RPCNetworkResult<RenderedOperation> {
let routing_table = self.routing_table();
let crypto = self.crypto();
let rss = routing_table.route_spec_store();

// Get useful private route properties

@@ -548,7 +560,7 @@ impl RPCProcessor {
let pr_hop_count = remote_private_route.hop_count;
let pr_pubkey = remote_private_route.public_key.value;
let crypto_kind = remote_private_route.crypto_kind();
let Some(vcrypto) = self.crypto().get(crypto_kind) else {
let Some(vcrypto) = crypto.get(crypto_kind) else {
return Err(RPCError::internal(
"crypto not available for selected private route",
));
|
||||
/// And send our timestamp of the target's node info so they can determine if they should update us on their next rpc
|
||||
#[instrument(level = "trace", target = "rpc", skip_all)]
|
||||
fn get_sender_peer_info(&self, dest: &Destination) -> SenderPeerInfo {
|
||||
let routing_table = self.routing_table();
|
||||
// Don't do this if the sender is to remain private
|
||||
// Otherwise we would be attaching the original sender's identity to the final destination,
|
||||
// thus defeating the purpose of the safety route entirely :P
|
||||
@ -757,7 +770,7 @@ impl RPCProcessor {
|
||||
opt_node,
|
||||
opt_relay: _,
|
||||
opt_routing_domain,
|
||||
}) = dest.get_unsafe_routing_info(self.routing_table())
|
||||
}) = dest.get_unsafe_routing_info(&routing_table)
|
||||
else {
|
||||
return SenderPeerInfo::default();
|
||||
};
|
||||
@ -813,13 +826,15 @@ impl RPCProcessor {
|
||||
|
||||
// If safety route was in use, record failure to send there
|
||||
if let Some(sr_pubkey) = &safety_route {
|
||||
let rss = self.routing_table().route_spec_store();
|
||||
rss.with_route_stats_mut(send_ts, sr_pubkey, |s| s.record_send_failed());
|
||||
self.routing_table()
|
||||
.route_spec_store()
|
||||
.with_route_stats_mut(send_ts, sr_pubkey, |s| s.record_send_failed());
|
||||
} else {
|
||||
// If no safety route was in use, then it's the private route's fault if we have one
|
||||
if let Some(pr_pubkey) = &remote_private_route {
|
||||
let rss = self.routing_table().route_spec_store();
|
||||
rss.with_route_stats_mut(send_ts, pr_pubkey, |s| s.record_send_failed());
|
||||
self.routing_table()
|
||||
.route_spec_store()
|
||||
.with_route_stats_mut(send_ts, pr_pubkey, |s| s.record_send_failed());
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -844,7 +859,8 @@ impl RPCProcessor {
|
||||
return;
|
||||
}
|
||||
// Get route spec store
|
||||
let rss = self.routing_table().route_spec_store();
|
||||
let routing_table = self.routing_table();
|
||||
let rss = routing_table.route_spec_store();
|
||||
|
||||
// If safety route was used, record question lost there
|
||||
if let Some(sr_pubkey) = &safety_route {
|
||||
@ -891,7 +907,8 @@ impl RPCProcessor {
|
||||
}
|
||||
|
||||
// Get route spec store
|
||||
let rss = self.routing_table().route_spec_store();
|
||||
let routing_table = self.routing_table();
|
||||
let rss = routing_table.route_spec_store();
|
||||
|
||||
// If safety route was used, record send there
|
||||
if let Some(sr_pubkey) = &safety_route {
|
||||
@ -928,7 +945,8 @@ impl RPCProcessor {
|
||||
return;
|
||||
}
|
||||
// Get route spec store
|
||||
let rss = self.routing_table().route_spec_store();
|
||||
let routing_table = self.routing_table();
|
||||
let rss = routing_table.route_spec_store();
|
||||
|
||||
// Get latency for all local routes
|
||||
let mut total_local_latency = TimestampDuration::new(0u64);
|
||||
@ -982,7 +1000,6 @@ impl RPCProcessor {
|
||||
// This is fine because if we sent with a local safety route,
|
||||
// then we must have received with a local private route too, per the design rules
|
||||
if let Some(sr_pubkey) = &safety_route {
|
||||
let rss = self.routing_table().route_spec_store();
|
||||
rss.with_route_stats_mut(send_ts, sr_pubkey, |s| {
|
||||
s.record_latency(total_latency / 2u64);
|
||||
});
|
||||
@ -1001,6 +1018,9 @@ impl RPCProcessor {
|
||||
let recv_ts = msg.header.timestamp;
|
||||
let bytes = msg.header.body_len;
|
||||
|
||||
let routing_table = self.routing_table();
|
||||
let rss = routing_table.route_spec_store();
|
||||
|
||||
// Process messages based on how they were received
|
||||
match &msg.header.detail {
|
||||
// Process direct messages
|
||||
@ -1011,8 +1031,6 @@ impl RPCProcessor {
|
||||
}
|
||||
// Process messages that arrived with no private route (private route stub)
|
||||
RPCMessageHeaderDetail::SafetyRouted(d) => {
|
||||
let rss = self.routing_table().route_spec_store();
|
||||
|
||||
// This may record nothing if the remote safety route is not also
|
||||
// a remote private route that been imported, but that's okay
|
||||
rss.with_route_stats_mut(recv_ts, &d.remote_safety_route, |s| {
|
||||
@ -1021,8 +1039,6 @@ impl RPCProcessor {
|
||||
}
|
||||
// Process messages that arrived to our private route
|
||||
RPCMessageHeaderDetail::PrivateRouted(d) => {
|
||||
let rss = self.routing_table().route_spec_store();
|
||||
|
||||
// This may record nothing if the remote safety route is not also
|
||||
// a remote private route that been imported, but that's okay
|
||||
// it could also be a node id if no remote safety route was used
|
||||
@ -1071,13 +1087,10 @@ impl RPCProcessor {
|
||||
|
||||
// Calculate answer timeout
|
||||
// Timeout is number of hops times the timeout per hop
|
||||
let timeout_us = self.unlocked_inner.timeout_us * (hop_count as u64);
|
||||
let timeout_us = self.timeout_us * (hop_count as u64);
|
||||
|
||||
// Set up op id eventual
|
||||
let handle = self
|
||||
.unlocked_inner
|
||||
.waiting_rpc_table
|
||||
.add_op_waiter(op_id, context);
|
||||
let handle = self.waiting_rpc_table.add_op_waiter(op_id, context);
|
||||
|
||||
// Send question
|
||||
let bytes: ByteCount = (message.len() as u64).into();
|
||||
@@ -1315,16 +1328,14 @@ impl RPCProcessor {
// If we received an answer for a question we did not ask, this will return an error
let question_context = if let RPCOperationKind::Answer(_) = operation.kind() {
let op_id = operation.op_id();
self.unlocked_inner
.waiting_rpc_table
.get_op_context(op_id)?
self.waiting_rpc_table.get_op_context(op_id)?
} else {
None
};

// Validate the RPC operation
let validate_context = RPCValidateContext {
crypto: self.crypto(),
registry: self.registry(),
// rpc_processor: self.clone(),
question_context,
};

@@ -1336,8 +1347,6 @@ impl RPCProcessor {
//////////////////////////////////////////////////////////////////////
#[instrument(level = "trace", target = "rpc", skip_all)]
async fn process_rpc_message(&self, encoded_msg: MessageEncoded) -> RPCNetworkResult<()> {
let address_filter = self.network_manager().address_filter();

// Decode operation appropriately based on header detail
let msg = match &encoded_msg.header.detail {
RPCMessageHeaderDetail::Direct(detail) => {

@@ -1355,7 +1364,7 @@ impl RPCProcessor {
log_rpc!(debug "Invalid RPC Operation: {}", e);

// Punish nodes that send direct undecodable crap
address_filter.punish_node_id(
self.network_manager().address_filter().punish_node_id(
sender_node_id,
PunishmentReason::FailedToDecodeOperation,
);

@@ -1422,7 +1431,7 @@ impl RPCProcessor {
log_rpc!(debug "Dropping routed RPC: {}", e);

// XXX: Punish routes that send routed undecodable crap
// address_filter.punish_route_id(xxx, PunishmentReason::FailedToDecodeRoutedMessage);
// self.network_manager().address_filter().punish_route_id(xxx, PunishmentReason::FailedToDecodeRoutedMessage);
return Ok(NetworkResult::invalid_message(e));
}
};

@@ -1497,11 +1506,7 @@ impl RPCProcessor {
},
RPCOperationKind::Answer(_) => {
let op_id = msg.operation.op_id();
if let Err(e) = self
.unlocked_inner
.waiting_rpc_table
.complete_op_waiter(op_id, msg)
{
if let Err(e) = self.waiting_rpc_table.complete_op_waiter(op_id, msg) {
match e {
RPCError::Unimplemented(_) | RPCError::Internal(_) => {
log_rpc!(error "Could not complete rpc operation: id = {}: {}", op_id, e);

@@ -1524,7 +1529,7 @@ impl RPCProcessor {
}

async fn rpc_worker(
self,
&self,
stop_token: StopToken,
receiver: flume::Receiver<(Span, MessageEncoded)>,
) {

@@ -1560,7 +1565,6 @@ impl RPCProcessor {
body: Vec<u8>,
) -> EyreResult<()> {
let _guard = self
.unlocked_inner
.startup_lock
.enter()
.map_err(RPCError::map_try_again("not started up"))?;
@ -5,12 +5,11 @@ impl RPCProcessor {
// Can be sent via all methods including relays and routes
#[instrument(level = "trace", target = "rpc", skip(self, message), fields(message.len = message.len(), ret.latency, ret.len), err)]
pub async fn rpc_call_app_call(
self,
&self,
dest: Destination,
message: Vec<u8>,
) -> RPCNetworkResult<Answer<Vec<u8>>> {
let _guard = self
.unlocked_inner
.startup_lock
.enter()
.map_err(RPCError::map_try_again("not started up"))?;
@ -117,22 +116,18 @@ impl RPCProcessor {
.map(|nr| nr.node_ids().get(crypto_kind).unwrap());

// Register a waiter for this app call
let handle = self
.unlocked_inner
.waiting_app_call_table
.add_op_waiter(op_id, ());
let handle = self.waiting_app_call_table.add_op_waiter(op_id, ());

// Pass the call up through the update callback
let message_q = app_call_q.destructure();
(self.unlocked_inner.update_callback)(VeilidUpdate::AppCall(Box::new(VeilidAppCall::new(
(self.update_callback())(VeilidUpdate::AppCall(Box::new(VeilidAppCall::new(
sender, route_id, message_q, op_id,
))));

// Wait for an app call answer to come back from the app
let res = self
.unlocked_inner
.waiting_app_call_table
.wait_for_op(handle, self.unlocked_inner.timeout_us)
.wait_for_op(handle, self.timeout_us)
.await?;
let (message_a, _latency) = match res {
TimeoutOr::Timeout => {
@ -158,12 +153,10 @@ impl RPCProcessor {
#[instrument(level = "trace", target = "rpc", skip_all)]
pub fn app_call_reply(&self, call_id: OperationId, message: Vec<u8>) -> Result<(), RPCError> {
let _guard = self
.unlocked_inner
.startup_lock
.enter()
.map_err(RPCError::map_try_again("not started up"))?;
self.unlocked_inner
.waiting_app_call_table
self.waiting_app_call_table
.complete_op_waiter(call_id, message)
.map_err(RPCError::ignore)
}

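The app_call hunks above also move the update callback behind an accessor, so VeilidUpdate::AppCall and friends are dispatched through (self.update_callback())(...). Below is a minimal sketch of that accessor shape, assuming a shared Arc-backed callback; the names are illustrative, not the real veilid-core API.

use std::sync::Arc;

// Illustrative update enum; the real crate dispatches VeilidUpdate values.
#[derive(Debug)]
enum Update {
    AppCall { op_id: u64, message: Vec<u8> },
    AppMessage { message: Vec<u8> },
}

// Hypothetical stand-in for the node's shared update callback type.
type UpdateCallbackSketch = Arc<dyn Fn(Update) + Send + Sync>;

struct ProcessorSketch {
    update_callback: UpdateCallbackSketch,
}

impl ProcessorSketch {
    // Accessor in the spirit of `self.update_callback()` above: hand back a
    // cheap clone of the shared callback instead of exposing the field.
    fn update_callback(&self) -> UpdateCallbackSketch {
        self.update_callback.clone()
    }
}

fn main() {
    let cb: UpdateCallbackSketch = Arc::new(|u: Update| println!("update: {:?}", u));
    let proc_sketch = ProcessorSketch { update_callback: cb };

    // Same shape as `(self.update_callback())(VeilidUpdate::AppCall(..))`.
    (proc_sketch.update_callback())(Update::AppCall {
        op_id: 1,
        message: b"ping".to_vec(),
    });
    (proc_sketch.update_callback())(Update::AppMessage {
        message: b"hello".to_vec(),
    });
}
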
@ -5,12 +5,11 @@ impl RPCProcessor {
|
||||
// Can be sent via all methods including relays and routes
|
||||
#[instrument(level = "trace", target = "rpc", skip(self, message), fields(message.len = message.len()), err)]
|
||||
pub async fn rpc_call_app_message(
|
||||
self,
|
||||
&self,
|
||||
dest: Destination,
|
||||
message: Vec<u8>,
|
||||
) -> RPCNetworkResult<()> {
|
||||
let _guard = self
|
||||
.unlocked_inner
|
||||
.startup_lock
|
||||
.enter()
|
||||
.map_err(RPCError::map_try_again("not started up"))?;
|
||||
@ -81,9 +80,9 @@ impl RPCProcessor {
|
||||
|
||||
// Pass the message up through the update callback
|
||||
let message = app_message.destructure();
|
||||
(self.unlocked_inner.update_callback)(VeilidUpdate::AppMessage(Box::new(
|
||||
VeilidAppMessage::new(sender, route_id, message),
|
||||
)));
|
||||
(self.update_callback())(VeilidUpdate::AppMessage(Box::new(VeilidAppMessage::new(
|
||||
sender, route_id, message,
|
||||
))));
|
||||
|
||||
Ok(NetworkResult::value(()))
|
||||
}
|
||||
|
@ -9,13 +9,12 @@ impl RPCProcessor {
|
||||
/// the identity of the node and defeat the private route.
|
||||
#[instrument(level = "trace", target = "rpc", skip(self), err)]
|
||||
pub async fn rpc_call_find_node(
|
||||
self,
|
||||
&self,
|
||||
dest: Destination,
|
||||
node_id: TypedKey,
|
||||
capabilities: Vec<Capability>,
|
||||
) -> RPCNetworkResult<Answer<Vec<Arc<PeerInfo>>>> {
|
||||
let _guard = self
|
||||
.unlocked_inner
|
||||
.startup_lock
|
||||
.enter()
|
||||
.map_err(RPCError::map_try_again("not started up"))?;
|
||||
|
@ -24,14 +24,13 @@ impl RPCProcessor {
|
||||
ret.latency
|
||||
),err)]
|
||||
pub async fn rpc_call_get_value(
|
||||
self,
|
||||
&self,
|
||||
dest: Destination,
|
||||
key: TypedKey,
|
||||
subkey: ValueSubkey,
|
||||
last_descriptor: Option<SignedValueDescriptor>,
|
||||
) -> RPCNetworkResult<Answer<GetValueAnswer>> {
|
||||
let _guard = self
|
||||
.unlocked_inner
|
||||
.startup_lock
|
||||
.enter()
|
||||
.map_err(RPCError::map_try_again("not started up"))?;
|
||||
@ -45,7 +44,8 @@ impl RPCProcessor {
};

// Get the target node id
let Some(vcrypto) = self.crypto().get(key.kind) else {
let crypto = self.crypto();
let Some(vcrypto) = crypto.get(key.kind) else {
return Err(RPCError::internal("unsupported cryptosystem"));
};
let Some(target_node_id) = target_node_ids.get(key.kind) else {
@ -74,7 +74,7 @@ impl RPCProcessor {
let question_context = QuestionContext::GetValue(ValidateGetValueContext {
last_descriptor,
subkey,
vcrypto: vcrypto.clone(),
crypto_kind: vcrypto.kind(),
});

log_dht!(debug "{}", debug_string);
@ -137,7 +137,7 @@ impl RPCProcessor {
}

// Validate peers returned are, in fact, closer to the key than the node we sent this to
let valid = match RoutingTable::verify_peers_closer(vcrypto, target_node_id, key, &peers) {
let valid = match RoutingTable::verify_peers_closer(&vcrypto, target_node_id, key, &peers) {
Ok(v) => v,
Err(e) => {
return Ok(NetworkResult::invalid_message(format!(
@ -231,7 +231,9 @@ impl RPCProcessor {
}

// See if we would have accepted this as a set
let set_value_count = self.with_config(|c| c.network.dht.set_value_count as usize);
let set_value_count = self
.config()
.with(|c| c.network.dht.set_value_count as usize);
let (get_result_value, get_result_descriptor) =
if closer_to_key_peers.len() >= set_value_count {
// Not close enough

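The "closer to the key" validation above can be pictured as a Kademlia-style XOR-distance check: each peer returned should be at least as close to the target key as the node that answered. The sketch below only illustrates the idea behind RoutingTable::verify_peers_closer, using plain 32-byte arrays instead of the crate's typed keys.

// Kademlia-style XOR distance on fixed-size ids; illustrative only.
fn xor_distance(a: &[u8; 32], b: &[u8; 32]) -> [u8; 32] {
    let mut d = [0u8; 32];
    for i in 0..32 {
        d[i] = a[i] ^ b[i];
    }
    d
}

// Returns true if every peer is at least as close to `key` as `responder`.
fn peers_closer(responder: &[u8; 32], key: &[u8; 32], peers: &[[u8; 32]]) -> bool {
    let responder_dist = xor_distance(responder, key);
    peers.iter().all(|p| xor_distance(p, key) <= responder_dist)
}

fn main() {
    let key = [0u8; 32];
    let mut responder = [0u8; 32];
    responder[0] = 0x0f;

    let mut close_peer = [0u8; 32];
    close_peer[0] = 0x01; // closer to the all-zero key than the responder
    let mut far_peer = [0u8; 32];
    far_peer[0] = 0xf0; // farther from the key than the responder

    assert!(peers_closer(&responder, &key, &[close_peer]));
    assert!(!peers_closer(&responder, &key, &[far_peer]));
}
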
@ -26,14 +26,13 @@ impl RPCProcessor {
|
||||
),err)
|
||||
]
|
||||
pub async fn rpc_call_inspect_value(
|
||||
self,
|
||||
&self,
|
||||
dest: Destination,
|
||||
key: TypedKey,
|
||||
subkeys: ValueSubkeyRangeSet,
|
||||
last_descriptor: Option<SignedValueDescriptor>,
|
||||
) -> RPCNetworkResult<Answer<InspectValueAnswer>> {
|
||||
let _guard = self
|
||||
.unlocked_inner
|
||||
.startup_lock
|
||||
.enter()
|
||||
.map_err(RPCError::map_try_again("not started up"))?;
|
||||
@ -47,7 +46,8 @@ impl RPCProcessor {
|
||||
};
|
||||
|
||||
// Get the target node id
|
||||
let Some(vcrypto) = self.crypto().get(key.kind) else {
|
||||
let crypto = self.crypto();
|
||||
let Some(vcrypto) = crypto.get(key.kind) else {
|
||||
return Err(RPCError::internal("unsupported cryptosystem"));
|
||||
};
|
||||
let Some(target_node_id) = target_node_ids.get(key.kind) else {
|
||||
@ -77,7 +77,7 @@ impl RPCProcessor {
|
||||
let question_context = QuestionContext::InspectValue(ValidateInspectValueContext {
|
||||
last_descriptor,
|
||||
subkeys,
|
||||
vcrypto: vcrypto.clone(),
|
||||
crypto_kind: vcrypto.kind(),
|
||||
});
|
||||
|
||||
log_dht!(debug "{}", debug_string);
|
||||
@ -127,7 +127,7 @@ impl RPCProcessor {
|
||||
}
|
||||
|
||||
// Validate peers returned are, in fact, closer to the key than the node we sent this to
|
||||
let valid = match RoutingTable::verify_peers_closer(vcrypto, target_node_id, key, &peers) {
|
||||
let valid = match RoutingTable::verify_peers_closer(&vcrypto, target_node_id, key, &peers) {
|
||||
Ok(v) => v,
|
||||
Err(e) => {
|
||||
return Ok(NetworkResult::invalid_message(format!(
|
||||
@ -212,7 +212,9 @@ impl RPCProcessor {
|
||||
}
|
||||
|
||||
// See if we would have accepted this as a set
|
||||
let set_value_count = self.with_config(|c| c.network.dht.set_value_count as usize);
|
||||
let set_value_count = self
|
||||
.config()
|
||||
.with(|c| c.network.dht.set_value_count as usize);
|
||||
|
||||
let (inspect_result_seqs, inspect_result_descriptor) =
|
||||
if closer_to_key_peers.len() >= set_value_count {
|
||||
|
@ -5,12 +5,11 @@ impl RPCProcessor {
|
||||
// Can be sent via all methods including relays and routes
|
||||
#[instrument(level = "trace", target = "rpc", skip(self, receipt), ret, err)]
|
||||
pub async fn rpc_call_return_receipt<D: AsRef<[u8]>>(
|
||||
self,
|
||||
&self,
|
||||
dest: Destination,
|
||||
receipt: D,
|
||||
) -> RPCNetworkResult<()> {
|
||||
let _guard = self
|
||||
.unlocked_inner
|
||||
.startup_lock
|
||||
.enter()
|
||||
.map_err(RPCError::map_try_again("not started up"))?;
|
||||
|
@ -9,7 +9,7 @@ impl RPCProcessor {
|
||||
safety_route: SafetyRoute,
|
||||
) -> RPCNetworkResult<()> {
|
||||
// Make sure hop count makes sense
|
||||
if safety_route.hop_count as usize > self.unlocked_inner.max_route_hop_count {
|
||||
if safety_route.hop_count as usize > self.max_route_hop_count {
|
||||
return Ok(NetworkResult::invalid_message(
|
||||
"Safety route hop count too high to process",
|
||||
));
|
||||
@ -26,9 +26,10 @@ impl RPCProcessor {
|
||||
}
|
||||
|
||||
// Get next hop node ref
|
||||
let routing_table = self.routing_table();
|
||||
let Some(next_hop_nr) = route_hop
|
||||
.node
|
||||
.node_ref(self.routing_table(), safety_route.public_key.kind)
|
||||
.node_ref(&routing_table, safety_route.public_key.kind)
|
||||
else {
|
||||
return Ok(NetworkResult::invalid_message(format!(
|
||||
"could not get route node hop ref: {}",
|
||||
@ -65,15 +66,16 @@ impl RPCProcessor {
|
||||
next_private_route: PrivateRoute,
|
||||
) -> RPCNetworkResult<()> {
|
||||
// Make sure hop count makes sense
|
||||
if next_private_route.hop_count as usize > self.unlocked_inner.max_route_hop_count {
|
||||
if next_private_route.hop_count as usize > self.max_route_hop_count {
|
||||
return Ok(NetworkResult::invalid_message(
|
||||
"Private route hop count too high to process",
|
||||
));
|
||||
}
|
||||
|
||||
// Get next hop node ref
|
||||
let routing_table = self.routing_table();
|
||||
let Some(next_hop_nr) =
|
||||
next_route_node.node_ref(self.routing_table(), safety_route_public_key.kind)
|
||||
next_route_node.node_ref(&routing_table, safety_route_public_key.kind)
|
||||
else {
|
||||
return Ok(NetworkResult::invalid_message(format!(
|
||||
"could not get route node hop ref: {}",
|
||||
@ -110,7 +112,7 @@ impl RPCProcessor {
|
||||
fn process_safety_routed_operation(
|
||||
&self,
|
||||
detail: RPCMessageHeaderDetailDirect,
|
||||
vcrypto: CryptoSystemVersion,
|
||||
vcrypto: &CryptoSystemGuard<'_>,
|
||||
routed_operation: RoutedOperation,
|
||||
remote_sr_pubkey: TypedKey,
|
||||
) -> RPCNetworkResult<()> {
|
||||
@ -156,7 +158,7 @@ impl RPCProcessor {
|
||||
fn process_private_routed_operation(
|
||||
&self,
|
||||
detail: RPCMessageHeaderDetailDirect,
|
||||
vcrypto: CryptoSystemVersion,
|
||||
vcrypto: &CryptoSystemGuard<'_>,
|
||||
routed_operation: RoutedOperation,
|
||||
remote_sr_pubkey: TypedKey,
|
||||
pr_pubkey: TypedKey,
|
||||
@ -170,7 +172,8 @@ impl RPCProcessor {
|
||||
|
||||
// Look up the private route and ensure it's one in our spec store
|
||||
// Ensure the route is validated, and construct a return safetyspec that matches the inbound preferences
|
||||
let rss = self.routing_table().route_spec_store();
|
||||
let routing_table = self.routing_table();
|
||||
let rss = routing_table.route_spec_store();
|
||||
let preferred_route = rss.get_route_id_for_key(&pr_pubkey.value);
|
||||
|
||||
let Some((secret_key, safety_spec)) = rss.with_signature_validated_route(
|
||||
@ -230,7 +233,7 @@ impl RPCProcessor {
|
||||
fn process_routed_operation(
|
||||
&self,
|
||||
detail: RPCMessageHeaderDetailDirect,
|
||||
vcrypto: CryptoSystemVersion,
|
||||
vcrypto: &CryptoSystemGuard<'_>,
|
||||
routed_operation: RoutedOperation,
|
||||
remote_sr_pubkey: TypedKey,
|
||||
pr_pubkey: TypedKey,
|
||||
@ -330,8 +333,9 @@ impl RPCProcessor {
|
||||
routed_operation: &mut RoutedOperation,
|
||||
) -> RPCNetworkResult<RouteHop> {
|
||||
// Get crypto kind
|
||||
let crypto = self.crypto();
|
||||
let crypto_kind = pr_pubkey.kind;
|
||||
let Some(vcrypto) = self.crypto().get(crypto_kind) else {
|
||||
let Some(vcrypto) = crypto.get(crypto_kind) else {
|
||||
return Ok(NetworkResult::invalid_message(
|
||||
"private route hop data crypto is not supported",
|
||||
));
|
||||
@ -370,9 +374,7 @@ impl RPCProcessor {
|
||||
};
|
||||
|
||||
// Validate the RouteHop
|
||||
route_hop
|
||||
.validate(self.crypto())
|
||||
.map_err(RPCError::protocol)?;
|
||||
route_hop.validate(&crypto).map_err(RPCError::protocol)?;
|
||||
|
||||
// Sign the operation if this is not our last hop
|
||||
// as the last hop is already signed by the envelope
|
||||
@ -392,6 +394,7 @@ impl RPCProcessor {
|
||||
pub(super) async fn process_route(&self, msg: Message) -> RPCNetworkResult<()> {
|
||||
// Ignore if disabled
|
||||
let routing_table = self.routing_table();
|
||||
let crypto = self.crypto();
|
||||
|
||||
let Some(published_peer_info) =
|
||||
routing_table.get_published_peer_info(msg.header.routing_domain())
|
||||
@ -431,7 +434,7 @@ impl RPCProcessor {
|
||||
|
||||
// Get crypto kind
|
||||
let crypto_kind = route.safety_route().crypto_kind();
|
||||
let Some(vcrypto) = self.crypto().get(crypto_kind) else {
|
||||
let Some(vcrypto) = crypto.get(crypto_kind) else {
|
||||
return Ok(NetworkResult::invalid_message(
|
||||
"routed operation crypto is not supported",
|
||||
));
|
||||
@ -497,7 +500,7 @@ impl RPCProcessor {
|
||||
};
|
||||
|
||||
// Validate the private route
|
||||
if private_route.validate(self.crypto()).is_err() {
|
||||
if private_route.validate(&crypto).is_err() {
|
||||
return Ok(NetworkResult::invalid_message(
|
||||
"failed to validate private route",
|
||||
));
|
||||
@ -534,7 +537,7 @@ impl RPCProcessor {
|
||||
};
|
||||
|
||||
// Validate the route hop
|
||||
if route_hop.validate(self.crypto()).is_err() {
|
||||
if route_hop.validate(&crypto).is_err() {
|
||||
return Ok(NetworkResult::invalid_message(
|
||||
"failed to validate route hop",
|
||||
));
|
||||
@ -617,7 +620,7 @@ impl RPCProcessor {
// No hops left, time to process the routed operation
network_result_try!(self.process_routed_operation(
detail,
vcrypto,
&vcrypto,
routed_operation,
safety_route.public_key,
private_route.public_key,

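A recurring shape in the route-processing hunks above is binding the accessor result to a local (let crypto = self.crypto();) before calling get(...) or validate(&crypto) on it. Here is a hedged sketch of why that helps when the accessor returns a guard-like value: borrows taken from the guard must not outlive it, so the guard gets its own binding. CryptoSketch and its lock are hypothetical, not the real CryptoSystemGuard.

use std::collections::HashMap;
use std::sync::RwLock;

// Hypothetical component holding crypto systems behind a lock.
struct CryptoSketch {
    systems: RwLock<HashMap<u8, String>>,
}

impl CryptoSketch {
    // Accessor that returns a guard; borrows taken from it must not outlive it.
    fn read(&self) -> std::sync::RwLockReadGuard<'_, HashMap<u8, String>> {
        self.systems.read().unwrap()
    }
}

fn main() {
    let crypto = CryptoSketch {
        systems: RwLock::new(HashMap::from([(0u8, "VLD0".to_string())])),
    };

    // Bind the guard to a local first, as the diff does with `let crypto = self.crypto();`.
    // Taking the reference from a temporary guard and holding it afterwards would
    // drop the guard too early and fail to borrow-check.
    let guard = crypto.read();
    let Some(vcrypto) = guard.get(&0u8) else {
        panic!("unsupported cryptosystem");
    };
    println!("using cryptosystem {}", vcrypto);
}
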
@ -26,7 +26,7 @@ impl RPCProcessor {
|
||||
ret.latency
|
||||
), err)]
|
||||
pub async fn rpc_call_set_value(
|
||||
self,
|
||||
&self,
|
||||
dest: Destination,
|
||||
key: TypedKey,
|
||||
subkey: ValueSubkey,
|
||||
@ -35,7 +35,6 @@ impl RPCProcessor {
|
||||
send_descriptor: bool,
|
||||
) -> RPCNetworkResult<Answer<SetValueAnswer>> {
|
||||
let _guard = self
|
||||
.unlocked_inner
|
||||
.startup_lock
|
||||
.enter()
|
||||
.map_err(RPCError::map_try_again("not started up"))?;
|
||||
@ -49,7 +48,8 @@ impl RPCProcessor {
|
||||
};
|
||||
|
||||
// Get the target node id
|
||||
let Some(vcrypto) = self.crypto().get(key.kind) else {
|
||||
let crypto = self.crypto();
|
||||
let Some(vcrypto) = crypto.get(key.kind) else {
|
||||
return Err(RPCError::internal("unsupported cryptosystem"));
|
||||
};
|
||||
let Some(target_node_id) = target_node_ids.get(key.kind) else {
|
||||
@ -84,7 +84,7 @@ impl RPCProcessor {
|
||||
let question_context = QuestionContext::SetValue(ValidateSetValueContext {
|
||||
descriptor,
|
||||
subkey,
|
||||
vcrypto: vcrypto.clone(),
|
||||
crypto_kind: vcrypto.kind(),
|
||||
});
|
||||
|
||||
if debug_target_enabled!("dht") {
|
||||
@ -149,7 +149,7 @@ impl RPCProcessor {
|
||||
}
|
||||
|
||||
// Validate peers returned are, in fact, closer to the key than the node we sent this to
|
||||
let valid = match RoutingTable::verify_peers_closer(vcrypto, target_node_id, key, &peers) {
|
||||
let valid = match RoutingTable::verify_peers_closer(&vcrypto, target_node_id, key, &peers) {
|
||||
Ok(v) => v,
|
||||
Err(e) => {
|
||||
return Ok(NetworkResult::invalid_message(format!(
|
||||
@ -225,8 +225,7 @@ impl RPCProcessor {
|
||||
|
||||
// Get target for ValueChanged notifications
|
||||
let dest = network_result_try!(self.get_respond_to_destination(&msg));
|
||||
let rss = routing_table.route_spec_store();
|
||||
let target = dest.get_target(rss)?;
|
||||
let target = dest.get_target(&routing_table)?;
|
||||
|
||||
// Get the nodes that we know about that are closer to the key than our own node
|
||||
let closer_to_key_peers = network_result_try!(
|
||||
@ -247,7 +246,9 @@ impl RPCProcessor {
|
||||
log_rpc!(debug "{}", debug_string);
|
||||
|
||||
// If there are less than 'set_value_count' peers that are closer, then store here too
|
||||
let set_value_count = self.with_config(|c| c.network.dht.set_value_count as usize);
|
||||
let set_value_count = self
|
||||
.config()
|
||||
.with(|c| c.network.dht.set_value_count as usize);
|
||||
|
||||
let (set, new_value) = if closer_to_key_peers.len() >= set_value_count {
|
||||
// Not close enough
|
||||
|
@ -5,12 +5,11 @@ impl RPCProcessor {
|
||||
// Can be sent via relays but not routes. For routed 'signal' like capabilities, use AppMessage.
|
||||
#[instrument(level = "trace", target = "rpc", skip(self), ret, err)]
|
||||
pub async fn rpc_call_signal(
|
||||
self,
|
||||
&self,
|
||||
dest: Destination,
|
||||
signal_info: SignalInfo,
|
||||
) -> RPCNetworkResult<()> {
|
||||
let _guard = self
|
||||
.unlocked_inner
|
||||
.startup_lock
|
||||
.enter()
|
||||
.map_err(RPCError::map_try_again("not started up"))?;
|
||||
|
@ -18,22 +18,22 @@ impl RPCProcessor {
|
||||
// private -> nothing
|
||||
#[instrument(level = "trace", target = "rpc", skip(self), ret, err)]
|
||||
pub async fn rpc_call_status(
|
||||
self,
|
||||
&self,
|
||||
dest: Destination,
|
||||
) -> RPCNetworkResult<Answer<StatusResult>> {
|
||||
let _guard = self
|
||||
.unlocked_inner
|
||||
.startup_lock
|
||||
.enter()
|
||||
.map_err(RPCError::map_try_again("not started up"))?;
|
||||
|
||||
// Determine routing domain and node status to send
|
||||
let routing_table = self.routing_table();
|
||||
let (opt_target_nr, routing_domain, node_status) = if let Some(UnsafeRoutingInfo {
|
||||
opt_node,
|
||||
opt_relay,
|
||||
opt_routing_domain,
|
||||
}) =
|
||||
dest.get_unsafe_routing_info(self.routing_table())
|
||||
dest.get_unsafe_routing_info(&routing_table)
|
||||
{
|
||||
let Some(routing_domain) = opt_routing_domain else {
|
||||
// Because this exits before calling 'question()',
|
||||
|
@ -4,26 +4,25 @@ impl RPCProcessor {
|
||||
// Can only be sent directly, not via relays or routes
|
||||
#[instrument(level = "trace", target = "rpc", skip(self), ret, err)]
|
||||
pub async fn rpc_call_validate_dial_info(
|
||||
self,
|
||||
&self,
|
||||
peer: NodeRef,
|
||||
dial_info: DialInfo,
|
||||
redirect: bool,
|
||||
) -> Result<bool, RPCError> {
|
||||
let _guard = self
|
||||
.unlocked_inner
|
||||
.startup_lock
|
||||
.enter()
|
||||
.map_err(RPCError::map_try_again("not started up"))?;
|
||||
let stop_token = self
|
||||
.unlocked_inner
|
||||
.startup_lock
|
||||
.stop_token()
|
||||
.ok_or(RPCError::try_again("not started up"))?;
|
||||
|
||||
let network_manager = self.network_manager();
|
||||
|
||||
let validate_dial_info_receipt_time_ms =
|
||||
self.with_config(|c| c.network.dht.validate_dial_info_receipt_time_ms as u64);
|
||||
let validate_dial_info_receipt_time_ms = self
|
||||
.config()
|
||||
.with(|c| c.network.dht.validate_dial_info_receipt_time_ms as u64);
|
||||
|
||||
let receipt_time = TimestampDuration::new_ms(validate_dial_info_receipt_time_ms);
|
||||
|
||||
@ -130,7 +129,9 @@ impl RPCProcessor {
// an ipv6 address
let sender_node_id = detail.envelope.get_sender_typed_id();
let routing_domain = detail.routing_domain;
let node_count = self.with_config(|c| c.network.dht.max_find_node_count as usize);
let node_count = self
.config()
.with(|c| c.network.dht.max_find_node_count as usize);

// Filter on nodes that can validate dial info, and can reach a specific dial info
let outbound_dial_info_entry_filter =

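Several hunks in this commit replace self.with_config(|c| ...) with self.config().with(|c| ...). The sketch below shows the closure-based config accessor that pattern implies, under the assumption that config() hands back a shared, lock-guarded handle; the types here are illustrative, not the actual VeilidConfig API.

use std::sync::{Arc, RwLock};

// Illustrative config tree, loosely mirroring the c.network.dht.* paths used above.
struct DhtConfigSketch {
    max_find_node_count: u32,
    set_value_count: u32,
}
struct NetworkConfigSketch {
    dht: DhtConfigSketch,
}
struct ConfigSketch {
    network: NetworkConfigSketch,
}

// Hypothetical handle returned by a `config()` accessor.
#[derive(Clone)]
struct ConfigHandle(Arc<RwLock<ConfigSketch>>);

impl ConfigHandle {
    // Readers borrow the config only inside the closure, so the lock is
    // released as soon as the value is extracted.
    fn with<R>(&self, f: impl FnOnce(&ConfigSketch) -> R) -> R {
        let guard = self.0.read().unwrap();
        f(&*guard)
    }
}

fn main() {
    let config = ConfigHandle(Arc::new(RwLock::new(ConfigSketch {
        network: NetworkConfigSketch {
            dht: DhtConfigSketch { max_find_node_count: 20, set_value_count: 5 },
        },
    })));

    // Same shape as the `.config().with(|c| ...)` calls in the hunks above.
    let node_count = config.with(|c| c.network.dht.max_find_node_count as usize);
    let set_value_count = config.with(|c| c.network.dht.set_value_count as usize);
    assert_eq!((node_count, set_value_count), (20, 5));
}
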
@ -5,7 +5,7 @@ impl RPCProcessor {
|
||||
// Can be sent via all methods including relays and routes but never over a safety route
|
||||
#[instrument(level = "trace", target = "rpc", skip(self, value), err)]
|
||||
pub async fn rpc_call_value_changed(
|
||||
self,
|
||||
&self,
|
||||
dest: Destination,
|
||||
key: TypedKey,
|
||||
subkeys: ValueSubkeyRangeSet,
|
||||
@ -14,7 +14,6 @@ impl RPCProcessor {
|
||||
value: Option<SignedValueData>,
|
||||
) -> RPCNetworkResult<()> {
|
||||
let _guard = self
|
||||
.unlocked_inner
|
||||
.startup_lock
|
||||
.enter()
|
||||
.map_err(RPCError::map_try_again("not started up"))?;
|
||||
|
@ -23,7 +23,7 @@ impl RPCProcessor {
|
||||
),err)]
|
||||
#[allow(clippy::too_many_arguments)]
|
||||
pub async fn rpc_call_watch_value(
|
||||
self,
|
||||
&self,
|
||||
dest: Destination,
|
||||
key: TypedKey,
|
||||
subkeys: ValueSubkeyRangeSet,
|
||||
@ -33,7 +33,6 @@ impl RPCProcessor {
|
||||
watch_id: Option<u64>,
|
||||
) -> RPCNetworkResult<Answer<WatchValueAnswer>> {
|
||||
let _guard = self
|
||||
.unlocked_inner
|
||||
.startup_lock
|
||||
.enter()
|
||||
.map_err(RPCError::map_try_again("not started up"))?;
|
||||
@ -47,7 +46,8 @@ impl RPCProcessor {
|
||||
};
|
||||
|
||||
// Get the target node id
|
||||
let Some(vcrypto) = self.crypto().get(key.kind) else {
|
||||
let crypto = self.crypto();
|
||||
let Some(vcrypto) = crypto.get(key.kind) else {
|
||||
return Err(RPCError::internal("unsupported cryptosystem"));
|
||||
};
|
||||
let Some(target_node_id) = target_node_ids.get(key.kind) else {
|
||||
@ -77,7 +77,7 @@ impl RPCProcessor {
|
||||
count,
|
||||
watch_id,
|
||||
watcher,
|
||||
vcrypto.clone(),
|
||||
&vcrypto,
|
||||
)?;
|
||||
let question = RPCQuestion::new(
|
||||
network_result_try!(self.get_destination_respond_to(&dest)?),
|
||||
@ -150,7 +150,7 @@ impl RPCProcessor {
|
||||
}
|
||||
|
||||
// Validate peers returned are, in fact, closer to the key than the node we sent this to
|
||||
let valid = match RoutingTable::verify_peers_closer(vcrypto, target_node_id, key, &peers) {
|
||||
let valid = match RoutingTable::verify_peers_closer(&vcrypto, target_node_id, key, &peers) {
|
||||
Ok(v) => v,
|
||||
Err(e) => {
|
||||
return Ok(NetworkResult::invalid_message(format!(
|
||||
@ -230,8 +230,7 @@ impl RPCProcessor {
|
||||
|
||||
// Get target for ValueChanged notifications
|
||||
let dest = network_result_try!(self.get_respond_to_destination(&msg));
|
||||
let rss = routing_table.route_spec_store();
|
||||
let target = dest.get_target(rss)?;
|
||||
let target = dest.get_target(&routing_table)?;
|
||||
|
||||
if debug_target_enabled!("dht") {
|
||||
let debug_string = format!(
|
||||
@ -257,7 +256,9 @@ impl RPCProcessor {
|
||||
.find_preferred_peers_closer_to_key(routing_domain, key, vec![CAP_DHT, CAP_DHT_WATCH]));
|
||||
|
||||
// See if we would have accepted this as a set, same set_value_count for watches
|
||||
let set_value_count = self.with_config(|c| c.network.dht.set_value_count as usize);
|
||||
let set_value_count = self
|
||||
.config()
|
||||
.with(|c| c.network.dht.set_value_count as usize);
|
||||
let (ret_accepted, ret_expiration, ret_watch_id) =
|
||||
if closer_to_key_peers.len() >= set_value_count {
|
||||
// Not close enough, not accepted
|
||||
|
@ -28,13 +28,11 @@ impl StorageManager {
|
||||
#[instrument(level = "trace", target = "dht", skip_all, err)]
|
||||
pub(super) async fn outbound_get_value(
|
||||
&self,
|
||||
rpc_processor: &RPCProcessor,
|
||||
key: TypedKey,
|
||||
subkey: ValueSubkey,
|
||||
safety_selection: SafetySelection,
|
||||
last_get_result: GetResult,
|
||||
) -> VeilidAPIResult<flume::Receiver<VeilidAPIResult<OutboundGetValueResult>>> {
|
||||
let routing_table = self.routing_table();
|
||||
let routing_domain = RoutingDomain::PublicInternet;
|
||||
|
||||
// Get the DHT parameters for 'GetValue'
|
||||
@ -49,9 +47,8 @@ impl StorageManager {
|
||||
|
||||
// Get the nodes we know are caching this value to seed the fanout
|
||||
let init_fanout_queue = {
|
||||
let inner = self.inner.lock().await;
|
||||
inner
|
||||
.get_value_nodes(key)?
|
||||
self.get_value_nodes(key)
|
||||
.await?
|
||||
.unwrap_or_default()
|
||||
.into_iter()
|
||||
.filter(|x| {
|
||||
@ -90,10 +87,9 @@ impl StorageManager {
|
||||
let registry = registry.clone();
|
||||
let last_descriptor = last_get_result.opt_descriptor.clone();
|
||||
async move {
|
||||
let rpc_processor = registry.lookup::<RPCProcessor>().unwrap();
|
||||
let rpc_processor = registry.rpc_processor();
|
||||
let gva = network_result_try!(
|
||||
rpc_processor
|
||||
.clone()
|
||||
.rpc_call_get_value(
|
||||
Destination::direct(next_node.routing_domain_filtered(routing_domain))
|
||||
.with_safety(safety_selection),
|
||||
@ -234,12 +230,14 @@ impl StorageManager {
|
||||
};
|
||||
|
||||
// Call the fanout in a spawned task
|
||||
let registry = self.registry();
|
||||
spawn(
|
||||
"outbound_get_value fanout",
|
||||
Box::pin(
|
||||
async move {
|
||||
let routing_table = registry.routing_table();
|
||||
let fanout_call = FanoutCall::new(
|
||||
routing_table.clone(),
|
||||
&routing_table,
|
||||
key,
|
||||
key_count,
|
||||
fanout,
|
||||
@ -293,22 +291,21 @@ impl StorageManager {
|
||||
}
|
||||
|
||||
#[instrument(level = "trace", target = "dht", skip_all)]
|
||||
pub(super) fn process_deferred_outbound_get_value_result_inner(
|
||||
pub(super) fn process_deferred_outbound_get_value_result(
|
||||
&self,
|
||||
inner: &mut StorageManagerInner,
|
||||
res_rx: flume::Receiver<Result<get_value::OutboundGetValueResult, VeilidAPIError>>,
|
||||
key: TypedKey,
|
||||
subkey: ValueSubkey,
|
||||
last_seq: ValueSeqNum,
|
||||
) {
|
||||
let registry = self.registry();
|
||||
Self::process_deferred_results_inner(inner,
|
||||
self.process_deferred_results(
|
||||
res_rx,
|
||||
Box::new(
|
||||
move |result: VeilidAPIResult<get_value::OutboundGetValueResult>| -> SendPinBoxFuture<bool> {
|
||||
let registry=registry.clone();
|
||||
Box::pin(async move {
|
||||
let this = registry.lookup::<StorageManager>().unwrap();
|
||||
let this = registry.storage_manager();
|
||||
let result = match result {
|
||||
Ok(v) => v,
|
||||
Err(e) => {
|
||||
@ -335,9 +332,7 @@ impl StorageManager {
|
||||
// if the sequence number changed since our first partial update
|
||||
// Send with a max count as this is not attached to any watch
|
||||
if last_seq != value_data.seq() {
|
||||
if let Err(e) = this.update_callback_value_change(key,ValueSubkeyRangeSet::single(subkey), u32::MAX, Some(value_data)).await {
|
||||
log_rtab!(debug "Failed sending deferred fanout value change: {}", e);
|
||||
}
|
||||
this.update_callback_value_change(key,ValueSubkeyRangeSet::single(subkey), u32::MAX, Some(value_data));
|
||||
}
|
||||
|
||||
// Return done
|
||||
@ -376,7 +371,7 @@ impl StorageManager {
|
||||
|
||||
// If we got a new value back then write it to the opened record
|
||||
if Some(get_result_value.value_data().seq()) != opt_last_seq {
|
||||
Self::handle_set_local_value(
|
||||
Self::handle_set_local_value_inner(
|
||||
&mut *inner,
|
||||
key,
|
||||
subkey,
|
||||
|
@ -52,14 +52,12 @@ impl StorageManager {
|
||||
#[instrument(level = "trace", target = "dht", skip_all, err)]
|
||||
pub(super) async fn outbound_inspect_value(
|
||||
&self,
|
||||
rpc_processor: &RPCProcessor,
|
||||
key: TypedKey,
|
||||
subkeys: ValueSubkeyRangeSet,
|
||||
safety_selection: SafetySelection,
|
||||
local_inspect_result: InspectResult,
|
||||
use_set_scope: bool,
|
||||
) -> VeilidAPIResult<OutboundInspectValueResult> {
|
||||
let routing_table = self.routing_table();
|
||||
let routing_domain = RoutingDomain::PublicInternet;
|
||||
|
||||
// Get the DHT parameters for 'InspectValue'
|
||||
@ -84,9 +82,8 @@ impl StorageManager {
|
||||
|
||||
// Get the nodes we know are caching this value to seed the fanout
|
||||
let init_fanout_queue = {
|
||||
let inner = self.inner.lock().await;
|
||||
inner
|
||||
.get_value_nodes(key)?
|
||||
self.get_value_nodes(key)
|
||||
.await?
|
||||
.unwrap_or_default()
|
||||
.into_iter()
|
||||
.filter(|x| {
|
||||
@ -118,125 +115,130 @@ impl StorageManager {
|
||||
}));
|
||||
|
||||
// Routine to call to generate fanout
|
||||
let call_routine = |next_node: NodeRef| {
|
||||
let rpc_processor = rpc_processor.clone();
|
||||
let call_routine = {
|
||||
let context = context.clone();
|
||||
let opt_descriptor = local_inspect_result.opt_descriptor.clone();
|
||||
let subkeys = subkeys.clone();
|
||||
async move {
|
||||
let iva = network_result_try!(
|
||||
rpc_processor
|
||||
.clone()
|
||||
.rpc_call_inspect_value(
|
||||
Destination::direct(next_node.routing_domain_filtered(routing_domain)).with_safety(safety_selection),
|
||||
key,
|
||||
subkeys.clone(),
|
||||
opt_descriptor.map(|x| (*x).clone()),
|
||||
)
|
||||
.await?
|
||||
);
|
||||
let answer = iva.answer;
|
||||
let registry = self.registry();
|
||||
move |next_node: NodeRef| {
|
||||
let context = context.clone();
|
||||
let registry = registry.clone();
|
||||
let opt_descriptor = local_inspect_result.opt_descriptor.clone();
|
||||
let subkeys = subkeys.clone();
|
||||
async move {
|
||||
let rpc_processor = registry.rpc_processor();
|
||||
|
||||
// Keep the descriptor if we got one. If we had a last_descriptor it will
|
||||
// already be validated by rpc_call_inspect_value
|
||||
if let Some(descriptor) = answer.descriptor {
|
||||
let mut ctx = context.lock();
|
||||
if ctx.opt_descriptor_info.is_none() {
|
||||
// Get the descriptor info. This also truncates the subkeys list to what can be returned from the network.
|
||||
let descriptor_info =
|
||||
match DescriptorInfo::new(Arc::new(descriptor.clone()), &subkeys) {
|
||||
Ok(v) => v,
|
||||
Err(e) => {
|
||||
return Ok(NetworkResult::invalid_message(e));
|
||||
}
|
||||
};
|
||||
ctx.opt_descriptor_info = Some(descriptor_info);
|
||||
}
|
||||
}
|
||||
let iva = network_result_try!(
|
||||
rpc_processor
|
||||
.rpc_call_inspect_value(
|
||||
Destination::direct(next_node.routing_domain_filtered(routing_domain)).with_safety(safety_selection),
|
||||
key,
|
||||
subkeys.clone(),
|
||||
opt_descriptor.map(|x| (*x).clone()),
|
||||
)
|
||||
.await?
|
||||
);
|
||||
let answer = iva.answer;
|
||||
|
||||
// Keep the value if we got one and it is newer and it passes schema validation
|
||||
if !answer.seqs.is_empty() {
|
||||
log_dht!(debug "Got seqs back: len={}", answer.seqs.len());
|
||||
let mut ctx = context.lock();
|
||||
|
||||
// Ensure we have a schema and descriptor etc
|
||||
let Some(descriptor_info) = &ctx.opt_descriptor_info else {
|
||||
// Got a value but no descriptor for it
|
||||
// Move to the next node
|
||||
return Ok(NetworkResult::invalid_message(
|
||||
"Got inspection with no descriptor",
|
||||
));
|
||||
};
|
||||
|
||||
// Get number of subkeys from schema and ensure we are getting the
|
||||
// right number of sequence numbers between that and what we asked for
|
||||
#[allow(clippy::unnecessary_cast)]
|
||||
if answer.seqs.len() as u64 != descriptor_info.subkeys.len() as u64 {
|
||||
// Not the right number of sequence numbers
|
||||
// Move to the next node
|
||||
return Ok(NetworkResult::invalid_message(format!(
|
||||
"wrong number of seqs returned {} (wanted {})",
|
||||
answer.seqs.len(),
|
||||
descriptor_info.subkeys.len()
|
||||
)));
|
||||
// Keep the descriptor if we got one. If we had a last_descriptor it will
|
||||
// already be validated by rpc_call_inspect_value
|
||||
if let Some(descriptor) = answer.descriptor {
|
||||
let mut ctx = context.lock();
|
||||
if ctx.opt_descriptor_info.is_none() {
|
||||
// Get the descriptor info. This also truncates the subkeys list to what can be returned from the network.
|
||||
let descriptor_info =
|
||||
match DescriptorInfo::new(Arc::new(descriptor.clone()), &subkeys) {
|
||||
Ok(v) => v,
|
||||
Err(e) => {
|
||||
return Ok(NetworkResult::invalid_message(e));
|
||||
}
|
||||
};
|
||||
ctx.opt_descriptor_info = Some(descriptor_info);
|
||||
}
|
||||
}
|
||||
|
||||
// If we have a prior seqs list, merge in the new seqs
|
||||
if ctx.seqcounts.is_empty() {
|
||||
ctx.seqcounts = answer
|
||||
.seqs
|
||||
.iter()
|
||||
.map(|s| SubkeySeqCount {
|
||||
seq: *s,
|
||||
// One node has shown us the newest sequence numbers so far
|
||||
value_nodes: if *s == ValueSeqNum::MAX {
|
||||
vec![]
|
||||
} else {
|
||||
vec![next_node.clone()]
|
||||
},
|
||||
})
|
||||
.collect();
|
||||
} else {
|
||||
if ctx.seqcounts.len() != answer.seqs.len() {
|
||||
return Err(RPCError::internal(
|
||||
"seqs list length should always be equal by now",
|
||||
// Keep the value if we got one and it is newer and it passes schema validation
|
||||
if !answer.seqs.is_empty() {
|
||||
log_dht!(debug "Got seqs back: len={}", answer.seqs.len());
|
||||
let mut ctx = context.lock();
|
||||
|
||||
// Ensure we have a schema and descriptor etc
|
||||
let Some(descriptor_info) = &ctx.opt_descriptor_info else {
|
||||
// Got a value but no descriptor for it
|
||||
// Move to the next node
|
||||
return Ok(NetworkResult::invalid_message(
|
||||
"Got inspection with no descriptor",
|
||||
));
|
||||
};
|
||||
|
||||
// Get number of subkeys from schema and ensure we are getting the
|
||||
// right number of sequence numbers between that and what we asked for
|
||||
#[allow(clippy::unnecessary_cast)]
|
||||
if answer.seqs.len() as u64 != descriptor_info.subkeys.len() as u64 {
|
||||
// Not the right number of sequence numbers
|
||||
// Move to the next node
|
||||
return Ok(NetworkResult::invalid_message(format!(
|
||||
"wrong number of seqs returned {} (wanted {})",
|
||||
answer.seqs.len(),
|
||||
descriptor_info.subkeys.len()
|
||||
)));
|
||||
}
|
||||
for pair in ctx.seqcounts.iter_mut().zip(answer.seqs.iter()) {
|
||||
let ctx_seqcnt = pair.0;
|
||||
let answer_seq = *pair.1;
|
||||
|
||||
// If we already have consensus for this subkey, don't bother updating it any more
|
||||
// While we may find a better sequence number if we keep looking, this does not mimic the behavior
|
||||
// of get and set unless we stop here
|
||||
if ctx_seqcnt.value_nodes.len() >= consensus_count {
|
||||
continue;
|
||||
// If we have a prior seqs list, merge in the new seqs
|
||||
if ctx.seqcounts.is_empty() {
|
||||
ctx.seqcounts = answer
|
||||
.seqs
|
||||
.iter()
|
||||
.map(|s| SubkeySeqCount {
|
||||
seq: *s,
|
||||
// One node has shown us the newest sequence numbers so far
|
||||
value_nodes: if *s == ValueSeqNum::MAX {
|
||||
vec![]
|
||||
} else {
|
||||
vec![next_node.clone()]
|
||||
},
|
||||
})
|
||||
.collect();
|
||||
} else {
|
||||
if ctx.seqcounts.len() != answer.seqs.len() {
|
||||
return Err(RPCError::internal(
|
||||
"seqs list length should always be equal by now",
|
||||
));
|
||||
}
|
||||
for pair in ctx.seqcounts.iter_mut().zip(answer.seqs.iter()) {
|
||||
let ctx_seqcnt = pair.0;
|
||||
let answer_seq = *pair.1;
|
||||
|
||||
// If the new seq isn't undefined and is better than the old seq (either greater or old is undefined)
|
||||
// Then take that sequence number and note that we have gotten newer sequence numbers so we keep
|
||||
// looking for consensus
|
||||
// If the sequence number matches the old sequence number, then we keep the value node for reference later
|
||||
if answer_seq != ValueSeqNum::MAX {
|
||||
if ctx_seqcnt.seq == ValueSeqNum::MAX || answer_seq > ctx_seqcnt.seq
|
||||
{
|
||||
// One node has shown us the latest sequence numbers so far
|
||||
ctx_seqcnt.seq = answer_seq;
|
||||
ctx_seqcnt.value_nodes = vec![next_node.clone()];
|
||||
} else if answer_seq == ctx_seqcnt.seq {
|
||||
// Keep the nodes that showed us the latest values
|
||||
ctx_seqcnt.value_nodes.push(next_node.clone());
|
||||
// If we already have consensus for this subkey, don't bother updating it any more
|
||||
// While we may find a better sequence number if we keep looking, this does not mimic the behavior
|
||||
// of get and set unless we stop here
|
||||
if ctx_seqcnt.value_nodes.len() >= consensus_count {
|
||||
continue;
|
||||
}
|
||||
|
||||
// If the new seq isn't undefined and is better than the old seq (either greater or old is undefined)
|
||||
// Then take that sequence number and note that we have gotten newer sequence numbers so we keep
|
||||
// looking for consensus
|
||||
// If the sequence number matches the old sequence number, then we keep the value node for reference later
|
||||
if answer_seq != ValueSeqNum::MAX {
|
||||
if ctx_seqcnt.seq == ValueSeqNum::MAX || answer_seq > ctx_seqcnt.seq
|
||||
{
|
||||
// One node has shown us the latest sequence numbers so far
|
||||
ctx_seqcnt.seq = answer_seq;
|
||||
ctx_seqcnt.value_nodes = vec![next_node.clone()];
|
||||
} else if answer_seq == ctx_seqcnt.seq {
|
||||
// Keep the nodes that showed us the latest values
|
||||
ctx_seqcnt.value_nodes.push(next_node.clone());
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Return peers if we have some
|
||||
log_network_result!(debug "InspectValue fanout call returned peers {}", answer.peers.len());
|
||||
// Return peers if we have some
|
||||
log_network_result!(debug "InspectValue fanout call returned peers {}", answer.peers.len());
|
||||
|
||||
Ok(NetworkResult::value(FanoutCallOutput { peer_info_list: answer.peers}))
|
||||
}.instrument(tracing::trace_span!("outbound_inspect_value fanout call"))
|
||||
Ok(NetworkResult::value(FanoutCallOutput { peer_info_list: answer.peers}))
|
||||
}.instrument(tracing::trace_span!("outbound_inspect_value fanout call"))
|
||||
}
|
||||
};
|
||||
|
||||
// Routine to call to check if we're done at each step
|
||||
@ -257,8 +259,9 @@ impl StorageManager {
|
||||
};
|
||||
|
||||
// Call the fanout
|
||||
let routing_table = self.routing_table();
|
||||
let fanout_call = FanoutCall::new(
|
||||
routing_table.clone(),
|
||||
&routing_table,
|
||||
key,
|
||||
key_count,
|
||||
fanout,
|
||||
@ -325,14 +328,14 @@ impl StorageManager {
|
||||
subkeys: ValueSubkeyRangeSet,
|
||||
want_descriptor: bool,
|
||||
) -> VeilidAPIResult<NetworkResult<InspectResult>> {
|
||||
let mut inner = self.lock().await?;
|
||||
let mut inner = self.inner.lock().await;
|
||||
|
||||
// See if this is a remote or local value
|
||||
let (_is_local, inspect_result) = {
|
||||
// See if the subkey we are getting has a last known local value
|
||||
let mut local_inspect_result = inner
|
||||
.handle_inspect_local_value(key, subkeys.clone(), true)
|
||||
.await?;
|
||||
let mut local_inspect_result =
|
||||
Self::handle_inspect_local_value_inner(&mut *inner, key, subkeys.clone(), true)
|
||||
.await?;
|
||||
// If this is local, it must have a descriptor already
|
||||
if local_inspect_result.opt_descriptor.is_some() {
|
||||
if !want_descriptor {
|
||||
@ -341,9 +344,13 @@ impl StorageManager {
|
||||
(true, local_inspect_result)
|
||||
} else {
|
||||
// See if the subkey we are getting has a last known remote value
|
||||
let remote_inspect_result = inner
|
||||
.handle_inspect_remote_value(key, subkeys, want_descriptor)
|
||||
.await?;
|
||||
let remote_inspect_result = Self::handle_inspect_remote_value_inner(
|
||||
&mut *inner,
|
||||
key,
|
||||
subkeys,
|
||||
want_descriptor,
|
||||
)
|
||||
.await?;
|
||||
(false, remote_inspect_result)
|
||||
}
|
||||
};
|
||||
|
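The seqcounts merge in the re-indented closure above follows one rule per subkey: skip subkeys that already reached consensus, ignore undefined sequence numbers, reset the supporting-node list on a strictly newer sequence, and add a supporting node on an equal one. A stripped-down sketch of that merge follows, with illustrative types in place of SubkeySeqCount and NodeRef.

const SEQ_UNDEFINED: u32 = u32::MAX;

// Illustrative per-subkey consensus state, in the spirit of SubkeySeqCount.
#[derive(Debug)]
struct SeqCount {
    seq: u32,
    value_nodes: Vec<&'static str>, // ids of peers that reported `seq`
}

// Merge one peer's reported sequence numbers into the running consensus state.
fn merge_seqs(
    counts: &mut [SeqCount],
    answer_seqs: &[u32],
    node: &'static str,
    consensus: usize,
) {
    for (count, &answer_seq) in counts.iter_mut().zip(answer_seqs.iter()) {
        // Already have consensus for this subkey; stop refining it.
        if count.value_nodes.len() >= consensus {
            continue;
        }
        // Undefined sequence numbers carry no information.
        if answer_seq == SEQ_UNDEFINED {
            continue;
        }
        if count.seq == SEQ_UNDEFINED || answer_seq > count.seq {
            // Strictly newer: restart the supporting-node list.
            count.seq = answer_seq;
            count.value_nodes = vec![node];
        } else if answer_seq == count.seq {
            // Same sequence: this peer supports the current value.
            count.value_nodes.push(node);
        }
    }
}

fn main() {
    let mut counts = vec![
        SeqCount { seq: SEQ_UNDEFINED, value_nodes: vec![] },
        SeqCount { seq: 3, value_nodes: vec!["node-a"] },
    ];
    merge_seqs(&mut counts, &[5, 3], "node-b", 2);
    assert_eq!(counts[0].seq, 5);
    assert_eq!(counts[0].value_nodes, vec!["node-b"]);
    assert_eq!(counts[1].value_nodes, vec!["node-a", "node-b"]);
}
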
@ -61,8 +61,6 @@ struct StorageManagerInner {
|
||||
pub metadata_db: Option<TableDB>,
|
||||
/// Background processing task (not part of attachment manager tick tree so it happens when detached too)
|
||||
pub tick_future: Option<SendPinBoxFuture<()>>,
|
||||
/// Deferred result processor
|
||||
pub deferred_result_processor: DeferredStreamProcessor,
|
||||
}
|
||||
|
||||
impl fmt::Debug for StorageManagerInner {
|
||||
@ -75,7 +73,6 @@ impl fmt::Debug for StorageManagerInner {
|
||||
.field("offline_subkey_writes", &self.offline_subkey_writes)
|
||||
//.field("metadata_db", &self.metadata_db)
|
||||
//.field("tick_future", &self.tick_future)
|
||||
.field("deferred_result_processor", &self.deferred_result_processor)
|
||||
.finish()
|
||||
}
|
||||
}
|
||||
@ -93,6 +90,9 @@ pub(crate) struct StorageManager {
|
||||
|
||||
// Anonymous watch keys
|
||||
anonymous_watch_keys: TypedKeyPairGroup,
|
||||
|
||||
/// Deferred result processor
|
||||
deferred_result_processor: DeferredStreamProcessor,
|
||||
}
|
||||
|
||||
impl fmt::Debug for StorageManager {
|
||||
@ -111,6 +111,7 @@ impl fmt::Debug for StorageManager {
|
||||
// "check_watched_records_task",
|
||||
// &self.check_watched_records_task,
|
||||
// )
|
||||
.field("deferred_result_processor", &self.deferred_result_processor)
|
||||
.field("anonymous_watch_keys", &self.anonymous_watch_keys)
|
||||
.finish()
|
||||
}
|
||||
@ -124,7 +125,7 @@ impl StorageManager {
|
||||
}
|
||||
|
||||
pub fn new(registry: VeilidComponentRegistry) -> StorageManager {
|
||||
let crypto = registry.lookup::<Crypto>().unwrap();
|
||||
let crypto = registry.crypto();
|
||||
|
||||
// Generate keys to use for anonymous watches
|
||||
let mut anonymous_watch_keys = TypedKeyPairGroup::new();
|
||||
@ -161,6 +162,7 @@ impl StorageManager {
|
||||
),
|
||||
|
||||
anonymous_watch_keys,
|
||||
deferred_result_processor: DeferredStreamProcessor::new(),
|
||||
};
|
||||
|
||||
this.setup_tasks();
|
||||
@ -224,14 +226,16 @@ impl StorageManager {
|
||||
let remote_record_store =
|
||||
RecordStore::try_create(&table_store, "remote", remote_limits).await?;
|
||||
|
||||
let mut inner = self.inner.lock().await;
|
||||
inner.metadata_db = Some(metadata_db);
|
||||
inner.local_record_store = Some(local_record_store);
|
||||
inner.remote_record_store = Some(remote_record_store);
|
||||
Self::load_metadata(&mut *inner).await?;
|
||||
{
|
||||
let mut inner = self.inner.lock().await;
|
||||
inner.metadata_db = Some(metadata_db);
|
||||
inner.local_record_store = Some(local_record_store);
|
||||
inner.remote_record_store = Some(remote_record_store);
|
||||
Self::load_metadata(&mut *inner).await?;
|
||||
}
|
||||
|
||||
// Start deferred results processors
|
||||
inner.deferred_result_processor.init().await;
|
||||
self.deferred_result_processor.init().await;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
@ -245,7 +249,7 @@ impl StorageManager {
|
||||
let tick_future = interval("storage manager tick", 1000, move || {
|
||||
let registry = registry.clone();
|
||||
async move {
|
||||
let this = registry.lookup::<StorageManager>().unwrap();
|
||||
let this = registry.storage_manager();
|
||||
if let Err(e) = this.tick().await {
|
||||
log_stor!(warn "storage manager tick failed: {}", e);
|
||||
}
|
||||
@ -276,13 +280,13 @@ impl StorageManager {
|
||||
async fn terminate_async(&self) {
|
||||
log_stor!(debug "starting storage manager shutdown");
|
||||
|
||||
// Stop deferred result processor
|
||||
self.deferred_result_processor.terminate().await;
|
||||
|
||||
// Terminate and release the storage manager
|
||||
{
|
||||
let mut inner = self.inner.lock().await;
|
||||
|
||||
// Stop deferred result processor
|
||||
inner.deferred_result_processor.terminate().await;
|
||||
|
||||
// Final flush on record stores
|
||||
if let Some(mut local_record_store) = inner.local_record_store.take() {
|
||||
if let Err(e) = local_record_store.flush().await {
|
||||
@ -334,28 +338,22 @@ impl StorageManager {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub(super) fn get_ready_rpc_processor(&self) -> Option<VeilidComponentGuard<'_, RPCProcessor>> {
|
||||
let Some(rpc_processor) = self.registry().lookup::<RPCProcessor>() else {
|
||||
return None;
|
||||
};
|
||||
|
||||
// Check if we have published peer info
|
||||
// Note, this is a best-effort check, subject to race conditions on the network's state
|
||||
let Some(routing_table) = self.registry().lookup::<RoutingTable>() else {
|
||||
return None;
|
||||
};
|
||||
routing_table.get_published_peer_info(RoutingDomain::PublicInternet)?;
|
||||
|
||||
// Return the RPC processor if we think we're ready to send messages
|
||||
Some(rpc_processor)
|
||||
}
|
||||
|
||||
pub(super) async fn has_offline_subkey_writes(&self) -> bool {
|
||||
!self.inner.lock().await.offline_subkey_writes.is_empty()
|
||||
}
|
||||
|
||||
pub(super) fn online_writes_ready(&self) -> bool {
|
||||
self.get_ready_rpc_processor().is_some()
|
||||
pub(super) fn dht_is_online(&self) -> bool {
|
||||
// Check if we have published peer info
|
||||
// Note, this is a best-effort check, subject to race conditions on the network's state
|
||||
if self
|
||||
.routing_table()
|
||||
.get_published_peer_info(RoutingDomain::PublicInternet)
|
||||
.is_none()
|
||||
{
|
||||
return false;
|
||||
}
|
||||
|
||||
true
|
||||
}
|
||||
|
||||
/// Get the set of nodes in our active watches
|
||||
@ -441,9 +439,7 @@ impl StorageManager {
|
||||
}
|
||||
|
||||
// No record yet, try to get it from the network
|
||||
|
||||
// Get rpc processor and drop mutex so we don't block while getting the value from the network
|
||||
let Some(rpc_processor) = self.get_ready_rpc_processor() else {
|
||||
if !self.dht_is_online() {
|
||||
apibail_try_again!("offline, try again later");
|
||||
};
|
||||
|
||||
@ -454,13 +450,7 @@ impl StorageManager {
|
||||
// Use the safety selection we opened the record with
|
||||
let subkey: ValueSubkey = 0;
|
||||
let res_rx = self
|
||||
.outbound_get_value(
|
||||
&rpc_processor,
|
||||
key,
|
||||
subkey,
|
||||
safety_selection,
|
||||
GetResult::default(),
|
||||
)
|
||||
.outbound_get_value(key, subkey, safety_selection, GetResult::default())
|
||||
.await?;
|
||||
// Wait for the first result
|
||||
let Ok(result) = res_rx.recv_async().await else {
|
||||
@ -480,29 +470,34 @@ impl StorageManager {
|
||||
.map(|s| s.value_data().seq());
|
||||
|
||||
// Reopen inner to store value we just got
|
||||
let mut inner = self.lock().await?;
|
||||
let out = {
|
||||
let mut inner = self.inner.lock().await;
|
||||
|
||||
// Check again to see if we have a local record already or not
|
||||
// because waiting for the outbound_get_value action could result in the key being opened
|
||||
// via some parallel process
|
||||
// Check again to see if we have a local record already or not
|
||||
// because waiting for the outbound_get_value action could result in the key being opened
|
||||
// via some parallel process
|
||||
|
||||
if let Some(res) = inner
|
||||
.open_existing_record(key, writer, safety_selection)
|
||||
.await?
|
||||
{
|
||||
return Ok(res);
|
||||
}
|
||||
if let Some(res) =
|
||||
Self::open_existing_record_inner(&mut *inner, key, writer, safety_selection).await?
|
||||
{
|
||||
return Ok(res);
|
||||
}
|
||||
|
||||
// Open the new record
|
||||
let out = inner
|
||||
.open_new_record(key, writer, subkey, result.get_result, safety_selection)
|
||||
.await;
|
||||
// Open the new record
|
||||
Self::open_new_record_inner(
|
||||
&mut *inner,
|
||||
key,
|
||||
writer,
|
||||
subkey,
|
||||
result.get_result,
|
||||
safety_selection,
|
||||
)
|
||||
.await
|
||||
};
|
||||
|
||||
if out.is_ok() {
|
||||
if let Some(last_seq) = opt_last_seq {
|
||||
self.process_deferred_outbound_get_value_result_inner(
|
||||
&mut inner, res_rx, key, subkey, last_seq,
|
||||
);
|
||||
self.process_deferred_outbound_get_value_result(res_rx, key, subkey, last_seq);
|
||||
}
|
||||
}
|
||||
out
|
||||
@ -511,53 +506,54 @@ impl StorageManager {
|
||||
/// Close an opened local record
|
||||
#[instrument(level = "trace", target = "stor", skip_all)]
|
||||
pub async fn close_record(&self, key: TypedKey) -> VeilidAPIResult<()> {
|
||||
let (opt_opened_record, opt_rpc_processor) = {
|
||||
// Attempt to close the record, returning the opened record if it wasn't already closed
|
||||
let opened_record = {
|
||||
let mut inner = self.inner.lock().await;
|
||||
(
|
||||
Self::close_record_inner(&mut *inner, key)?,
|
||||
self.get_ready_rpc_processor(),
|
||||
)
|
||||
let Some(opened_record) = Self::close_record_inner(&mut *inner, key)? else {
|
||||
return Ok(());
|
||||
};
|
||||
opened_record
|
||||
};
|
||||
|
||||
// See if we have an active watch on the closed record
|
||||
let Some(active_watch) = opened_record.active_watch() else {
|
||||
return Ok(());
|
||||
};
|
||||
|
||||
// Send a one-time cancel request for the watch if we have one and we're online
|
||||
if let Some(opened_record) = opt_opened_record {
|
||||
if let Some(active_watch) = opened_record.active_watch() {
|
||||
if let Some(rpc_processor) = opt_rpc_processor {
|
||||
// Use the safety selection we opened the record with
|
||||
// Use the writer we opened with as the 'watcher' as well
|
||||
let opt_owvresult = match self
|
||||
.outbound_watch_value_cancel(
|
||||
rpc_processor,
|
||||
key,
|
||||
ValueSubkeyRangeSet::full(),
|
||||
opened_record.safety_selection(),
|
||||
opened_record.writer().cloned(),
|
||||
active_watch.id,
|
||||
active_watch.watch_node,
|
||||
)
|
||||
.await
|
||||
{
|
||||
Ok(v) => v,
|
||||
Err(e) => {
|
||||
log_stor!(debug
|
||||
"close record watch cancel failed: {}", e
|
||||
);
|
||||
None
|
||||
}
|
||||
};
|
||||
if let Some(owvresult) = opt_owvresult {
|
||||
if owvresult.expiration_ts.as_u64() != 0 {
|
||||
log_stor!(debug
|
||||
"close record watch cancel should have zero expiration"
|
||||
);
|
||||
}
|
||||
} else {
|
||||
log_stor!(debug "close record watch cancel unsuccessful");
|
||||
}
|
||||
} else {
|
||||
log_stor!(debug "skipping last-ditch watch cancel because we are offline");
|
||||
}
|
||||
if !self.dht_is_online() {
|
||||
log_stor!(debug "skipping last-ditch watch cancel because we are offline");
|
||||
return Ok(());
|
||||
}
|
||||
// Use the safety selection we opened the record with
|
||||
// Use the writer we opened with as the 'watcher' as well
|
||||
let opt_owvresult = match self
|
||||
.outbound_watch_value_cancel(
|
||||
key,
|
||||
ValueSubkeyRangeSet::full(),
|
||||
opened_record.safety_selection(),
|
||||
opened_record.writer().cloned(),
|
||||
active_watch.id,
|
||||
active_watch.watch_node,
|
||||
)
|
||||
.await
|
||||
{
|
||||
Ok(v) => v,
|
||||
Err(e) => {
|
||||
log_stor!(debug
|
||||
"close record watch cancel failed: {}", e
|
||||
);
|
||||
None
|
||||
}
|
||||
};
|
||||
if let Some(owvresult) = opt_owvresult {
|
||||
if owvresult.expiration_ts.as_u64() != 0 {
|
||||
log_stor!(debug
|
||||
"close record watch cancel should have zero expiration"
|
||||
);
|
||||
}
|
||||
} else {
|
||||
log_stor!(debug "close record watch cancel unsuccessful");
|
||||
}
|
||||
|
||||
Ok(())
|
||||
@ -607,9 +603,7 @@ impl StorageManager {
|
||||
}
|
||||
|
||||
// Refresh if we can
|
||||
|
||||
// Get rpc processor and drop mutex so we don't block while getting the value from the network
|
||||
let Some(rpc_processor) = self.get_ready_rpc_processor() else {
|
||||
if !self.dht_is_online() {
|
||||
// Return the existing value if we have one if we aren't online
|
||||
if let Some(last_get_result_value) = last_get_result.opt_value {
|
||||
return Ok(Some(last_get_result_value.value_data().clone()));
|
||||
@ -627,13 +621,7 @@ impl StorageManager {
|
||||
.as_ref()
|
||||
.map(|v| v.value_data().seq());
|
||||
let res_rx = self
|
||||
.outbound_get_value(
|
||||
&rpc_processor,
|
||||
key,
|
||||
subkey,
|
||||
safety_selection,
|
||||
last_get_result,
|
||||
)
|
||||
.outbound_get_value(key, subkey, safety_selection, last_get_result)
|
||||
.await?;
|
||||
|
||||
// Wait for the first result
|
||||
@ -651,14 +639,7 @@ impl StorageManager {
|
||||
if let Some(out) = &out {
|
||||
// If there's more to process, do it in the background
|
||||
if partial {
|
||||
let mut inner = self.inner.lock().await;
|
||||
self.process_deferred_outbound_get_value_result_inner(
|
||||
&mut inner,
|
||||
res_rx,
|
||||
key,
|
||||
subkey,
|
||||
out.seq(),
|
||||
);
|
||||
self.process_deferred_outbound_get_value_result(res_rx, key, subkey, out.seq());
|
||||
}
|
||||
}
|
||||
|
||||
@ -751,8 +732,7 @@ impl StorageManager {
|
||||
)
|
||||
.await?;
|
||||
|
||||
// Get rpc processor and drop mutex so we don't block while getting the value from the network
|
||||
let Some(rpc_processor) = self.get_ready_rpc_processor() else {
|
||||
if !self.dht_is_online() {
|
||||
log_stor!(debug "Writing subkey offline: {}:{} len={}", key, subkey, signed_value_data.value_data().data().len() );
|
||||
// Add to offline writes to flush
|
||||
Self::add_offline_subkey_write_inner(&mut *inner, key, subkey, safety_selection);
|
||||
@ -767,7 +747,6 @@ impl StorageManager {
|
||||
// Use the safety selection we opened the record with
|
||||
let res_rx = match self
|
||||
.outbound_set_value(
|
||||
&rpc_processor,
|
||||
key,
|
||||
subkey,
|
||||
safety_selection,
|
||||
@ -805,9 +784,7 @@ impl StorageManager {
|
||||
|
||||
// If there's more to process, do it in the background
|
||||
if partial {
|
||||
let mut inner = self.lock().await?;
|
||||
self.process_deferred_outbound_set_value_result_inner(
|
||||
&mut inner,
|
||||
self.process_deferred_outbound_set_value_result(
|
||||
res_rx,
|
||||
key,
|
||||
subkey,
|
||||
@ -829,7 +806,7 @@ impl StorageManager {
|
||||
expiration: Timestamp,
|
||||
count: u32,
|
||||
) -> VeilidAPIResult<Timestamp> {
|
||||
let inner = self.lock().await?;
|
||||
let inner = self.inner.lock().await;
|
||||
|
||||
// Get the safety selection and the writer we opened this record
|
||||
// and whatever active watch id and watch node we may have in case this is a watch update
|
||||
@ -864,7 +841,7 @@ impl StorageManager {
|
||||
let subkeys = schema.truncate_subkeys(&subkeys, None);
|
||||
|
||||
// Get rpc processor and drop mutex so we don't block while requesting the watch from the network
|
||||
let Some(rpc_processor) = Self::get_ready_rpc_processor(&inner) else {
|
||||
if !self.dht_is_online() {
|
||||
apibail_try_again!("offline, try again later");
|
||||
};
|
||||
|
||||
@ -875,7 +852,6 @@ impl StorageManager {
|
||||
// Use the writer we opened with as the 'watcher' as well
|
||||
let opt_owvresult = self
|
||||
.outbound_watch_value(
|
||||
rpc_processor,
|
||||
key,
|
||||
subkeys.clone(),
|
||||
expiration,
|
||||
@ -892,7 +868,7 @@ impl StorageManager {
|
||||
};
|
||||
|
||||
// Clear any existing watch if the watch succeeded or got cancelled
|
||||
let mut inner = self.lock().await?;
|
||||
let mut inner = self.inner.lock().await;
|
||||
let Some(opened_record) = inner.opened_records.get_mut(&key) else {
|
||||
apibail_generic!("record not open");
|
||||
};
|
||||
@ -953,7 +929,7 @@ impl StorageManager {
|
||||
subkeys: ValueSubkeyRangeSet,
|
||||
) -> VeilidAPIResult<bool> {
|
||||
let (subkeys, active_watch) = {
|
||||
let inner = self.lock().await?;
|
||||
let inner = self.inner.lock().await;
|
||||
let Some(opened_record) = inner.opened_records.get(&key) else {
|
||||
apibail_generic!("record not open");
|
||||
};
|
||||
@ -1015,7 +991,7 @@ impl StorageManager {
|
||||
subkeys
|
||||
};
|
||||
|
||||
let mut inner = self.lock().await?;
|
||||
let mut inner = self.inner.lock().await;
|
||||
let safety_selection = {
|
||||
let Some(opened_record) = inner.opened_records.get(&key) else {
|
||||
apibail_generic!("record not open");
|
||||
@ -1024,9 +1000,8 @@ impl StorageManager {
|
||||
};
|
||||
|
||||
// See if the requested record is our local record store
|
||||
let mut local_inspect_result = inner
|
||||
.handle_inspect_local_value(key, subkeys.clone(), true)
|
||||
.await?;
|
||||
let mut local_inspect_result =
|
||||
Self::handle_inspect_local_value_inner(&mut *inner, key, subkeys.clone(), true).await?;
|
||||
|
||||
#[allow(clippy::unnecessary_cast)]
|
||||
{
|
||||
@ -1059,7 +1034,7 @@ impl StorageManager {
|
||||
}
|
||||
|
||||
// Get rpc processor and drop mutex so we don't block while getting the value from the network
|
||||
let Some(rpc_processor) = Self::get_ready_rpc_processor(&inner) else {
|
||||
if !self.dht_is_online() {
|
||||
apibail_try_again!("offline, try again later");
|
||||
};
|
||||
|
||||
@ -1076,7 +1051,6 @@ impl StorageManager {
|
||||
// Get the inspect record report from the network
|
||||
let result = self
|
||||
.outbound_inspect_value(
|
||||
rpc_processor,
|
||||
key,
|
||||
subkeys,
|
||||
safety_selection,
|
||||
@ -1107,14 +1081,21 @@ impl StorageManager {
|
||||
}
|
||||
|
||||
// Keep the list of nodes that returned a value for later reference
|
||||
let mut inner = self.lock().await?;
|
||||
let mut inner = self.inner.lock().await;
|
||||
let results_iter = result
|
||||
.inspect_result
|
||||
.subkeys
|
||||
.iter()
|
||||
.zip(result.fanout_results.iter());
|
||||
|
||||
inner.process_fanout_results(key, results_iter, false);
|
||||
Self::process_fanout_results_inner(
|
||||
&mut *inner,
|
||||
key,
|
||||
results_iter,
|
||||
false,
|
||||
self.config()
|
||||
.with(|c| c.network.dht.set_value_count as usize),
|
||||
);
|
||||
|
||||
Ok(DHTRecordReport::new(
|
||||
result.inspect_result.subkeys,
|
||||
@ -1127,15 +1108,12 @@ impl StorageManager {
|
||||
// Send single value change out to the network
|
||||
#[instrument(level = "trace", target = "stor", skip(self), err)]
|
||||
async fn send_value_change(&self, vc: ValueChangedInfo) -> VeilidAPIResult<()> {
|
||||
let rpc_processor = {
|
||||
let inner = self.inner.lock().await;
|
||||
if let Some(rpc_processor) = Self::get_ready_rpc_processor(&inner) {
|
||||
rpc_processor.clone()
|
||||
} else {
|
||||
apibail_try_again!("network is not available");
|
||||
}
|
||||
if !self.dht_is_online() {
|
||||
apibail_try_again!("network is not available");
|
||||
};
|
||||
|
||||
let rpc_processor = self.rpc_processor();
|
||||
|
||||
let dest = rpc_processor
|
||||
.resolve_target_to_destination(
|
||||
vc.target,
|
||||
@ -1153,7 +1131,7 @@ impl StorageManager {
|
||||
}
|
||||
|
||||
// Send a value change up through the callback
|
||||
#[instrument(level = "trace", target = "stor", skip(self, value), err)]
|
||||
#[instrument(level = "trace", target = "stor", skip(self, value))]
|
||||
fn update_callback_value_change(
|
||||
&self,
|
||||
key: TypedKey,
|
||||
@ -1266,7 +1244,7 @@ impl StorageManager {
|
||||
let signed_value_descriptor = Arc::new(SignedValueDescriptor::make_signature(
|
||||
owner.key,
|
||||
schema_data,
|
||||
vcrypto.clone(),
|
||||
&vcrypto,
|
||||
owner.secret,
|
||||
)?);
|
||||
|
||||
@ -1369,9 +1347,9 @@ impl StorageManager {
|
||||
None => {
|
||||
// If we don't have a local record yet, check to see if we have a remote record
|
||||
// if so, migrate it to a local record
|
||||
let Some(v) = inner
|
||||
.move_remote_record_to_local_inner(key, safety_selection)
|
||||
.await?
|
||||
let Some(v) =
|
||||
Self::move_remote_record_to_local_inner(&mut *inner, key, safety_selection)
|
||||
.await?
|
||||
else {
|
||||
// No remote record either
|
||||
return Ok(None);
|
||||
@ -1475,19 +1453,15 @@ impl StorageManager {
|
||||
}
|
||||
|
||||
#[instrument(level = "trace", target = "stor", skip_all, err)]
|
||||
pub fn get_value_nodes_inner(
|
||||
inner: &mut StorageManagerInner,
|
||||
key: TypedKey,
|
||||
) -> VeilidAPIResult<Option<Vec<NodeRef>>> {
|
||||
pub async fn get_value_nodes(&self, key: TypedKey) -> VeilidAPIResult<Option<Vec<NodeRef>>> {
|
||||
let inner = self.inner.lock().await;
|
||||
// Get local record store
|
||||
let Some(local_record_store) = inner.local_record_store.as_ref() else {
|
||||
apibail_not_initialized!();
|
||||
};
|
||||
|
||||
// Get routing table to see if we still know about these nodes
|
||||
let Some(routing_table) = self.opt_rpc_processor.as_ref().map(|r| r.routing_table()) else {
|
||||
apibail_try_again!("offline, try again later");
|
||||
};
|
||||
let routing_table = self.routing_table();
|
||||
|
||||
let opt_value_nodes = local_record_store.peek_record(key, |r| {
|
||||
let d = r.detail();
|
||||
@ -1755,13 +1729,12 @@ impl StorageManager {
|
||||
}
|
||||
|
||||
#[instrument(level = "trace", target = "stor", skip_all)]
|
||||
pub(super) fn process_deferred_results_inner<T: Send + 'static>(
|
||||
inner: &mut StorageManagerInner,
|
||||
pub(super) fn process_deferred_results<T: Send + 'static>(
|
||||
&self,
|
||||
receiver: flume::Receiver<T>,
|
||||
handler: impl FnMut(T) -> SendPinBoxFuture<bool> + Send + 'static,
|
||||
) -> bool {
|
||||
inner
|
||||
.deferred_result_processor
|
||||
self.deferred_result_processor
|
||||
.add(receiver.into_stream(), handler)
|
||||
}
|
||||
}
|
||||
|
@ -34,9 +34,6 @@ impl StorageManager {
|
||||
value: Arc<SignedValueData>,
|
||||
descriptor: Arc<SignedValueDescriptor>,
|
||||
) -> VeilidAPIResult<flume::Receiver<VeilidAPIResult<OutboundSetValueResult>>> {
|
||||
|
||||
xxx switch this to registry mechanism
|
||||
|
||||
let routing_domain = RoutingDomain::PublicInternet;
|
||||
|
||||
// Get the DHT parameters for 'SetValue'
|
||||
@ -53,8 +50,8 @@ impl StorageManager {
|
||||
|
||||
// Get the nodes we know are caching this value to seed the fanout
|
||||
let init_fanout_queue = {
|
||||
let inner = self.inner.lock().await;
|
||||
Self::get_value_nodes_inner(&mut *inner, key)?
|
||||
self.get_value_nodes(key)
|
||||
.await?
|
||||
.unwrap_or_default()
|
||||
.into_iter()
|
||||
.filter(|x| {
|
||||
@ -81,13 +78,15 @@ impl StorageManager {
|
||||
// Routine to call to generate fanout
|
||||
let call_routine = {
|
||||
let context = context.clone();
|
||||
let rpc_processor = rpc_processor.clone();
|
||||
let registry = self.registry();
|
||||
|
||||
move |next_node: NodeRef| {
|
||||
let rpc_processor = rpc_processor.clone();
|
||||
let registry = registry.clone();
|
||||
let context = context.clone();
|
||||
let descriptor = descriptor.clone();
|
||||
async move {
|
||||
let rpc_processor = registry.rpc_processor();
|
||||
|
||||
let send_descriptor = true; // xxx check if next_node needs the descriptor or not, see issue #203
|
||||
|
||||
// get most recent value to send
|
||||
@ -99,7 +98,6 @@ impl StorageManager {
|
||||
// send across the wire
|
||||
let sva = network_result_try!(
|
||||
rpc_processor
|
||||
.clone()
|
||||
.rpc_call_set_value(
|
||||
Destination::direct(next_node.routing_domain_filtered(routing_domain))
|
||||
.with_safety(safety_selection),
|
||||
@ -236,12 +234,14 @@ impl StorageManager {
|
||||
};
|
||||
|
||||
// Call the fanout in a spawned task
|
||||
let registry = self.registry();
|
||||
spawn(
|
||||
"outbound_set_value fanout",
|
||||
Box::pin(
|
||||
async move {
|
||||
let routing_table = registry.routing_table();
|
||||
let fanout_call = FanoutCall::new(
|
||||
routing_table.clone(),
|
||||
&routing_table,
|
||||
key,
|
||||
key_count,
|
||||
fanout,
|
||||
@ -292,9 +292,8 @@ impl StorageManager {
|
||||
}
|
||||
|
||||
#[instrument(level = "trace", target = "dht", skip_all)]
|
||||
pub(super) fn process_deferred_outbound_set_value_result_inner(
|
||||
pub(super) fn process_deferred_outbound_set_value_result(
|
||||
&self,
|
||||
inner: &mut StorageManagerInner,
|
||||
res_rx: flume::Receiver<Result<set_value::OutboundSetValueResult, VeilidAPIError>>,
|
||||
key: TypedKey,
|
||||
subkey: ValueSubkey,
|
||||
@ -303,7 +302,7 @@ impl StorageManager {
|
||||
) {
|
||||
let registry = self.registry();
|
||||
let last_value_data = Arc::new(Mutex::new(last_value_data));
|
||||
Self::process_deferred_results_inner(inner,
|
||||
self.process_deferred_results(
|
||||
res_rx,
|
||||
Box::new(
|
||||
move |result: VeilidAPIResult<set_value::OutboundSetValueResult>| -> SendPinBoxFuture<bool> {
|
||||
@ -348,9 +347,7 @@ impl StorageManager {
|
||||
}
|
||||
};
|
||||
if changed {
|
||||
if let Err(e) = this.update_callback_value_change(key,ValueSubkeyRangeSet::single(subkey), u32::MAX, Some(value_data)).await {
|
||||
log_rtab!(debug "Failed sending deferred fanout value change: {}", e);
|
||||
}
|
||||
this.update_callback_value_change(key,ValueSubkeyRangeSet::single(subkey), u32::MAX, Some(value_data));
|
||||
}
|
||||
|
||||
// Return done
|
||||
@ -371,29 +368,37 @@ impl StorageManager {
|
||||
result: set_value::OutboundSetValueResult,
|
||||
) -> Result<Option<ValueData>, VeilidAPIError> {
|
||||
// Regain the lock after network access
|
||||
let mut inner = self.lock().await?;
|
||||
let mut inner = self.inner.lock().await;
|
||||
|
||||
// Report on fanout result offline
|
||||
let was_offline = self.check_fanout_set_offline(key, subkey, &result.fanout_result);
|
||||
if was_offline {
|
||||
// Failed to write, try again later
|
||||
inner.add_offline_subkey_write(key, subkey, safety_selection);
|
||||
Self::add_offline_subkey_write_inner(&mut *inner, key, subkey, safety_selection);
|
||||
}
|
||||
|
||||
// Keep the list of nodes that returned a value for later reference
|
||||
inner.process_fanout_results(key, core::iter::once((subkey, &result.fanout_result)), true);
|
||||
Self::process_fanout_results_inner(
|
||||
&mut *inner,
|
||||
key,
|
||||
core::iter::once((subkey, &result.fanout_result)),
|
||||
true,
|
||||
self.config()
|
||||
.with(|c| c.network.dht.set_value_count as usize),
|
||||
);
|
||||
|
||||
// Return the new value if it differs from what was asked to set
|
||||
if result.signed_value_data.value_data() != &last_value_data {
|
||||
// Record the newer value and send and update since it is different than what we just set
|
||||
inner
|
||||
.handle_set_local_value(
|
||||
key,
|
||||
subkey,
|
||||
result.signed_value_data.clone(),
|
||||
WatchUpdateMode::UpdateAll,
|
||||
)
|
||||
.await?;
|
||||
|
||||
Self::handle_set_local_value_inner(
|
||||
&mut *inner,
|
||||
key,
|
||||
subkey,
|
||||
result.signed_value_data.clone(),
|
||||
WatchUpdateMode::UpdateAll,
|
||||
)
|
||||
.await?;
|
||||
|
||||
return Ok(Some(result.signed_value_data.value_data().clone()));
|
||||
}
|
||||
@ -414,18 +419,20 @@ impl StorageManager {
|
||||
descriptor: Option<Arc<SignedValueDescriptor>>,
|
||||
target: Target,
|
||||
) -> VeilidAPIResult<NetworkResult<Option<Arc<SignedValueData>>>> {
|
||||
let mut inner = self.lock().await?;
|
||||
let mut inner = self.inner.lock().await;
|
||||
|
||||
// See if this is a remote or local value
|
||||
let (is_local, last_get_result) = {
|
||||
// See if the subkey we are modifying has a last known local value
|
||||
let last_get_result = inner.handle_get_local_value(key, subkey, true).await?;
|
||||
let last_get_result =
|
||||
Self::handle_get_local_value_inner(&mut *inner, key, subkey, true).await?;
|
||||
// If this is local, it must have a descriptor already
|
||||
if last_get_result.opt_descriptor.is_some() {
|
||||
(true, last_get_result)
|
||||
} else {
|
||||
// See if the subkey we are modifying has a last known remote value
|
||||
let last_get_result = inner.handle_get_remote_value(key, subkey, true).await?;
|
||||
let last_get_result =
|
||||
Self::handle_get_remote_value_inner(&mut *inner, key, subkey, true).await?;
|
||||
(false, last_get_result)
|
||||
}
|
||||
};
|
||||
@ -485,19 +492,24 @@ impl StorageManager {
|
||||
|
||||
// Do the set and return no new value
|
||||
let res = if is_local {
|
||||
inner
|
||||
.handle_set_local_value(key, subkey, value, WatchUpdateMode::ExcludeTarget(target))
|
||||
.await
|
||||
Self::handle_set_local_value_inner(
|
||||
&mut *inner,
|
||||
key,
|
||||
subkey,
|
||||
value,
|
||||
WatchUpdateMode::ExcludeTarget(target),
|
||||
)
|
||||
.await
|
||||
} else {
|
||||
inner
|
||||
.handle_set_remote_value(
|
||||
key,
|
||||
subkey,
|
||||
value,
|
||||
actual_descriptor,
|
||||
WatchUpdateMode::ExcludeTarget(target),
|
||||
)
|
||||
.await
|
||||
Self::handle_set_remote_value_inner(
|
||||
&mut *inner,
|
||||
key,
|
||||
subkey,
|
||||
value,
|
||||
actual_descriptor,
|
||||
WatchUpdateMode::ExcludeTarget(target),
|
||||
)
|
||||
.await
|
||||
};
|
||||
match res {
|
||||
Ok(()) => {}
|
||||
|
@ -13,10 +13,6 @@ impl StorageManager {
|
||||
let mut inner = self.inner.lock().await;
|
||||
|
||||
let routing_table = self.routing_table();
|
||||
let Some(rss) = routing_table.route_spec_store() else {
|
||||
return Ok(());
|
||||
};
|
||||
|
||||
let update_callback = self.update_callback();
|
||||
|
||||
let cur_ts = Timestamp::now();
|
||||
@ -36,7 +32,11 @@ impl StorageManager {
|
||||
// See if the private route we're using is dead
|
||||
if !is_dead {
|
||||
if let Some(value_changed_route) = active_watch.opt_value_changed_route {
|
||||
if rss.get_route_id_for_key(&value_changed_route).is_none() {
|
||||
if routing_table
|
||||
.route_spec_store()
|
||||
.get_route_id_for_key(&value_changed_route)
|
||||
.is_none()
|
||||
{
|
||||
// Route we would receive value changes on is dead
|
||||
is_dead = true;
|
||||
}
|
||||
|
@ -66,7 +66,7 @@ impl StorageManager {
|
||||
self.check_watched_records_task.tick().await?;
|
||||
|
||||
// Run online-only tasks
|
||||
if self.online_writes_ready() {
|
||||
if self.dht_is_online() {
|
||||
// Run offline subkey writes task if there's work to be done
|
||||
if self.has_offline_subkey_writes().await {
|
||||
self.offline_subkey_writes_task.tick().await?;
|
||||
|
@ -41,7 +41,7 @@ impl StorageManager {
|
||||
subkey: ValueSubkey,
|
||||
safety_selection: SafetySelection,
|
||||
) -> EyreResult<OfflineSubkeyWriteResult> {
|
||||
let Some(rpc_processor) = self.get_ready_rpc_processor() else {
|
||||
if !self.dht_is_online() {
|
||||
// Cancel this operation because we're offline
|
||||
return Ok(OfflineSubkeyWriteResult::Cancelled);
|
||||
};
|
||||
@ -65,14 +65,7 @@ impl StorageManager {
|
||||
};
|
||||
log_stor!(debug "Offline subkey write: {}:{} len={}", key, subkey, value.value_data().data().len());
|
||||
let osvres = self
|
||||
.outbound_set_value(
|
||||
&rpc_processor,
|
||||
key,
|
||||
subkey,
|
||||
safety_selection,
|
||||
value.clone(),
|
||||
descriptor,
|
||||
)
|
||||
.outbound_set_value(key, subkey, safety_selection, value.clone(), descriptor)
|
||||
.await;
|
||||
match osvres {
|
||||
Ok(res_rx) => {
|
||||
|
@ -25,7 +25,6 @@ impl StorageManager {
|
||||
#[instrument(level = "trace", target = "dht", skip_all, err)]
|
||||
pub(super) async fn outbound_watch_value_cancel(
|
||||
&self,
|
||||
rpc_processor: RPCProcessor,
|
||||
key: TypedKey,
|
||||
subkeys: ValueSubkeyRangeSet,
|
||||
safety_selection: SafetySelection,
|
||||
@ -37,17 +36,11 @@ impl StorageManager {
|
||||
|
||||
// Get the appropriate watcher key, if anonymous use a static anonymous watch key
|
||||
// which lives for the duration of the app's runtime
|
||||
let watcher = opt_watcher.unwrap_or_else(|| {
|
||||
self.unlocked_inner
|
||||
.anonymous_watch_keys
|
||||
.get(key.kind)
|
||||
.unwrap()
|
||||
.value
|
||||
});
|
||||
let watcher =
|
||||
opt_watcher.unwrap_or_else(|| self.anonymous_watch_keys.get(key.kind).unwrap().value);
|
||||
|
||||
let wva = VeilidAPIError::from_network_result(
|
||||
rpc_processor
|
||||
.clone()
|
||||
self.rpc_processor()
|
||||
.rpc_call_watch_value(
|
||||
Destination::direct(watch_node.routing_domain_filtered(routing_domain))
|
||||
.with_safety(safety_selection),
|
||||
@ -80,7 +73,6 @@ impl StorageManager {
|
||||
#[instrument(target = "dht", level = "debug", skip_all, err)]
|
||||
pub(super) async fn outbound_watch_value_change(
|
||||
&self,
|
||||
rpc_processor: RPCProcessor,
|
||||
key: TypedKey,
|
||||
subkeys: ValueSubkeyRangeSet,
|
||||
expiration: Timestamp,
|
||||
@ -101,17 +93,11 @@ impl StorageManager {
|
||||
|
||||
// Get the appropriate watcher key, if anonymous use a static anonymous watch key
|
||||
// which lives for the duration of the app's runtime
|
||||
let watcher = opt_watcher.unwrap_or_else(|| {
|
||||
self.unlocked_inner
|
||||
.anonymous_watch_keys
|
||||
.get(key.kind)
|
||||
.unwrap()
|
||||
.value
|
||||
});
|
||||
let watcher =
|
||||
opt_watcher.unwrap_or_else(|| self.anonymous_watch_keys.get(key.kind).unwrap().value);
|
||||
|
||||
let wva = VeilidAPIError::from_network_result(
|
||||
rpc_processor
|
||||
.clone()
|
||||
self.rpc_processor()
|
||||
.rpc_call_watch_value(
|
||||
Destination::direct(watch_node.routing_domain_filtered(routing_domain))
|
||||
.with_safety(safety_selection),
|
||||
@ -149,7 +135,6 @@ impl StorageManager {
|
||||
#[instrument(level = "trace", target = "dht", skip_all, err)]
|
||||
pub(super) async fn outbound_watch_value(
|
||||
&self,
|
||||
rpc_processor: RPCProcessor,
|
||||
key: TypedKey,
|
||||
subkeys: ValueSubkeyRangeSet,
|
||||
expiration: Timestamp,
|
||||
@ -171,7 +156,6 @@ impl StorageManager {
|
||||
};
|
||||
return self
|
||||
.outbound_watch_value_cancel(
|
||||
rpc_processor,
|
||||
key,
|
||||
subkeys,
|
||||
safety_selection,
|
||||
@ -190,7 +174,6 @@ impl StorageManager {
|
||||
};
|
||||
if let Some(res) = self
|
||||
.outbound_watch_value_change(
|
||||
rpc_processor.clone(),
|
||||
key,
|
||||
subkeys.clone(),
|
||||
expiration,
|
||||
@ -209,7 +192,6 @@ impl StorageManager {
|
||||
// Otherwise, treat this like a new watch
|
||||
}
|
||||
|
||||
let routing_table = rpc_processor.routing_table();
|
||||
let routing_domain = RoutingDomain::PublicInternet;
|
||||
|
||||
// Get the DHT parameters for 'WatchValue', some of which are the same for 'SetValue' operations
|
||||
@ -223,19 +205,13 @@ impl StorageManager {
|
||||
|
||||
// Get the appropriate watcher key, if anonymous use a static anonymous watch key
|
||||
// which lives for the duration of the app's runtime
|
||||
let watcher = opt_watcher.unwrap_or_else(|| {
|
||||
self.unlocked_inner
|
||||
.anonymous_watch_keys
|
||||
.get(key.kind)
|
||||
.unwrap()
|
||||
.value
|
||||
});
|
||||
let watcher =
|
||||
opt_watcher.unwrap_or_else(|| self.anonymous_watch_keys.get(key.kind).unwrap().value);
|
||||
|
||||
// Get the nodes we know are caching this value to seed the fanout
|
||||
let init_fanout_queue = {
|
||||
let inner = self.inner.lock().await;
|
||||
inner
|
||||
.get_value_nodes(key)?
|
||||
self.get_value_nodes(key)
|
||||
.await?
|
||||
.unwrap_or_default()
|
||||
.into_iter()
|
||||
.filter(|x| {
|
||||
@ -252,55 +228,60 @@ impl StorageManager {
|
||||
}));
|
||||
|
||||
// Routine to call to generate fanout
|
||||
let call_routine = |next_node: NodeRef| {
|
||||
let rpc_processor = rpc_processor.clone();
|
||||
let call_routine = {
|
||||
let context = context.clone();
|
||||
let subkeys = subkeys.clone();
|
||||
let registry = self.registry();
|
||||
move |next_node: NodeRef| {
|
||||
let context = context.clone();
|
||||
let registry = registry.clone();
|
||||
|
||||
async move {
|
||||
let wva = network_result_try!(
|
||||
rpc_processor
|
||||
.clone()
|
||||
.rpc_call_watch_value(
|
||||
Destination::direct(next_node.routing_domain_filtered(routing_domain)).with_safety(safety_selection),
|
||||
key,
|
||||
subkeys,
|
||||
expiration,
|
||||
count,
|
||||
watcher,
|
||||
None
|
||||
)
|
||||
.await?
|
||||
);
|
||||
let subkeys = subkeys.clone();
|
||||
|
||||
// Keep answer if we got one
|
||||
// (accepted means the node could provide an answer, not that the watch is active)
|
||||
if wva.answer.accepted {
|
||||
let mut done = false;
|
||||
if wva.answer.expiration_ts.as_u64() > 0 {
|
||||
// If the expiration time is greater than zero this watch is active
|
||||
log_dht!(debug "Watch created: id={} expiration_ts={} ({})", wva.answer.watch_id, display_ts(wva.answer.expiration_ts.as_u64()), next_node);
|
||||
done = true;
|
||||
} else {
|
||||
// If the returned expiration time is zero, this watch was cancelled or rejected
|
||||
// If we are asking to cancel then check_done will stop after the first node
|
||||
async move {
|
||||
let rpc_processor = registry.rpc_processor();
|
||||
let wva = network_result_try!(
|
||||
rpc_processor
|
||||
.rpc_call_watch_value(
|
||||
Destination::direct(next_node.routing_domain_filtered(routing_domain)).with_safety(safety_selection),
|
||||
key,
|
||||
subkeys,
|
||||
expiration,
|
||||
count,
|
||||
watcher,
|
||||
None
|
||||
)
|
||||
.await?
|
||||
);
|
||||
|
||||
// Keep answer if we got one
|
||||
// (accepted means the node could provide an answer, not that the watch is active)
|
||||
if wva.answer.accepted {
|
||||
let mut done = false;
|
||||
if wva.answer.expiration_ts.as_u64() > 0 {
|
||||
// If the expiration time is greater than zero this watch is active
|
||||
log_dht!(debug "Watch created: id={} expiration_ts={} ({})", wva.answer.watch_id, display_ts(wva.answer.expiration_ts.as_u64()), next_node);
|
||||
done = true;
|
||||
} else {
|
||||
// If the returned expiration time is zero, this watch was cancelled or rejected
|
||||
// If we are asking to cancel then check_done will stop after the first node
|
||||
}
|
||||
if done {
|
||||
let mut ctx = context.lock();
|
||||
ctx.opt_watch_value_result = Some(OutboundWatchValueResult {
|
||||
expiration_ts: wva.answer.expiration_ts,
|
||||
watch_id: wva.answer.watch_id,
|
||||
watch_node: next_node.clone(),
|
||||
opt_value_changed_route: wva.reply_private_route,
|
||||
});
|
||||
}
|
||||
}
|
||||
if done {
|
||||
let mut ctx = context.lock();
|
||||
ctx.opt_watch_value_result = Some(OutboundWatchValueResult {
|
||||
expiration_ts: wva.answer.expiration_ts,
|
||||
watch_id: wva.answer.watch_id,
|
||||
watch_node: next_node.clone(),
|
||||
opt_value_changed_route: wva.reply_private_route,
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
// Return peers if we have some
|
||||
log_network_result!(debug "WatchValue fanout call returned peers {} ({})", wva.answer.peers.len(), next_node);
|
||||
// Return peers if we have some
|
||||
log_network_result!(debug "WatchValue fanout call returned peers {} ({})", wva.answer.peers.len(), next_node);
|
||||
|
||||
Ok(NetworkResult::value(FanoutCallOutput{peer_info_list: wva.answer.peers}))
|
||||
}.instrument(tracing::trace_span!("outbound_watch_value call routine"))
|
||||
Ok(NetworkResult::value(FanoutCallOutput{peer_info_list: wva.answer.peers}))
|
||||
}.instrument(tracing::trace_span!("outbound_watch_value call routine"))
|
||||
}
|
||||
};
|
||||
|
||||
// Routine to call to check if we're done at each step
|
||||
@ -317,8 +298,9 @@ impl StorageManager {
|
||||
// Use a fixed fanout concurrency of 1 because we only want one watch
|
||||
// Use a longer timeout (timeout_us * set_value_count) because we may need to try multiple nodes
|
||||
// and each one might take timeout_us time.
|
||||
let routing_table = self.routing_table();
|
||||
let fanout_call = FanoutCall::new(
|
||||
routing_table.clone(),
|
||||
&routing_table,
|
||||
key,
|
||||
key_count,
|
||||
1,
|
||||
@ -380,7 +362,7 @@ impl StorageManager {
|
||||
params: WatchParameters,
|
||||
watch_id: Option<u64>,
|
||||
) -> VeilidAPIResult<NetworkResult<WatchResult>> {
|
||||
let mut inner = self.lock().await?;
|
||||
let mut inner = self.inner.lock().await;
|
||||
|
||||
// Validate input
|
||||
if params.count == 0 && (watch_id.unwrap_or_default() == 0) {
|
||||
@ -426,7 +408,7 @@ impl StorageManager {
|
||||
) -> VeilidAPIResult<NetworkResult<()>> {
|
||||
// Update local record store with new value
|
||||
let (is_value_seq_newer, value) = {
|
||||
let mut inner = self.lock().await?;
|
||||
let mut inner = self.inner.lock().await;
|
||||
|
||||
// Don't process update if the record is closed
|
||||
let Some(opened_record) = inner.opened_records.get_mut(&key) else {
|
||||
@ -483,9 +465,9 @@ impl StorageManager {
|
||||
apibail_internal!("should not have value without first subkey");
|
||||
};
|
||||
|
||||
let last_get_result = inner
|
||||
.handle_get_local_value(key, first_subkey, true)
|
||||
.await?;
|
||||
let last_get_result =
|
||||
Self::handle_get_local_value_inner(&mut *inner, key, first_subkey, true)
|
||||
.await?;
|
||||
|
||||
let descriptor = last_get_result.opt_descriptor.unwrap();
|
||||
let schema = descriptor.schema()?;
|
||||
@ -513,14 +495,14 @@ impl StorageManager {
|
||||
}
|
||||
}
|
||||
if is_value_seq_newer {
|
||||
inner
|
||||
.handle_set_local_value(
|
||||
key,
|
||||
first_subkey,
|
||||
value.clone(),
|
||||
WatchUpdateMode::NoUpdate,
|
||||
)
|
||||
.await?;
|
||||
Self::handle_set_local_value_inner(
|
||||
&mut *inner,
|
||||
key,
|
||||
first_subkey,
|
||||
value.clone(),
|
||||
WatchUpdateMode::NoUpdate,
|
||||
)
|
||||
.await?;
|
||||
}
|
||||
}
|
||||
|
||||
@ -539,8 +521,7 @@ impl StorageManager {
|
||||
} else {
|
||||
None
|
||||
};
|
||||
self.update_callback_value_change(key, subkeys, count, value)
|
||||
.await?;
|
||||
self.update_callback_value_change(key, subkeys, count, value);
|
||||
}
|
||||
|
||||
Ok(NetworkResult::value(()))
|
||||
|
@ -20,23 +20,24 @@ impl CryptInfo {
|
||||
}
|
||||
|
||||
pub struct TableDBUnlockedInner {
|
||||
table: String,
|
||||
registry: VeilidComponentRegistry,
|
||||
table: String,
|
||||
database: Database,
|
||||
// Encryption and decryption key will be the same unless configured for an in-place migration
|
||||
encrypt_info: Option<CryptInfo>,
|
||||
decrypt_info: Option<CryptInfo>,
|
||||
}
|
||||
impl_veilid_component_registry_accessor!(TableDBUnlockedInner);
|
||||
|
||||
impl fmt::Debug for TableDBUnlockedInner {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
write!(f, "TableDBInner(table={})", self.table)
|
||||
write!(f, "TableDBUnlockedInner(table={})", self.table)
|
||||
}
|
||||
}
|
||||
|
||||
impl Drop for TableDBUnlockedInner {
|
||||
fn drop(&mut self) {
|
||||
let table_store = self.registry.lookup::<TableStore>().unwrap();
|
||||
let table_store = self.table_store();
|
||||
table_store.on_table_db_drop(self.table.clone());
|
||||
}
|
||||
}
|
||||
@ -100,8 +101,9 @@ impl TableDB {
|
||||
}
|
||||
|
||||
pub(super) fn crypto(&self) -> VeilidComponentGuard<'_, Crypto> {
|
||||
self.unlocked_inner.registry.lookup::<Crypto>().unwrap()
|
||||
self.unlocked_inner.crypto()
|
||||
}
|
||||
|
||||
/// Get the internal name of the table
|
||||
pub fn table_name(&self) -> String {
|
||||
self.unlocked_inner.table.clone()
|
||||
|
@ -78,7 +78,8 @@ pub async fn test_create_dht_record_with_owner(api: VeilidAPI) {
|
||||
.with_safety(SafetySelection::Unsafe(Sequencing::EnsureOrdered))
|
||||
.unwrap();
|
||||
|
||||
let cs = api.crypto().unwrap().get(CRYPTO_KIND_VLD0).unwrap();
|
||||
let crypto = api.crypto().unwrap();
|
||||
let cs = crypto.get(CRYPTO_KIND_VLD0).unwrap();
|
||||
let owner_keypair = cs.generate_keypair();
|
||||
|
||||
let rec = rc
|
||||
@ -104,7 +105,7 @@ pub async fn test_get_dht_record_key(api: VeilidAPI) {
|
||||
.with_safety(SafetySelection::Unsafe(Sequencing::EnsureOrdered))
|
||||
.unwrap();
|
||||
|
||||
let crypto = api.crypto();
|
||||
let crypto = api.crypto().unwrap();
|
||||
let cs = crypto.get(CRYPTO_KIND_VLD0).unwrap();
|
||||
let owner_keypair = cs.generate_keypair();
|
||||
let schema = DHTSchema::dflt(1).unwrap();
|
||||
@ -332,7 +333,8 @@ pub async fn test_open_writer_dht_value(api: VeilidAPI) {
|
||||
// 3. Try writing to subkey 1, expect error
|
||||
// 4. Try writing to subkey 0, expect error
|
||||
|
||||
let cs = api.crypto().unwrap().get(key.kind).unwrap();
|
||||
let crypto = api.crypto().unwrap();
|
||||
let cs = crypto.get(key.kind).unwrap();
|
||||
assert!(cs.validate_keypair(owner, secret));
|
||||
let other_keypair = cs.generate_keypair();
|
||||
|
||||
|
@ -15,7 +15,7 @@ async fn shutdown(api: VeilidAPI) {
|
||||
trace!("test_table_store: finished");
|
||||
}
|
||||
|
||||
pub async fn test_protected_store(ps: ProtectedStore) {
|
||||
pub async fn test_protected_store(ps: &ProtectedStore) {
|
||||
info!("testing protected store");
|
||||
|
||||
let _ = ps.remove_user_secret("_test_key").await;
|
||||
@ -81,7 +81,7 @@ pub async fn test_protected_store(ps: ProtectedStore) {
|
||||
pub async fn test_all() {
|
||||
let api = startup().await;
|
||||
let ps = api.protected_store().unwrap();
|
||||
test_protected_store(ps.clone()).await;
|
||||
test_protected_store(&ps).await;
|
||||
|
||||
shutdown(api).await;
|
||||
}
|
||||
|
@ -136,33 +136,13 @@ impl VeilidAPI {
|
||||
|
||||
////////////////////////////////////////////////////////////////
|
||||
// Internal Accessors
|
||||
pub(crate) fn attachment_manager(
|
||||
&self,
|
||||
) -> VeilidAPIResult<VeilidComponentGuard<'_, AttachmentManager>> {
|
||||
|
||||
pub(crate) fn core_context(&self) -> VeilidAPIResult<VeilidCoreContext> {
|
||||
let inner = self.inner.lock();
|
||||
let Some(context) = &inner.context else {
|
||||
return Err(VeilidAPIError::NotInitialized);
|
||||
};
|
||||
context
|
||||
.registry()
|
||||
.lookup::<AttachmentManager>()
|
||||
.ok_or(VeilidAPIError::NotInitialized)
|
||||
}
|
||||
pub(crate) fn network_manager(&self) -> VeilidAPIResult<NetworkManager> {
|
||||
self.attachment_manager().map(|a| a.network_manager())
|
||||
}
|
||||
pub(crate) fn rpc_processor(&self) -> VeilidAPIResult<RPCProcessor> {
|
||||
self.network_manager()
|
||||
.map(|a| a.opt_rpc_processor())?
|
||||
.ok_or(VeilidAPIError::NotInitialized)
|
||||
}
|
||||
pub(crate) fn routing_table(&self) -> VeilidAPIResult<RoutingTable> {
|
||||
self.attachment_manager()
|
||||
.map(|a| a.network_manager().routing_table())
|
||||
}
|
||||
pub(crate) fn storage_manager(&self) -> VeilidAPIResult<StorageManager> {
|
||||
self.attachment_manager()
|
||||
.map(|a| a.network_manager().storage_manager())
|
||||
Ok(context.clone())
|
||||
}
|
||||
|
||||
pub(crate) fn with_debug_cache<R, F: FnOnce(&mut DebugCache) -> R>(&self, callback: F) -> R {
|
||||
@ -175,7 +155,7 @@ impl VeilidAPI {
|
||||
|
||||
/// Get a full copy of the current state of Veilid.
|
||||
pub async fn get_state(&self) -> VeilidAPIResult<VeilidState> {
|
||||
let attachment_manager = self.attachment_manager()?;
|
||||
let attachment_manager = self.core_context()?.attachment_manager();
|
||||
let network_manager = attachment_manager.network_manager();
|
||||
let config = self.config()?;
|
||||
|
||||
@ -196,7 +176,7 @@ impl VeilidAPI {
|
||||
event!(target: "veilid_api", Level::DEBUG,
|
||||
"VeilidAPI::attach()");
|
||||
|
||||
let attachment_manager = self.attachment_manager()?;
|
||||
let attachment_manager = self.core_context()?.attachment_manager();
|
||||
if !attachment_manager.attach().await {
|
||||
apibail_generic!("Already attached");
|
||||
}
|
||||
@ -209,7 +189,7 @@ impl VeilidAPI {
|
||||
event!(target: "veilid_api", Level::DEBUG,
|
||||
"VeilidAPI::detach()");
|
||||
|
||||
let attachment_manager = self.attachment_manager()?;
|
||||
let attachment_manager = self.core_context()?.attachment_manager();
|
||||
if !attachment_manager.detach().await {
|
||||
apibail_generic!("Already detached");
|
||||
}
|
||||
|
@ -95,7 +95,7 @@ fn get_dht_report_scope(text: &str) -> Option<DHTReportScope> {
|
||||
}
|
||||
|
||||
fn get_route_id(
|
||||
rss: RouteSpecStore,
|
||||
registry: VeilidComponentRegistry,
|
||||
allow_allocated: bool,
|
||||
allow_remote: bool,
|
||||
) -> impl Fn(&str) -> Option<RouteId> {
|
||||
@ -103,6 +103,9 @@ fn get_route_id(
|
||||
if text.is_empty() {
|
||||
return None;
|
||||
}
|
||||
let routing_table = registry.routing_table();
|
||||
let rss = routing_table.route_spec_store();
|
||||
|
||||
match RouteId::from_str(text).ok() {
|
||||
Some(key) => {
|
||||
if allow_allocated {
|
||||
@ -153,11 +156,13 @@ fn get_dht_schema(text: &str) -> Option<VeilidAPIResult<DHTSchema>> {
|
||||
Some(deserialize_json::<DHTSchema>(text))
|
||||
}
|
||||
|
||||
fn get_safety_selection(routing_table: RoutingTable) -> impl Fn(&str) -> Option<SafetySelection> {
|
||||
fn get_safety_selection(
|
||||
registry: VeilidComponentRegistry,
|
||||
) -> impl Fn(&str) -> Option<SafetySelection> {
|
||||
move |text| {
|
||||
let rss = routing_table.route_spec_store();
|
||||
let default_route_hop_count =
|
||||
routing_table.with_config(|c| c.network.rpc.default_route_hop_count as usize);
|
||||
let default_route_hop_count = registry
|
||||
.config()
|
||||
.with(|c| c.network.rpc.default_route_hop_count as usize);
|
||||
|
||||
if !text.is_empty() && &text[0..1] == "-" {
|
||||
// Unsafe
|
||||
@ -172,7 +177,7 @@ fn get_safety_selection(routing_table: RoutingTable) -> impl Fn(&str) -> Option<
|
||||
let mut sequencing = Sequencing::default();
|
||||
for x in text.split(',') {
|
||||
let x = x.trim();
|
||||
if let Some(pr) = get_route_id(rss.clone(), true, false)(x) {
|
||||
if let Some(pr) = get_route_id(registry.clone(), true, false)(x) {
|
||||
preferred_route = Some(pr)
|
||||
}
|
||||
if let Some(n) = get_number(x) {
|
||||
@ -229,9 +234,9 @@ fn get_keypair(text: &str) -> Option<KeyPair> {
|
||||
KeyPair::from_str(text).ok()
|
||||
}
|
||||
|
||||
fn get_crypto_system_version(
|
||||
crypto: &Crypto,
|
||||
) -> impl FnOnce(&str) -> Option<CryptoSystemGuard<'_>> {
|
||||
fn get_crypto_system_version<'a>(
|
||||
crypto: &'a Crypto,
|
||||
) -> impl FnOnce(&str) -> Option<CryptoSystemGuard<'a>> {
|
||||
move |text| {
|
||||
let kindstr = get_string(text)?;
|
||||
let kind = CryptoKind::from_str(&kindstr).ok()?;
|
||||
@ -252,12 +257,12 @@ fn get_dht_key_no_safety(text: &str) -> Option<TypedKey> {
|
||||
}
|
||||
|
||||
fn get_dht_key(
|
||||
routing_table: RoutingTable,
|
||||
registry: VeilidComponentRegistry,
|
||||
) -> impl FnOnce(&str) -> Option<(TypedKey, Option<SafetySelection>)> {
|
||||
move |text| {
|
||||
// Safety selection
|
||||
let (text, ss) = if let Some((first, second)) = text.split_once('+') {
|
||||
let ss = get_safety_selection(routing_table.clone())(second)?;
|
||||
let ss = get_safety_selection(registry)(second)?;
|
||||
(first, Some(ss))
|
||||
} else {
|
||||
(text, None)
|
||||
@ -279,7 +284,7 @@ fn get_dht_key(
|
||||
}
|
||||
|
||||
fn resolve_filtered_node_ref(
|
||||
routing_table: RoutingTable,
|
||||
registry: VeilidComponentRegistry,
|
||||
safety_selection: SafetySelection,
|
||||
) -> impl FnOnce(&str) -> SendPinBoxFuture<Option<FilteredNodeRef>> {
|
||||
move |text| {
|
||||
@ -292,14 +297,14 @@ fn resolve_filtered_node_ref(
|
||||
|
||||
let nr = if let Some(key) = get_public_key(text) {
|
||||
let node_id = TypedKey::new(best_crypto_kind(), key);
|
||||
routing_table
|
||||
registry
|
||||
.rpc_processor()
|
||||
.resolve_node(node_id, safety_selection)
|
||||
.await
|
||||
.ok()
|
||||
.flatten()?
|
||||
} else if let Some(node_id) = get_typed_key(text) {
|
||||
routing_table
|
||||
registry
|
||||
.rpc_processor()
|
||||
.resolve_node(node_id, safety_selection)
|
||||
.await
|
||||
@ -317,8 +322,9 @@ fn resolve_filtered_node_ref(
|
||||
}
|
||||
}
|
||||
|
||||
fn get_node_ref(routing_table: RoutingTable) -> impl FnOnce(&str) -> Option<NodeRef> {
|
||||
fn get_node_ref(registry: VeilidComponentRegistry) -> impl FnOnce(&str) -> Option<NodeRef> {
|
||||
move |text| {
|
||||
let routing_table = registry.routing_table();
|
||||
let nr = if let Some(key) = get_public_key(text) {
|
||||
routing_table.lookup_any_node_ref(key).ok().flatten()?
|
||||
} else if let Some(node_id) = get_typed_key(text) {
|
||||
@ -331,9 +337,11 @@ fn get_node_ref(routing_table: RoutingTable) -> impl FnOnce(&str) -> Option<Node
|
||||
}
|
||||
|
||||
fn get_filtered_node_ref(
|
||||
routing_table: RoutingTable,
|
||||
registry: VeilidComponentRegistry,
|
||||
) -> impl FnOnce(&str) -> Option<FilteredNodeRef> {
|
||||
move |text| {
|
||||
let routing_table = registry.routing_table();
|
||||
|
||||
// Safety selection
|
||||
let (text, seq) = if let Some((first, second)) = text.split_once('+') {
|
||||
let seq = get_sequencing(second)?;
|
||||
@ -560,19 +568,19 @@ impl VeilidAPI {
|
||||
)?;
|
||||
}
|
||||
// Dump routing table bucket info
|
||||
let routing_table = self.network_manager()?.routing_table();
|
||||
let routing_table = self.core_context()?.routing_table();
|
||||
Ok(routing_table.debug_info_buckets(min_state))
|
||||
}
|
||||
|
||||
async fn debug_dialinfo(&self, _args: String) -> VeilidAPIResult<String> {
|
||||
// Dump routing table dialinfo
|
||||
let routing_table = self.network_manager()?.routing_table();
|
||||
let routing_table = self.core_context()?.routing_table();
|
||||
Ok(routing_table.debug_info_dialinfo())
|
||||
}
|
||||
async fn debug_peerinfo(&self, args: String) -> VeilidAPIResult<String> {
|
||||
// Dump routing table peerinfo
|
||||
let args: Vec<String> = args.split_whitespace().map(|s| s.to_owned()).collect();
|
||||
let routing_table = self.network_manager()?.routing_table();
|
||||
let routing_table = self.core_context()?.routing_table();
|
||||
|
||||
let mut ai = 0;
|
||||
let mut opt_routing_domain = None;
|
||||
@ -603,7 +611,7 @@ impl VeilidAPI {
|
||||
|
||||
async fn debug_txtrecord(&self, _args: String) -> VeilidAPIResult<String> {
|
||||
// Dump routing table txt record
|
||||
let routing_table = self.network_manager()?.routing_table();
|
||||
let routing_table = self.core_context()?.routing_table();
|
||||
Ok(routing_table.debug_info_txtrecord().await)
|
||||
}
|
||||
|
||||
@ -648,7 +656,7 @@ impl VeilidAPI {
|
||||
}
|
||||
|
||||
// Dump routing table entries
|
||||
let routing_table = self.network_manager()?.routing_table();
|
||||
let routing_table = self.core_context()?.routing_table();
|
||||
Ok(match fastest {
|
||||
true => routing_table.debug_info_entries_fastest(min_state, capabilities, 100000),
|
||||
false => routing_table.debug_info_entries(min_state, capabilities),
|
||||
@ -657,31 +665,30 @@ impl VeilidAPI {
|
||||
|
||||
async fn debug_entry(&self, args: String) -> VeilidAPIResult<String> {
|
||||
let args: Vec<String> = args.split_whitespace().map(|s| s.to_owned()).collect();
|
||||
let routing_table = self.network_manager()?.routing_table();
|
||||
let registry = self.core_context()?.registry();
|
||||
|
||||
let node_ref = get_debug_argument_at(
|
||||
&args,
|
||||
0,
|
||||
"debug_entry",
|
||||
"node_id",
|
||||
get_node_ref(routing_table),
|
||||
get_node_ref(registry.clone()),
|
||||
)?;
|
||||
|
||||
// Dump routing table entry
|
||||
let routing_table = self.network_manager()?.routing_table();
|
||||
Ok(routing_table.debug_info_entry(node_ref))
|
||||
Ok(registry.routing_table().debug_info_entry(node_ref))
|
||||
}
|
||||
|
||||
async fn debug_relay(&self, args: String) -> VeilidAPIResult<String> {
|
||||
let args: Vec<String> = args.split_whitespace().map(|s| s.to_owned()).collect();
|
||||
let routing_table = self.network_manager()?.routing_table();
|
||||
let registry = self.core_context()?.registry();
|
||||
|
||||
let relay_node = get_debug_argument_at(
|
||||
&args,
|
||||
0,
|
||||
"debug_relay",
|
||||
"node_id",
|
||||
get_node_ref(routing_table),
|
||||
get_node_ref(registry.clone()),
|
||||
)
|
||||
.ok();
|
||||
|
||||
@ -696,7 +703,7 @@ impl VeilidAPI {
|
||||
.unwrap_or(RoutingDomain::PublicInternet);
|
||||
|
||||
// Dump routing table entry
|
||||
let routing_table = self.network_manager()?.routing_table();
|
||||
let routing_table = registry.routing_table();
|
||||
match routing_domain {
|
||||
RoutingDomain::LocalNetwork => {
|
||||
let mut editor = routing_table.edit_local_network_routing_domain();
|
||||
@ -717,8 +724,8 @@ impl VeilidAPI {
|
||||
|
||||
async fn debug_nodeinfo(&self, _args: String) -> VeilidAPIResult<String> {
|
||||
// Dump routing table entry
|
||||
let routing_table = self.network_manager()?.routing_table();
|
||||
let nodeinfo = routing_table.debug_info_nodeinfo();
|
||||
let registry = self.core_context()?.registry();
|
||||
let nodeinfo = registry.routing_table().debug_info_nodeinfo();
|
||||
|
||||
// Dump core state
|
||||
let state = self.get_state().await?;
|
||||
@ -741,7 +748,7 @@ impl VeilidAPI {
|
||||
|
||||
// Dump connection table
|
||||
let connman =
|
||||
if let Some(connection_manager) = self.network_manager()?.opt_connection_manager() {
|
||||
if let Some(connection_manager) = registry.network_manager().opt_connection_manager() {
|
||||
connection_manager.debug_print().await
|
||||
} else {
|
||||
"Connection manager unavailable when detached".to_owned()
|
||||
@ -752,8 +759,8 @@ impl VeilidAPI {
|
||||
|
||||
async fn debug_nodeid(&self, _args: String) -> VeilidAPIResult<String> {
|
||||
// Dump routing table entry
|
||||
let routing_table = self.network_manager()?.routing_table();
|
||||
let nodeid = routing_table.debug_info_nodeid();
|
||||
let registry = self.core_context()?.registry();
|
||||
let nodeid = registry.routing_table().debug_info_nodeid();
|
||||
Ok(nodeid)
|
||||
}
|
||||
|
||||
@ -810,8 +817,8 @@ impl VeilidAPI {
|
||||
apibail_internal!("Must be attached to restart network");
|
||||
}
|
||||
|
||||
let netman = self.network_manager()?;
|
||||
netman.restart_network();
|
||||
let registry = self.core_context()?.registry();
|
||||
registry.network_manager().restart_network();
|
||||
|
||||
Ok("Network restarted".to_owned())
|
||||
} else {
|
||||
@ -820,6 +827,8 @@ impl VeilidAPI {
|
||||
}
|
||||
|
||||
async fn debug_purge(&self, args: String) -> VeilidAPIResult<String> {
|
||||
let registry = self.core_context()?.registry();
|
||||
|
||||
let args: Vec<String> = args.split_whitespace().map(|s| s.to_owned()).collect();
|
||||
if !args.is_empty() {
|
||||
if args[0] == "buckets" {
|
||||
@ -830,20 +839,18 @@ impl VeilidAPI {
|
||||
) {
|
||||
apibail_internal!("Must be detached to purge");
|
||||
}
|
||||
self.network_manager()?.routing_table().purge_buckets();
|
||||
registry.routing_table().purge_buckets();
|
||||
Ok("Buckets purged".to_owned())
|
||||
} else if args[0] == "connections" {
|
||||
// Purge connection table
|
||||
let opt_connection_manager = self.network_manager()?.opt_connection_manager();
|
||||
let opt_connection_manager = registry.network_manager().opt_connection_manager();
|
||||
|
||||
if let Some(connection_manager) = &opt_connection_manager {
|
||||
connection_manager.shutdown().await;
|
||||
}
|
||||
|
||||
// Eliminate last_connections from routing table entries
|
||||
self.network_manager()?
|
||||
.routing_table()
|
||||
.purge_last_connections();
|
||||
registry.routing_table().purge_last_connections();
|
||||
|
||||
if let Some(connection_manager) = &opt_connection_manager {
|
||||
connection_manager
|
||||
@ -857,8 +864,7 @@ impl VeilidAPI {
|
||||
self.with_debug_cache(|dc| {
|
||||
dc.imported_routes.clear();
|
||||
});
|
||||
let rss = self.network_manager()?.routing_table().route_spec_store();
|
||||
match rss.purge().await {
|
||||
match registry.routing_table().route_spec_store().purge().await {
|
||||
Ok(_) => Ok("Routes purged".to_owned()),
|
||||
Err(e) => Ok(format!("Routes purged but failed to save: {}", e)),
|
||||
}
|
||||
@ -906,18 +912,18 @@ impl VeilidAPI {
|
||||
async fn debug_contact(&self, args: String) -> VeilidAPIResult<String> {
|
||||
let args: Vec<String> = args.split_whitespace().map(|s| s.to_owned()).collect();
|
||||
|
||||
let network_manager = self.network_manager()?;
|
||||
let routing_table = network_manager.routing_table();
|
||||
let registry = self.core_context()?.registry();
|
||||
|
||||
let node_ref = get_debug_argument_at(
|
||||
&args,
|
||||
0,
|
||||
"debug_contact",
|
||||
"node_ref",
|
||||
get_filtered_node_ref(routing_table),
|
||||
get_filtered_node_ref(registry.clone()),
|
||||
)?;
|
||||
|
||||
let cm = network_manager
|
||||
let cm = registry
|
||||
.network_manager()
|
||||
.get_node_contact_method(node_ref)
|
||||
.map_err(VeilidAPIError::internal)?;
|
||||
|
||||
@ -925,9 +931,8 @@ impl VeilidAPI {
|
||||
}
|
||||
|
||||
async fn debug_resolve(&self, args: String) -> VeilidAPIResult<String> {
|
||||
let netman = self.network_manager()?;
|
||||
let routing_table = netman.routing_table();
|
||||
let Some(_rpc) = netman.opt_rpc_processor() else {
|
||||
let registry = self.core_context()?.registry();
|
||||
if !registry.attachment_manager().is_attached() {
|
||||
apibail_internal!("Must be attached first");
|
||||
};
|
||||
|
||||
@ -938,10 +943,11 @@ impl VeilidAPI {
|
||||
0,
|
||||
"debug_resolve",
|
||||
"destination",
|
||||
self.clone().get_destination(routing_table.clone()),
|
||||
self.clone().get_destination(registry.clone()),
|
||||
)
|
||||
.await?;
|
||||
|
||||
let routing_table = registry.routing_table();
|
||||
match &dest {
|
||||
Destination::Direct {
|
||||
node: target,
|
||||
@ -958,7 +964,7 @@ impl VeilidAPI {
|
||||
} => Ok(format!(
|
||||
"Destination: {:#?}\nTarget Entry:\n{}\nRelay Entry:\n{}\n",
|
||||
&dest,
|
||||
routing_table.clone().debug_info_entry(target.clone()),
|
||||
routing_table.debug_info_entry(target.clone()),
|
||||
routing_table.debug_info_entry(relay.unfiltered())
|
||||
)),
|
||||
Destination::PrivateRoute {
|
||||
@ -969,9 +975,8 @@ impl VeilidAPI {
|
||||
}
|
||||
|
||||
async fn debug_ping(&self, args: String) -> VeilidAPIResult<String> {
|
||||
let netman = self.network_manager()?;
|
||||
let routing_table = netman.routing_table();
|
||||
let Some(rpc) = netman.opt_rpc_processor() else {
|
||||
let registry = self.core_context()?.registry();
|
||||
if !registry.attachment_manager().is_attached() {
|
||||
apibail_internal!("Must be attached first");
|
||||
};
|
||||
|
||||
@ -982,12 +987,13 @@ impl VeilidAPI {
|
||||
0,
|
||||
"debug_ping",
|
||||
"destination",
|
||||
self.clone().get_destination(routing_table),
|
||||
self.clone().get_destination(registry.clone()),
|
||||
)
|
||||
.await?;
|
||||
|
||||
// Send a StatusQ
|
||||
let out = match rpc
|
||||
let rpc_processor = registry.rpc_processor();
|
||||
let out = match rpc_processor
|
||||
.rpc_call_status(dest)
|
||||
.await
|
||||
.map_err(VeilidAPIError::internal)?
|
||||
@ -1002,9 +1008,8 @@ impl VeilidAPI {
|
||||
}
|
||||
|
||||
async fn debug_app_message(&self, args: String) -> VeilidAPIResult<String> {
|
||||
let netman = self.network_manager()?;
|
||||
let routing_table = netman.routing_table();
|
||||
let Some(rpc) = netman.opt_rpc_processor() else {
|
||||
let registry = self.core_context()?.registry();
|
||||
if !registry.attachment_manager().is_attached() {
|
||||
apibail_internal!("Must be attached first");
|
||||
};
|
||||
|
||||
@ -1015,15 +1020,17 @@ impl VeilidAPI {
|
||||
arg,
|
||||
"debug_app_message",
|
||||
"destination",
|
||||
self.clone().get_destination(routing_table),
|
||||
self.clone().get_destination(registry.clone()),
|
||||
)
|
||||
.await?;
|
||||
|
||||
let data = get_debug_argument(&rest, "debug_app_message", "data", get_data)?;
|
||||
let data_len = data.len();
|
||||
|
||||
// Send a AppMessage
|
||||
let out = match rpc
|
||||
// Send an AppMessage
|
||||
let rpc_processor = registry.rpc_processor();
|
||||
|
||||
let out = match rpc_processor
|
||||
.rpc_call_app_message(dest, data)
|
||||
.await
|
||||
.map_err(VeilidAPIError::internal)?
|
||||
@ -1038,9 +1045,8 @@ impl VeilidAPI {
|
||||
}
|
||||
|
||||
async fn debug_app_call(&self, args: String) -> VeilidAPIResult<String> {
|
||||
let netman = self.network_manager()?;
|
||||
let routing_table = netman.routing_table();
|
||||
let Some(rpc) = netman.opt_rpc_processor() else {
|
||||
let registry = self.core_context()?.registry();
|
||||
if !registry.attachment_manager().is_attached() {
|
||||
apibail_internal!("Must be attached first");
|
||||
};
|
||||
|
||||
@ -1051,15 +1057,17 @@ impl VeilidAPI {
|
||||
arg,
|
||||
"debug_app_call",
|
||||
"destination",
|
||||
self.clone().get_destination(routing_table),
|
||||
self.clone().get_destination(registry.clone()),
|
||||
)
|
||||
.await?;
|
||||
|
||||
let data = get_debug_argument(&rest, "debug_app_call", "data", get_data)?;
|
||||
let data_len = data.len();
|
||||
|
||||
// Send a AppMessage
|
||||
let out = match rpc
|
||||
// Send an AppCall
|
||||
let rpc_processor = registry.rpc_processor();
|
||||
|
||||
let out = match rpc_processor
|
||||
.rpc_call_app_call(dest, data)
|
||||
.await
|
||||
.map_err(VeilidAPIError::internal)?
|
||||
@ -1078,8 +1086,8 @@ impl VeilidAPI {
|
||||
}
|
||||
|
||||
async fn debug_app_reply(&self, args: String) -> VeilidAPIResult<String> {
|
||||
let netman = self.network_manager()?;
|
||||
let Some(rpc) = netman.opt_rpc_processor() else {
|
||||
let registry = self.core_context()?.registry();
|
||||
if !registry.attachment_manager().is_attached() {
|
||||
apibail_internal!("Must be attached first");
|
||||
};
|
||||
|
||||
@ -1091,7 +1099,9 @@ impl VeilidAPI {
|
||||
let data = get_debug_argument(&rest, "debug_app_reply", "data", get_data)?;
|
||||
(call_id, data)
|
||||
} else {
|
||||
let call_id = rpc
|
||||
let rpc_processor = registry.rpc_processor();
|
||||
|
||||
let call_id = rpc_processor
|
||||
.get_app_call_ids()
|
||||
.first()
|
||||
.cloned()
|
||||
@ -1113,8 +1123,8 @@ impl VeilidAPI {
|
||||
async fn debug_route_allocate(&self, args: Vec<String>) -> VeilidAPIResult<String> {
|
||||
// [ord|*ord] [rel] [<count>] [in|out] [avoid_node_id]
|
||||
|
||||
let netman = self.network_manager()?;
|
||||
let routing_table = netman.routing_table();
|
||||
let registry = self.core_context()?.registry();
|
||||
let routing_table = registry.routing_table();
|
||||
let rss = routing_table.route_spec_store();
|
||||
let config = self.config().unwrap();
|
||||
let default_route_hop_count = {
|
||||
@ -1171,8 +1181,8 @@ impl VeilidAPI {
|
||||
}
|
||||
async fn debug_route_release(&self, args: Vec<String>) -> VeilidAPIResult<String> {
|
||||
// <route id>
|
||||
let netman = self.network_manager()?;
|
||||
let routing_table = netman.routing_table();
|
||||
let registry = self.core_context()?.registry();
|
||||
let routing_table = registry.routing_table();
|
||||
let rss = routing_table.route_spec_store();
|
||||
|
||||
let route_id = get_debug_argument_at(
|
||||
@ -1180,7 +1190,7 @@ impl VeilidAPI {
|
||||
1,
|
||||
"debug_route",
|
||||
"route_id",
|
||||
get_route_id(rss.clone(), true, true),
|
||||
get_route_id(registry.clone(), true, true),
|
||||
)?;
|
||||
|
||||
// Release route
|
||||
@ -1204,8 +1214,8 @@ impl VeilidAPI {
|
||||
}
|
||||
async fn debug_route_publish(&self, args: Vec<String>) -> VeilidAPIResult<String> {
|
||||
// <route id> [full]
|
||||
let netman = self.network_manager()?;
|
||||
let routing_table = netman.routing_table();
|
||||
let registry = self.core_context()?.registry();
|
||||
let routing_table = registry.routing_table();
|
||||
let rss = routing_table.route_spec_store();
|
||||
|
||||
let route_id = get_debug_argument_at(
|
||||
@ -1213,7 +1223,7 @@ impl VeilidAPI {
|
||||
1,
|
||||
"debug_route",
|
||||
"route_id",
|
||||
get_route_id(rss.clone(), true, false),
|
||||
get_route_id(registry.clone(), true, false),
|
||||
)?;
|
||||
let full = {
|
||||
if args.len() > 2 {
|
||||
@ -1256,8 +1266,8 @@ impl VeilidAPI {
|
||||
}
|
||||
async fn debug_route_unpublish(&self, args: Vec<String>) -> VeilidAPIResult<String> {
|
||||
// <route id>
|
||||
let netman = self.network_manager()?;
|
||||
let routing_table = netman.routing_table();
|
||||
let registry = self.core_context()?.registry();
|
||||
let routing_table = registry.routing_table();
|
||||
let rss = routing_table.route_spec_store();
|
||||
|
||||
let route_id = get_debug_argument_at(
|
||||
@ -1265,7 +1275,7 @@ impl VeilidAPI {
|
||||
1,
|
||||
"debug_route",
|
||||
"route_id",
|
||||
get_route_id(rss.clone(), true, false),
|
||||
get_route_id(registry.clone(), true, false),
|
||||
)?;
|
||||
|
||||
// Unpublish route
|
||||
@ -1278,8 +1288,8 @@ impl VeilidAPI {
|
||||
}
|
||||
async fn debug_route_print(&self, args: Vec<String>) -> VeilidAPIResult<String> {
|
||||
// <route id>
|
||||
let netman = self.network_manager()?;
|
||||
let routing_table = netman.routing_table();
|
||||
let registry = self.core_context()?.registry();
|
||||
let routing_table = registry.routing_table();
|
||||
let rss = routing_table.route_spec_store();
|
||||
|
||||
let route_id = get_debug_argument_at(
|
||||
@ -1287,7 +1297,7 @@ impl VeilidAPI {
|
||||
1,
|
||||
"debug_route",
|
||||
"route_id",
|
||||
get_route_id(rss.clone(), true, true),
|
||||
get_route_id(registry.clone(), true, true),
|
||||
)?;
|
||||
|
||||
match rss.debug_route(&route_id) {
|
||||
@ -1297,8 +1307,8 @@ impl VeilidAPI {
|
||||
}
|
||||
async fn debug_route_list(&self, _args: Vec<String>) -> VeilidAPIResult<String> {
|
||||
//
|
||||
let netman = self.network_manager()?;
|
||||
let routing_table = netman.routing_table();
|
||||
let registry = self.core_context()?.registry();
|
||||
let routing_table = registry.routing_table();
|
||||
let rss = routing_table.route_spec_store();
|
||||
|
||||
let routes = rss.list_allocated_routes(|k, _| Some(*k));
|
||||
@ -1320,12 +1330,15 @@ impl VeilidAPI {
|
||||
}
|
||||
async fn debug_route_import(&self, args: Vec<String>) -> VeilidAPIResult<String> {
|
||||
// <blob>
|
||||
let registry = self.core_context()?.registry();
|
||||
let routing_table = registry.routing_table();
|
||||
let rss = routing_table.route_spec_store();
|
||||
|
||||
let blob = get_debug_argument_at(&args, 1, "debug_route", "blob", get_string)?;
|
||||
let blob_dec = BASE64URL_NOPAD
|
||||
.decode(blob.as_bytes())
|
||||
.map_err(VeilidAPIError::generic)?;
|
||||
let rss = self.routing_table()?.route_spec_store();
|
||||
|
||||
let route_id = rss
|
||||
.import_remote_private_route_blob(blob_dec)
|
||||
.map_err(VeilidAPIError::generic)?;
|
||||
@ -1342,8 +1355,8 @@ impl VeilidAPI {
|
||||
|
||||
async fn debug_route_test(&self, args: Vec<String>) -> VeilidAPIResult<String> {
|
||||
// <route id>
|
||||
let netman = self.network_manager()?;
|
||||
let routing_table = netman.routing_table();
|
||||
let registry = self.core_context()?.registry();
|
||||
let routing_table = registry.routing_table();
|
||||
let rss = routing_table.route_spec_store();
|
||||
|
||||
let route_id = get_debug_argument_at(
|
||||
@ -1351,7 +1364,7 @@ impl VeilidAPI {
|
||||
1,
|
||||
"debug_route",
|
||||
"route_id",
|
||||
get_route_id(rss.clone(), true, true),
|
||||
get_route_id(registry.clone(), true, true),
|
||||
)?;
|
||||
|
||||
let success = rss
|
||||
@ -1396,7 +1409,8 @@ impl VeilidAPI {
|
||||
|
||||
async fn debug_record_list(&self, args: Vec<String>) -> VeilidAPIResult<String> {
|
||||
// <local|remote>
|
||||
let storage_manager = self.storage_manager()?;
|
||||
let registry = self.core_context()?.registry();
|
||||
let storage_manager = registry.storage_manager();
|
||||
|
||||
let scope = get_debug_argument_at(&args, 1, "debug_record_list", "scope", get_string)?;
|
||||
let out = match scope.as_str() {
|
||||
@ -1427,7 +1441,8 @@ impl VeilidAPI {
|
||||
|
||||
async fn debug_record_purge(&self, args: Vec<String>) -> VeilidAPIResult<String> {
|
||||
// <local|remote> [bytes]
|
||||
let storage_manager = self.storage_manager()?;
|
||||
let registry = self.core_context()?.registry();
|
||||
let storage_manager = registry.storage_manager();
|
||||
|
||||
let scope = get_debug_argument_at(&args, 1, "debug_record_purge", "scope", get_string)?;
|
||||
let bytes = get_debug_argument_at(&args, 2, "debug_record_purge", "bytes", get_number).ok();
|
||||
@ -1440,8 +1455,6 @@ impl VeilidAPI {
|
||||
}
|
||||
|
||||
async fn debug_record_create(&self, args: Vec<String>) -> VeilidAPIResult<String> {
|
||||
let netman = self.network_manager()?;
|
||||
let routing_table = netman.routing_table();
|
||||
let crypto = self.crypto()?;
|
||||
|
||||
let schema = get_debug_argument_at(
|
||||
@ -1458,7 +1471,7 @@ impl VeilidAPI {
|
||||
2,
|
||||
"debug_record_create",
|
||||
"kind",
|
||||
get_crypto_system_version(crypto.clone()),
|
||||
get_crypto_system_version(&crypto),
|
||||
)
|
||||
.unwrap_or_else(|_| crypto.best());
|
||||
|
||||
@ -1467,7 +1480,7 @@ impl VeilidAPI {
|
||||
3,
|
||||
"debug_record_create",
|
||||
"safety_selection",
|
||||
get_safety_selection(routing_table),
|
||||
get_safety_selection(self.core_context()?.registry()),
|
||||
)
|
||||
.ok();
|
||||
|
||||
@ -1503,15 +1516,14 @@ impl VeilidAPI {
|
||||
}
|
||||
|
||||
async fn debug_record_open(&self, args: Vec<String>) -> VeilidAPIResult<String> {
|
||||
let netman = self.network_manager()?;
|
||||
let routing_table = netman.routing_table();
|
||||
let registry = self.core_context()?.registry();
|
||||
|
||||
let (key, ss) = get_debug_argument_at(
|
||||
&args,
|
||||
1,
|
||||
"debug_record_open",
|
||||
"key",
|
||||
get_dht_key(routing_table),
|
||||
get_dht_key(registry.clone()),
|
||||
)?;
|
||||
let writer =
|
||||
get_debug_argument_at(&args, 2, "debug_record_open", "writer", get_keypair).ok();
@ -1675,7 +1687,8 @@ impl VeilidAPI {
}

async fn debug_record_info(&self, args: Vec<String>) -> VeilidAPIResult<String> {
let storage_manager = self.storage_manager()?;
let registry = self.core_context()?.registry();
let storage_manager = registry.storage_manager();

let key =
get_debug_argument_at(&args, 1, "debug_record_info", "key", get_dht_key_no_safety)?;
@ -1976,7 +1989,8 @@ impl VeilidAPI {

async fn debug_punish_list(&self, _args: Vec<String>) -> VeilidAPIResult<String> {
//
let network_manager = self.network_manager()?;
let registry = self.core_context()?.registry();
let network_manager = registry.network_manager();
let address_filter = network_manager.address_filter();

let out = format!("Address filter punishments:\n{:#?}", address_filter);
@ -1985,7 +1999,8 @@ impl VeilidAPI {

async fn debug_punish_clear(&self, _args: Vec<String>) -> VeilidAPIResult<String> {
//
let network_manager = self.network_manager()?;
let registry = self.core_context()?.registry();
let network_manager = registry.network_manager();
let address_filter = network_manager.address_filter();

address_filter.clear_punishments();
@ -2197,14 +2212,14 @@ TableDB Operations:

fn get_destination(
self,
routing_table: RoutingTable,
registry: VeilidComponentRegistry,
) -> impl FnOnce(&str) -> SendPinBoxFuture<Option<Destination>> {
move |text| {
let text = text.to_owned();
Box::pin(async move {
// Safety selection
let (text, ss) = if let Some((first, second)) = text.split_once('+') {
let ss = get_safety_selection(routing_table.clone())(second)?;
let ss = get_safety_selection(registry.clone())(second)?;
(first, Some(ss))
} else {
(text.as_str(), None)
@ -2213,13 +2228,14 @@ TableDB Operations:
return None;
}
if &text[0..1] == "#" {
let routing_table = registry.routing_table();
let rss = routing_table.route_spec_store();

// Private route
let text = &text[1..];

let private_route = if let Some(prid) =
get_route_id(rss.clone(), false, true)(text)
get_route_id(registry.clone(), false, true)(text)
{
rss.best_remote_private_route(&prid)?
} else {
@ -2243,12 +2259,10 @@ TableDB Operations:
))
} else if let Some((first, second)) = text.split_once('@') {
// Relay
let relay_nr = resolve_filtered_node_ref(
routing_table.clone(),
ss.unwrap_or_default(),
)(second)
.await?;
let target_nr = get_node_ref(routing_table)(first)?;
let relay_nr =
resolve_filtered_node_ref(registry.clone(), ss.unwrap_or_default())(second)
.await?;
let target_nr = get_node_ref(registry.clone())(first)?;

let mut d = Destination::relay(relay_nr, target_nr);
if let Some(ss) = ss {
@ -2259,7 +2273,7 @@ TableDB Operations:
} else {
// Direct
let target_nr =
resolve_filtered_node_ref(routing_table, ss.unwrap_or_default())(text)
resolve_filtered_node_ref(registry.clone(), ss.unwrap_or_default())(text)
.await?;

let mut d = Destination::direct(target_nr);
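Taken together, the branches above define the destination strings the debug commands accept. The summary and call below are an editor's sketch against the new registry-based signature; the receiver name and the safety-selection text format are illustrative, not taken from this hunk.

// Destination text forms handled by get_destination above:
//   "<node>"           -> Destination::direct(target)
//   "<target>@<relay>" -> Destination::relay(relay, target)
//   "#<route>"         -> a private route resolved via the route spec store
// Any form may carry a "+<safety selection>" suffix, which is split off first.
// Hypothetical call site:
let dest: Option<Destination> = this
    .get_destination(registry.clone())("<target>@<relay>")
    .await;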
|
@ -59,7 +59,7 @@ struct JsonRequestProcessorInner {
routing_contexts: BTreeMap<u32, RoutingContext>,
table_dbs: BTreeMap<u32, TableDB>,
table_db_transactions: BTreeMap<u32, TableDBTransaction>,
crypto_systems: BTreeMap<u32, CryptoSystemVersion>,
crypto_kinds: BTreeMap<u32, CryptoKind>,
}

#[derive(Clone)]
@ -76,7 +76,7 @@ impl JsonRequestProcessor {
routing_contexts: Default::default(),
table_dbs: Default::default(),
table_db_transactions: Default::default(),
crypto_systems: Default::default(),
crypto_kinds: Default::default(),
})),
}
}
@ -179,18 +179,18 @@ impl JsonRequestProcessor {
}

// CryptoSystem
fn add_crypto_system(&self, csv: CryptoSystemVersion) -> u32 {
fn add_crypto_system(&self, csv: CryptoKind) -> u32 {
let mut inner = self.inner.lock();
let mut next_id: u32 = 1;
while inner.crypto_systems.contains_key(&next_id) {
while inner.crypto_kinds.contains_key(&next_id) {
next_id += 1;
}
inner.crypto_systems.insert(next_id, csv);
inner.crypto_kinds.insert(next_id, csv);
next_id
}
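The handle bookkeeping above is a linear scan for the lowest unused id. The same logic in isolation, as a generic sketch over the BTreeMap type used in the diff:

use std::collections::BTreeMap;

// Returns the lowest u32 handle not yet present in the map, starting from 1,
// mirroring add_crypto_system above.
fn allocate_handle<V>(map: &BTreeMap<u32, V>) -> u32 {
    let mut next_id: u32 = 1;
    while map.contains_key(&next_id) {
        next_id += 1;
    }
    next_id
}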
fn lookup_crypto_system(&self, id: u32, cs_id: u32) -> Result<CryptoSystemVersion, Response> {
fn lookup_crypto_system(&self, id: u32, cs_id: u32) -> Result<CryptoKind, Response> {
let inner = self.inner.lock();
let Some(crypto_system) = inner.crypto_systems.get(&cs_id).cloned() else {
let Some(crypto_system) = inner.crypto_kinds.get(&cs_id).cloned() else {
return Err(Response {
id,
op: ResponseOp::CryptoSystem(CryptoSystemResponse {
@ -203,7 +203,7 @@ impl JsonRequestProcessor {
}
fn release_crypto_system(&self, id: u32) -> i32 {
let mut inner = self.inner.lock();
if inner.crypto_systems.remove(&id).is_none() {
if inner.crypto_kinds.remove(&id).is_none() {
return 0;
}
1
@ -215,7 +215,7 @@ impl JsonRequestProcessor {
async fn parse_target(&self, s: String) -> VeilidAPIResult<Target> {
// Is this a route id?
if let Ok(rrid) = RouteId::from_str(&s) {
let routing_table = self.api.routing_table()?;
let routing_table = self.api.core_context()?.routing_table();
let rss = routing_table.route_spec_store();

// Is this a valid remote route id? (can't target allocated routes)
@ -467,9 +467,10 @@ impl JsonRequestProcessor {
#[instrument(level = "trace", target = "json_api", skip_all)]
pub async fn process_crypto_system_request(
&self,
csv: CryptoSystemVersion,
kind: CryptoKind,
csr: CryptoSystemRequest,
) -> CryptoSystemResponse {
xxx continue here
let cs_op = match csr.cs_op {
CryptoSystemRequestOp::Release => {
self.release_crypto_system(csr.cs_id);
|
@ -143,7 +143,7 @@ impl RoutingContext {
event!(target: "veilid_api", Level::DEBUG,
"RoutingContext::get_destination(self: {:?}, target: {:?})", self, target);

let rpc_processor = self.api.rpc_processor()?;
let rpc_processor = self.api.core_context()?.rpc_processor();
rpc_processor
.resolve_target_to_destination(target, self.unlocked_inner.safety_selection)
.await
@ -166,7 +166,7 @@ impl RoutingContext {
event!(target: "veilid_api", Level::DEBUG,
"RoutingContext::app_call(self: {:?}, target: {:?}, message: {:?})", self, target, message);

let rpc_processor = self.api.rpc_processor()?;
let rpc_processor = self.api.core_context()?.rpc_processor();

// Get destination
let dest = self.get_destination(target).await?;
@ -200,7 +200,7 @@ impl RoutingContext {
event!(target: "veilid_api", Level::DEBUG,
"RoutingContext::app_message(self: {:?}, target: {:?}, message: {:?})", self, target, message);

let rpc_processor = self.api.rpc_processor()?;
let rpc_processor = self.api.core_context()?.rpc_processor();

// Get destination
let dest = self.get_destination(target).await?;
@ -239,7 +239,7 @@ impl RoutingContext {

let kind = kind.unwrap_or(best_crypto_kind());
Crypto::validate_crypto_kind(kind)?;
let storage_manager = self.api.storage_manager()?;
let storage_manager = self.api.core_context()?.storage_manager();
storage_manager.get_record_key(kind, schema, owner_key)
}

@ -265,7 +265,8 @@ impl RoutingContext {

let kind = kind.unwrap_or(best_crypto_kind());
Crypto::validate_crypto_kind(kind)?;
let storage_manager = self.api.storage_manager()?;

let storage_manager = self.api.core_context()?.storage_manager();
storage_manager
.create_record(kind, schema, owner, self.unlocked_inner.safety_selection)
.await
@ -292,7 +293,8 @@ impl RoutingContext {
"RoutingContext::open_dht_record(self: {:?}, key: {:?}, default_writer: {:?})", self, key, default_writer);

Crypto::validate_crypto_kind(key.kind)?;
let storage_manager = self.api.storage_manager()?;

let storage_manager = self.api.core_context()?.storage_manager();
storage_manager
.open_record(key, default_writer, self.unlocked_inner.safety_selection)
.await
@ -307,7 +309,8 @@ impl RoutingContext {
"RoutingContext::close_dht_record(self: {:?}, key: {:?})", self, key);

Crypto::validate_crypto_kind(key.kind)?;
let storage_manager = self.api.storage_manager()?;

let storage_manager = self.api.core_context()?.storage_manager();
storage_manager.close_record(key).await
}

@ -322,7 +325,8 @@ impl RoutingContext {
"RoutingContext::delete_dht_record(self: {:?}, key: {:?})", self, key);

Crypto::validate_crypto_kind(key.kind)?;
let storage_manager = self.api.storage_manager()?;

let storage_manager = self.api.core_context()?.storage_manager();
storage_manager.delete_record(key).await
}

@ -343,7 +347,8 @@ impl RoutingContext {
"RoutingContext::get_dht_value(self: {:?}, key: {:?}, subkey: {:?}, force_refresh: {:?})", self, key, subkey, force_refresh);

Crypto::validate_crypto_kind(key.kind)?;
let storage_manager = self.api.storage_manager()?;

let storage_manager = self.api.core_context()?.storage_manager();
storage_manager.get_value(key, subkey, force_refresh).await
}

@ -366,7 +371,8 @@ impl RoutingContext {
"RoutingContext::set_dht_value(self: {:?}, key: {:?}, subkey: {:?}, data: len={}, writer: {:?})", self, key, subkey, data.len(), writer);

Crypto::validate_crypto_kind(key.kind)?;
let storage_manager = self.api.storage_manager()?;

let storage_manager = self.api.core_context()?.storage_manager();
storage_manager.set_value(key, subkey, data, writer).await
}

@ -402,7 +408,8 @@ impl RoutingContext {
"RoutingContext::watch_dht_values(self: {:?}, key: {:?}, subkeys: {:?}, expiration: {}, count: {})", self, key, subkeys, expiration, count);

Crypto::validate_crypto_kind(key.kind)?;
let storage_manager = self.api.storage_manager()?;

let storage_manager = self.api.core_context()?.storage_manager();
storage_manager
.watch_values(key, subkeys, expiration, count)
.await
@ -427,7 +434,8 @@ impl RoutingContext {
"RoutingContext::cancel_dht_watch(self: {:?}, key: {:?}, subkeys: {:?}", self, key, subkeys);

Crypto::validate_crypto_kind(key.kind)?;
let storage_manager = self.api.storage_manager()?;

let storage_manager = self.api.core_context()?.storage_manager();
storage_manager.cancel_watch_values(key, subkeys).await
}

@ -481,7 +489,8 @@ impl RoutingContext {
"RoutingContext::inspect_dht_record(self: {:?}, key: {:?}, subkeys: {:?}, scope: {:?})", self, key, subkeys, scope);

Crypto::validate_crypto_kind(key.kind)?;
let storage_manager = self.api.storage_manager()?;

let storage_manager = self.api.core_context()?.storage_manager();
storage_manager.inspect_record(key, subkeys, scope).await
}
|
@ -7,43 +7,56 @@ use stop_token::future::FutureExt as _;

use super::*;

#[derive(Debug)]
struct DeferredStreamProcessorInner {
opt_deferred_stream_channel: Option<flume::Sender<SendPinBoxFuture<()>>>,
opt_stopper: Option<StopSource>,
opt_join_handle: Option<MustJoinHandle<()>>,
}

/// Background processor for streams
/// Handles streams to completion, passing each item from the stream to a callback
#[derive(Debug)]
pub struct DeferredStreamProcessor {
pub opt_deferred_stream_channel: Option<flume::Sender<SendPinBoxFuture<()>>>,
pub opt_stopper: Option<StopSource>,
pub opt_join_handle: Option<MustJoinHandle<()>>,
inner: Mutex<DeferredStreamProcessorInner>,
}

impl DeferredStreamProcessor {
/// Create a new DeferredStreamProcessor
pub fn new() -> Self {
Self {
opt_deferred_stream_channel: None,
opt_stopper: None,
opt_join_handle: None,
inner: Mutex::new(DeferredStreamProcessorInner {
opt_deferred_stream_channel: None,
opt_stopper: None,
opt_join_handle: None,
}),
}
}

/// Initialize the processor before use
pub async fn init(&mut self) {
pub async fn init(&self) {
let stopper = StopSource::new();
let stop_token = stopper.token();
self.opt_stopper = Some(stopper);

let mut inner = self.inner.lock();
inner.opt_stopper = Some(stopper);
let (dsc_tx, dsc_rx) = flume::unbounded::<SendPinBoxFuture<()>>();
self.opt_deferred_stream_channel = Some(dsc_tx);
self.opt_join_handle = Some(spawn(
inner.opt_deferred_stream_channel = Some(dsc_tx);
inner.opt_join_handle = Some(spawn(
"deferred stream processor",
Self::processor(stop_token, dsc_rx),
));
}

/// Terminate the processor and ensure all streams are closed
pub async fn terminate(&mut self) {
drop(self.opt_deferred_stream_channel.take());
drop(self.opt_stopper.take());
if let Some(jh) = self.opt_join_handle.take() {
pub async fn terminate(&self) {
let opt_jh = {
let mut inner = self.inner.lock();
drop(inner.opt_deferred_stream_channel.take());
drop(inner.opt_stopper.take());
inner.opt_join_handle.take()
};
if let Some(jh) = opt_jh {
jh.await;
}
}
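With its state now behind Mutex<DeferredStreamProcessorInner>, the processor can be driven entirely through &self. Below is a usage sketch against the API shown in this file, assumed to run inside an async context; the flume stream conversion and the handler's "return true to keep processing" convention are assumptions for illustration:

// Feed a flume receiver through the deferred stream processor.
let dsp = DeferredStreamProcessor::new();
dsp.init().await; // takes &self after the refactor

let (tx, rx) = flume::unbounded::<u32>();
let added = dsp.add(rx.into_stream(), |item| {
    Box::pin(async move {
        println!("deferred item: {}", item);
        true // assumed: keep processing further items
    })
});
assert!(added); // add() reports whether the stream was accepted

drop(tx); // close the stream so the deferred future can finish
dsp.terminate().await;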
@ -100,15 +113,19 @@ impl DeferredStreamProcessor {
///
/// Returns 'true' if the stream was added for processing, and 'false' if the stream could not be added, possibly due to not being initialized.
pub fn add<T: Send + 'static, S: futures_util::Stream<Item = T> + Unpin + Send + 'static>(
&mut self,
&self,
mut receiver: S,
mut handler: impl FnMut(T) -> SendPinBoxFuture<bool> + Send + 'static,
) -> bool {
let Some(st) = self.opt_stopper.as_ref().map(|s| s.token()) else {
return false;
};
let Some(dsc_tx) = self.opt_deferred_stream_channel.clone() else {
return false;
let (st, dsc_tx) = {
let inner = self.inner.lock();
let Some(st) = inner.opt_stopper.as_ref().map(|s| s.token()) else {
return false;
};
let Some(dsc_tx) = inner.opt_deferred_stream_channel.clone() else {
return false;
};
(st, dsc_tx)
};
let drp = Box::pin(async move {
while let Ok(Some(res)) = receiver.next().timeout_at(st.clone()).await {