send valuechanged

Christien Rioux 2023-12-04 22:01:50 -05:00
parent d1dad8de61
commit a67bfde1f7
30 changed files with 277 additions and 175 deletions

View File

@@ -120,6 +120,8 @@ impl ServicesContext {
         // Set up storage manager
         trace!("init storage manager");
+        let update_callback = self.update_callback.clone();
         let storage_manager = StorageManager::new(
             self.config.clone(),
             self.crypto.clone().unwrap(),
@@ -127,7 +129,7 @@ impl ServicesContext {
             #[cfg(feature = "unstable-blockstore")]
             self.block_store.clone().unwrap(),
         );
-        if let Err(e) = storage_manager.init().await {
+        if let Err(e) = storage_manager.init(update_callback).await {
            error!("failed to init storage manager: {}", e);
            self.shutdown().await;
            return Err(e);

View File

@@ -183,7 +183,6 @@ impl RPCProcessor {
         &self,
         target: Target,
         safety_selection: SafetySelection,
-        sequencing: Sequencing,
     ) -> Result<rpc_processor::Destination, RPCError> {
         match target {
             Target::NodeId(node_id) => {
@@ -195,7 +194,7 @@ impl RPCProcessor {
                     }
                 };
                 // Apply sequencing to match safety selection
-                nr.set_sequencing(sequencing);
+                nr.set_sequencing(safety_selection.get_sequencing());
                 Ok(rpc_processor::Destination::Direct {
                     node: nr,
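Note: the dropped `sequencing` parameter is now derived from the safety selection itself via `get_sequencing()`. A rough, illustrative sketch of that relationship, using simplified stand-in types rather than the actual Veilid definitions:

```rust
// Illustrative only: hypothetical, simplified versions of these types.
#[derive(Copy, Clone, Debug, PartialEq)]
enum Sequencing {
    NoPreference,
    PreferOrdered,
    EnsureOrdered,
}

#[derive(Copy, Clone, Debug)]
struct SafetySpec {
    sequencing: Sequencing,
}

#[derive(Copy, Clone, Debug)]
enum SafetySelection {
    Unsafe(Sequencing),
    Safe(SafetySpec),
}

impl SafetySelection {
    // The destination resolver reads sequencing off the selection instead of
    // taking it as a second parameter.
    fn get_sequencing(&self) -> Sequencing {
        match self {
            SafetySelection::Unsafe(seq) => *seq,
            SafetySelection::Safe(spec) => spec.sequencing,
        }
    }
}

fn main() {
    let sel = SafetySelection::Unsafe(Sequencing::PreferOrdered);
    assert_eq!(sel.get_sequencing(), Sequencing::PreferOrdered);
}
```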

View File

@@ -24,7 +24,7 @@ impl RPCProcessor {
         let waitable_reply = network_result_try!(self.question(dest, question, None).await?);
         // Keep the reply private route that was used to return with the answer
-        let reply_private_route = waitable_reply.reply_private_route.clone();
+        let reply_private_route = waitable_reply.reply_private_route;
         // Wait for reply
         let (msg, latency) = match self.wait_for_reply(waitable_reply, debug_string).await? {

View File

@@ -44,7 +44,7 @@ impl RPCProcessor {
         let waitable_reply = network_result_try!(self.question(dest, find_node_q, None).await?);
         // Keep the reply private route that was used to return with the answer
-        let reply_private_route = waitable_reply.reply_private_route.clone();
+        let reply_private_route = waitable_reply.reply_private_route;
         // Wait for reply
         let (msg, latency) = match self.wait_for_reply(waitable_reply, debug_string).await? {

View File

@@ -83,7 +83,7 @@ impl RPCProcessor {
         );
         // Keep the reply private route that was used to return with the answer
-        let reply_private_route = waitable_reply.reply_private_route.clone();
+        let reply_private_route = waitable_reply.reply_private_route;
         // Wait for reply
         let (msg, latency) = match self.wait_for_reply(waitable_reply, debug_string).await? {
@@ -279,9 +279,9 @@ impl RPCProcessor {
         // Make GetValue answer
         let get_value_a = RPCOperationGetValueA::new(
-            subkey_result_value,
+            subkey_result_value.map(|x| (*x).clone()),
             closer_to_key_peers,
-            subkey_result_descriptor,
+            subkey_result_descriptor.map(|x| (*x).clone()),
         )?;
         // Send GetValue answer
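Note: with the stores now holding values behind `Arc`, the `.map(|x| (*x).clone())` calls deep-copy only at the RPC answer boundary, where the operation types still own their data. A minimal, generic sketch of that conversion (names here are illustrative, not the real Veilid API):

```rust
use std::sync::Arc;

// Cached records hold Arc-wrapped values; the wire-format answer types still
// take owned values, so the boundary maps Option<Arc<T>> to Option<T>.
fn to_owned_for_answer<T: Clone>(cached: Option<Arc<T>>) -> Option<T> {
    cached.map(|x| (*x).clone())
}

fn main() {
    let cached = Some(Arc::new(String::from("subkey payload")));
    // Cheap to share internally: cloning the Option clones only the Arc handle.
    let shared = cached.clone();
    // Deep-copied only when building the answer.
    let owned: Option<String> = to_owned_for_answer(shared);
    assert_eq!(owned.as_deref(), Some("subkey payload"));
}
```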

View File

@@ -97,7 +97,7 @@ impl RPCProcessor {
         );
         // Keep the reply private route that was used to return with the answer
-        let reply_private_route = waitable_reply.reply_private_route.clone();
+        let reply_private_route = waitable_reply.reply_private_route;
         // Wait for reply
         let (msg, latency) = match self.wait_for_reply(waitable_reply, debug_string).await? {
@@ -257,7 +257,7 @@ impl RPCProcessor {
         // Save the subkey, creating a new record if necessary
         let storage_manager = self.storage_manager();
         let new_value = network_result_try!(storage_manager
-            .inbound_set_value(key, subkey, value, descriptor)
+            .inbound_set_value(key, subkey, Arc::new(value), descriptor.map(Arc::new))
             .await
             .map_err(RPCError::internal)?);
@@ -292,7 +292,7 @@ impl RPCProcessor {
         }
         // Make SetValue answer
-        let set_value_a = RPCOperationSetValueA::new(set, new_value, closer_to_key_peers)?;
+        let set_value_a = RPCOperationSetValueA::new(set, new_value.map(|x| (*x).clone()), closer_to_key_peers)?;
         // Send SetValue answer
         self.answer(msg, RPCAnswer::new(RPCAnswerDetail::SetValueA(Box::new(set_value_a))))

View File

@@ -114,7 +114,7 @@ impl RPCProcessor {
         let send_data_method = waitable_reply.send_data_method.clone();
         // Keep the reply private route that was used to return with the answer
-        let reply_private_route = waitable_reply.reply_private_route.clone();
+        let reply_private_route = waitable_reply.reply_private_route;
         // Wait for reply
         let (msg, latency) = match self.wait_for_reply(waitable_reply, debug_string).await? {

View File

@@ -59,7 +59,7 @@ impl RPCProcessor {
         // Save the subkey, creating a new record if necessary
         let storage_manager = self.storage_manager();
         storage_manager
-            .inbound_value_changed(key, subkeys, count, value)
+            .inbound_value_changed(key, subkeys, count, Arc::new(value))
             .await
             .map_err(RPCError::internal)?;

View File

@@ -73,7 +73,7 @@ impl RPCProcessor {
             network_result_try!(self.question(dest.clone(), question, None).await?);
         // Keep the reply private route that was used to return with the answer
-        let reply_private_route = waitable_reply.reply_private_route.clone();
+        let reply_private_route = waitable_reply.reply_private_route;
         // Wait for reply
         let (msg, latency) = match self.wait_for_reply(waitable_reply, debug_string).await? {

View File

@@ -3,17 +3,17 @@ use super::*;
 /// The context of the outbound_get_value operation
 struct OutboundGetValueContext {
     /// The latest value of the subkey, may be the value passed in
-    pub value: Option<SignedValueData>,
+    pub value: Option<Arc<SignedValueData>>,
     /// The nodes that have returned the value so far (up to the consensus count)
     pub value_nodes: Vec<NodeRef>,
     /// The descriptor if we got a fresh one or empty if no descriptor was needed
-    pub descriptor: Option<SignedValueDescriptor>,
+    pub descriptor: Option<Arc<SignedValueDescriptor>>,
     /// The parsed schema from the descriptor if we have one
     pub schema: Option<DHTSchema>,
 }
 /// The result of the outbound_get_value operation
-struct OutboundGetValueResult {
+pub(super) struct OutboundGetValueResult {
     /// The subkey that was retrieved
     pub subkey_result: SubkeyResult,
     /// And where it was retrieved from
@@ -22,7 +22,7 @@ struct OutboundGetValueResult {
 impl StorageManager {
     /// Perform a 'get value' query on the network
-    pub async fn outbound_get_value(
+    pub(super) async fn outbound_get_value(
         &self,
         rpc_processor: RPCProcessor,
         key: TypedKey,
@@ -69,7 +69,7 @@ impl StorageManager {
                         Destination::direct(next_node.clone()).with_safety(safety_selection),
                         key,
                         subkey,
-                        last_descriptor,
+                        last_descriptor.map(|x| (*x).clone()),
                     )
                     .await?
             );
@@ -80,7 +80,7 @@ impl StorageManager {
                 let mut ctx = context.lock();
                 if ctx.descriptor.is_none() && ctx.schema.is_none() {
                     ctx.schema = Some(descriptor.schema().map_err(RPCError::invalid_format)?);
-                    ctx.descriptor = Some(descriptor);
+                    ctx.descriptor = Some(Arc::new(descriptor));
                 }
             }
@@ -127,7 +127,7 @@ impl StorageManager {
                         ctx.value_nodes.push(next_node);
                     } else if new_seq > prior_seq {
                         // If the sequence number is greater, start over with the new value
-                        ctx.value = Some(value);
+                        ctx.value = Some(Arc::new(value));
                         // One node has shown us this value so far
                         ctx.value_nodes = vec![next_node];
                     } else {
@@ -135,7 +135,7 @@ impl StorageManager {
                     }
                 } else {
                     // If we have no prior value, keep it
-                    ctx.value = Some(value);
+                    ctx.value = Some(Arc::new(value));
                     // One node has shown us this value so far
                     ctx.value_nodes = vec![next_node];
                 }

View File

@@ -34,6 +34,16 @@ const OFFLINE_SUBKEY_WRITES_INTERVAL_SECS: u32 = 1;
 /// Frequency to send ValueChanged notifications to the network
 const SEND_VALUE_CHANGES_INTERVAL_SECS: u32 = 1;
+#[derive(Debug, Clone)]
+/// A single 'value changed' message to send
+struct ValueChangedInfo {
+    target: Target,
+    key: TypedKey,
+    subkeys: ValueSubkeyRangeSet,
+    count: u32,
+    value: Arc<SignedValueData>,
+}
 struct StorageManagerUnlockedInner {
     config: VeilidConfig,
     crypto: Crypto,
@@ -112,11 +122,11 @@ impl StorageManager {
     }
     #[instrument(level = "debug", skip_all, err)]
-    pub async fn init(&self) -> EyreResult<()> {
+    pub async fn init(&self, update_callback: UpdateCallback) -> EyreResult<()> {
         debug!("startup storage manager");
         let mut inner = self.inner.lock().await;
-        inner.init(self.clone()).await?;
+        inner.init(self.clone(), update_callback).await?;
         Ok(())
     }
@@ -347,7 +357,7 @@ impl StorageManager {
         // Return the existing value if we have one unless we are forcing a refresh
         if !force_refresh {
             if let Some(last_subkey_result_value) = last_subkey_result.value {
-                return Ok(Some(last_subkey_result_value.into_value_data()));
+                return Ok(Some(last_subkey_result_value.value_data().clone()));
             }
         }
@@ -357,7 +367,7 @@ impl StorageManager {
         let Some(rpc_processor) = inner.rpc_processor.clone() else {
             // Return the existing value if we have one if we aren't online
             if let Some(last_subkey_result_value) = last_subkey_result.value {
-                return Ok(Some(last_subkey_result_value.into_value_data()));
+                return Ok(Some(last_subkey_result_value.value_data().clone()));
            }
            apibail_try_again!("offline, try again later");
         };
@@ -397,7 +407,7 @@ impl StorageManager {
                 .handle_set_local_value(key, subkey, subkey_result_value.clone())
                 .await?;
         }
-        Ok(Some(subkey_result_value.into_value_data()))
+        Ok(Some(subkey_result_value.value_data().clone()))
     }
     /// Set the value of a subkey on an opened local record
@@ -460,13 +470,13 @@ impl StorageManager {
         }
         // Sign the new value data with the writer
-        let signed_value_data = SignedValueData::make_signature(
+        let signed_value_data = Arc::new(SignedValueData::make_signature(
             value_data,
             descriptor.owner(),
             subkey,
             vcrypto,
             writer.secret,
-        )?;
+        )?);
         // Get rpc processor and drop mutex so we don't block while getting the value from the network
         let Some(rpc_processor) = Self::online_writes_ready_inner(&inner) else {
@@ -515,7 +525,7 @@ impl StorageManager {
         // Return the new value if it differs from what was asked to set
         if result.signed_value_data.value_data() != signed_value_data.value_data() {
-            return Ok(Some(result.signed_value_data.into_value_data()));
+            return Ok(Some(result.signed_value_data.value_data().clone()));
         }
         // If the original value was set, return None
@@ -668,4 +678,33 @@ impl StorageManager {
         Ok(true)
     }
+    // Send single value change out to the network
+    #[instrument(level = "trace", skip(self), err)]
+    async fn send_value_change(&self, vc: ValueChangedInfo) -> VeilidAPIResult<()> {
+        let rpc_processor = {
+            let inner = self.inner.lock().await;
+            if let Some(rpc_processor) = &inner.rpc_processor {
+                rpc_processor.clone()
+            } else {
+                apibail_try_again!("network is not available");
+            }
+        };
+        let dest = rpc_processor
+            .resolve_target_to_destination(
+                vc.target,
+                SafetySelection::Unsafe(Sequencing::NoPreference),
+            )
+            .await
+            .map_err(VeilidAPIError::from)?;
+        network_result_value_or_log!(rpc_processor
+            .rpc_call_value_changed(dest, vc.key, vc.subkeys, vc.count, (*vc.value).clone())
+            .await
+            .map_err(VeilidAPIError::from)? => [format!(": dest={:?} vc={:?}", dest, vc)] {
+        });
+        Ok(())
+    }
 }

View File

@@ -40,17 +40,7 @@ struct WatchedRecord {
     watchers: Vec<WatchedRecordWatch>,
 }
-#[derive(Debug, Clone)]
-/// A single 'value changed' message to send
-pub struct ValueChangedInfo {
-    target: Target,
-    key: TypedKey,
-    subkeys: ValueSubkeyRangeSet,
-    count: u32,
-    value: SignedValueData,
-}
-pub struct RecordStore<D>
+pub(super) struct RecordStore<D>
 where
     D: fmt::Debug + Clone + Serialize + for<'d> Deserialize<'d>,
 {
@@ -631,7 +621,7 @@ where
         &mut self,
         key: TypedKey,
         subkey: ValueSubkey,
-        signed_value_data: SignedValueData,
+        signed_value_data: Arc<SignedValueData>,
     ) -> VeilidAPIResult<()> {
         // Check size limit for data
         if signed_value_data.value_data().data().len() > self.limits.max_subkey_size {
@@ -847,7 +837,7 @@ where
             }
             if let Some(dw) = dead_watcher {
                 watch.watchers.remove(dw);
-                if watch.watchers.len() == 0 {
+                if watch.watchers.is_empty() {
                     is_empty = true;
                 }
             }
@@ -860,6 +850,16 @@ where
     }
     pub async fn take_value_changes(&mut self, changes: &mut Vec<ValueChangedInfo>) {
+        // ValueChangedInfo but without the subkey data that requires a double mutable borrow to get
+        struct EarlyValueChangedInfo {
+            target: Target,
+            key: TypedKey,
+            subkeys: ValueSubkeyRangeSet,
+            count: u32,
+        }
+        let mut evcis = vec![];
         for rtk in self.changed_watched_values.drain() {
             if let Some(watch) = self.watched_records.get_mut(&rtk) {
                 // Process watch notifications
@@ -877,39 +877,51 @@ where
                         dead_watchers.push(wn);
                     }
-                    // Get the first subkey data
-                    let Some(first_subkey) = subkeys.first() else {
-                        log_stor!(error "first subkey should exist for value change notification");
-                        continue;
-                    };
-                    let subkey_result = match self.get_subkey(rtk.key, first_subkey, false).await {
-                        Ok(Some(skr)) => skr,
-                        Ok(None) => {
-                            log_stor!(error "subkey should have data for value change notification");
-                            continue;
-                        }
-                        Err(e) => {
-                            log_stor!(error "error getting subkey data for value change notification: {}", e);
-                            continue;
-                        }
-                    };
-                    let Some(value) = subkey_result.value else {
-                        log_stor!(error "first subkey should have had value for value change notification");
-                        continue;
-                    };
-                    let vci = ValueChangedInfo {
+                    evcis.push(EarlyValueChangedInfo {
                         target: w.target.clone(),
                         key: rtk.key,
                         subkeys,
                         count,
-                        value,
-                    };
-                    changes.push(vci);
+                    });
+                }
+                // Remove in reverse so we don't have to offset the index to remove the right key
+                for dw in dead_watchers.iter().rev().copied() {
+                    watch.watchers.remove(dw);
                 }
             }
         }
+        for evci in evcis {
+            // Get the first subkey data
+            let Some(first_subkey) = evci.subkeys.first() else {
+                log_stor!(error "first subkey should exist for value change notification");
+                continue;
+            };
+            let subkey_result = match self.get_subkey(evci.key, first_subkey, false).await {
+                Ok(Some(skr)) => skr,
+                Ok(None) => {
+                    log_stor!(error "subkey should have data for value change notification");
+                    continue;
+                }
+                Err(e) => {
+                    log_stor!(error "error getting subkey data for value change notification: {}", e);
+                    continue;
+                }
+            };
+            let Some(value) = subkey_result.value else {
+                log_stor!(error "first subkey should have had value for value change notification");
+                continue;
+            };
+            changes.push(ValueChangedInfo {
+                target: evci.target,
+                key: evci.key,
+                subkeys: evci.subkeys,
+                count: evci.count,
+                value,
+            });
+        }
     }
     /// LRU out some records until we reclaim the amount of space requested
@@ -931,7 +943,7 @@ where
         reclaimed
     }
-    pub(super) fn debug_records(&self) -> String {
+    pub fn debug_records(&self) -> String {
         // Dump fields in an abbreviated way
         let mut out = String::new();
@@ -963,16 +975,12 @@ where
         out
     }
-    pub(super) fn debug_record_info(&self, key: TypedKey) -> String {
+    pub fn debug_record_info(&self, key: TypedKey) -> String {
         self.peek_record(key, |r| format!("{:#?}", r))
             .unwrap_or("Not found".to_owned())
     }
-    pub(super) async fn debug_record_subkey_info(
-        &self,
-        key: TypedKey,
-        subkey: ValueSubkey,
-    ) -> String {
+    pub async fn debug_record_subkey_info(&self, key: TypedKey, subkey: ValueSubkey) -> String {
         match self.peek_subkey(key, subkey, true).await {
             Ok(Some(v)) => {
                 format!("{:#?}", v)
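Note: the take_value_changes refactor above splits the work into two passes so that get_subkey, which also needs mutable access to the store, is not called while the watched-records map is mutably borrowed. A generic, self-contained sketch of the same borrow-splitting pattern, with types invented purely for illustration:

```rust
use std::collections::HashMap;

// Invented types for illustration; not the real record store.
struct Store {
    watched: Vec<(u32, u32)>, // (key, count) pairs pending notification
    data: HashMap<u32, String>,
}

struct Change {
    key: u32,
    count: u32,
    value: String,
}

impl Store {
    fn lookup(&mut self, key: u32) -> Option<String> {
        // Imagine this needs &mut self (e.g. it touches an LRU or a cache).
        self.data.get(&key).cloned()
    }

    fn take_changes(&mut self, changes: &mut Vec<Change>) {
        // Pass 1: drain the watch list into lightweight descriptions while it
        // is mutably borrowed.
        let early: Vec<(u32, u32)> = self.watched.drain(..).collect();

        // Pass 2: the borrow of self.watched has ended, so calls that need
        // &mut self are now allowed.
        for (key, count) in early {
            if let Some(value) = self.lookup(key) {
                changes.push(Change { key, count, value });
            }
        }
    }
}

fn main() {
    let mut store = Store {
        watched: vec![(1, 3)],
        data: [(1, "hello".to_string())].into_iter().collect(),
    };
    let mut changes = vec![];
    store.take_changes(&mut changes);
    assert_eq!(changes.len(), 1);
}
```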

View File

@@ -3,7 +3,7 @@ use super::*;
 /// The context of the outbound_set_value operation
 struct OutboundSetValueContext {
     /// The latest value of the subkey, may be the value passed in
-    pub value: SignedValueData,
+    pub value: Arc<SignedValueData>,
     /// The nodes that have set the value so far (up to the consensus count)
     pub value_nodes: Vec<NodeRef>,
     /// The number of non-sets since the last set we have received
@@ -13,23 +13,23 @@ struct OutboundSetValueContext {
 }
 /// The result of the outbound_set_value operation
-struct OutboundSetValueResult {
+pub(super) struct OutboundSetValueResult {
     /// The value that was set
-    pub signed_value_data: SignedValueData,
+    pub signed_value_data: Arc<SignedValueData>,
     /// And where it was set to
     pub value_nodes: Vec<NodeRef>,
 }
 impl StorageManager {
     /// Perform a 'set value' query on the network
-    pub async fn outbound_set_value(
+    pub(super) async fn outbound_set_value(
         &self,
         rpc_processor: RPCProcessor,
         key: TypedKey,
         subkey: ValueSubkey,
         safety_selection: SafetySelection,
-        value: SignedValueData,
-        descriptor: SignedValueDescriptor,
+        value: Arc<SignedValueData>,
+        descriptor: Arc<SignedValueDescriptor>,
     ) -> VeilidAPIResult<OutboundSetValueResult> {
         let routing_table = rpc_processor.routing_table();
@@ -75,8 +75,8 @@ impl StorageManager {
                         Destination::direct(next_node.clone()).with_safety(safety_selection),
                         key,
                         subkey,
-                        value,
-                        descriptor.clone(),
+                        (*value).clone(),
+                        (*descriptor).clone(),
                         send_descriptor,
                     )
                     .await?
@@ -105,7 +105,7 @@ impl StorageManager {
                     let new_seq = value.value_data().seq();
                     if new_seq > prior_seq {
                         // If the sequence number is greater, keep it
-                        ctx.value = value;
+                        ctx.value = Arc::new(value);
                         // One node has shown us this value so far
                         ctx.value_nodes = vec![next_node];
                         ctx.missed_since_last_set = 0;
@@ -225,9 +225,9 @@ impl StorageManager {
         &self,
         key: TypedKey,
         subkey: ValueSubkey,
-        value: SignedValueData,
-        descriptor: Option<SignedValueDescriptor>,
-    ) -> VeilidAPIResult<NetworkResult<Option<SignedValueData>>> {
+        value: Arc<SignedValueData>,
+        descriptor: Option<Arc<SignedValueDescriptor>>,
+    ) -> VeilidAPIResult<NetworkResult<Option<Arc<SignedValueData>>>> {
         let mut inner = self.lock().await?;
         // See if this is a remote or local value

View File

@@ -28,6 +28,8 @@ pub(super) struct StorageManagerInner {
     pub rpc_processor: Option<RPCProcessor>,
     /// Background processing task (not part of attachment manager tick tree so it happens when detached too)
     pub tick_future: Option<SendPinBoxFuture<()>>,
+    /// Update callback to send ValueChanged notification to
+    pub update_callback: Option<UpdateCallback>,
 }
 fn local_limits_from_config(config: VeilidConfig) -> RecordStoreLimits {
@@ -78,10 +80,15 @@ impl StorageManagerInner {
             metadata_db: Default::default(),
             rpc_processor: Default::default(),
             tick_future: Default::default(),
+            update_callback: None,
         }
     }
-    pub async fn init(&mut self, outer_self: StorageManager) -> EyreResult<()> {
+    pub async fn init(
+        &mut self,
+        outer_self: StorageManager,
+        update_callback: UpdateCallback,
+    ) -> EyreResult<()> {
         let metadata_db = self
             .unlocked_inner
             .table_store
@@ -121,13 +128,15 @@ impl StorageManagerInner {
             }
         });
         self.tick_future = Some(tick_future);
+        self.update_callback = Some(update_callback);
         self.initialized = true;
         Ok(())
     }
     pub async fn terminate(&mut self) {
+        self.update_callback = None;
         // Stop ticker
         let tick_future = self.tick_future.take();
         if let Some(f) = tick_future {
@@ -207,12 +216,12 @@ impl StorageManagerInner {
         let owner = vcrypto.generate_keypair();
         // Make a signed value descriptor for this dht value
-        let signed_value_descriptor = SignedValueDescriptor::make_signature(
+        let signed_value_descriptor = Arc::new(SignedValueDescriptor::make_signature(
             owner.key,
             schema_data,
             vcrypto.clone(),
             owner.secret,
-        )?;
+        )?);
         // Add new local value record
         let cur_ts = get_aligned_timestamp();
@@ -428,11 +437,11 @@ impl StorageManagerInner {
         };
         // Get routing table to see if we still know about these nodes
-        let Some(routing_table) = self.rpc_processor.map(|r| r.routing_table()) else {
+        let Some(routing_table) = self.rpc_processor.as_ref().map(|r| r.routing_table()) else {
             apibail_try_again!("offline, try again later");
         };
-        let opt_value_nodes = local_record_store.with_record(key, |r| {
+        let opt_value_nodes = local_record_store.peek_record(key, |r| {
             let d = r.detail();
             d.value_nodes
                 .iter()
@@ -475,7 +484,7 @@ impl StorageManagerInner {
         Ok(opened_record)
     }
-    pub async fn handle_get_local_value(
+    pub(super) async fn handle_get_local_value(
         &mut self,
         key: TypedKey,
         subkey: ValueSubkey,
@@ -498,11 +507,11 @@ impl StorageManagerInner {
         })
     }
-    pub async fn handle_set_local_value(
+    pub(super) async fn handle_set_local_value(
         &mut self,
         key: TypedKey,
         subkey: ValueSubkey,
-        signed_value_data: SignedValueData,
+        signed_value_data: Arc<SignedValueData>,
     ) -> VeilidAPIResult<()> {
         // See if it's in the local record store
         let Some(local_record_store) = self.local_record_store.as_mut() else {
@@ -517,7 +526,7 @@ impl StorageManagerInner {
         Ok(())
     }
-    pub async fn handle_watch_local_value(
+    pub(super) async fn handle_watch_local_value(
         &mut self,
         key: TypedKey,
         subkeys: ValueSubkeyRangeSet,
@@ -535,7 +544,7 @@ impl StorageManagerInner {
             .await
     }
-    pub async fn handle_get_remote_value(
+    pub(super) async fn handle_get_remote_value(
         &mut self,
         key: TypedKey,
         subkey: ValueSubkey,
@@ -558,12 +567,12 @@ impl StorageManagerInner {
         })
     }
-    pub async fn handle_set_remote_value(
+    pub(super) async fn handle_set_remote_value(
         &mut self,
         key: TypedKey,
         subkey: ValueSubkey,
-        signed_value_data: SignedValueData,
-        signed_value_descriptor: SignedValueDescriptor,
+        signed_value_data: Arc<SignedValueData>,
+        signed_value_descriptor: Arc<SignedValueDescriptor>,
     ) -> VeilidAPIResult<()> {
         // See if it's in the remote record store
         let Some(remote_record_store) = self.remote_record_store.as_mut() else {
@@ -591,7 +600,7 @@ impl StorageManagerInner {
         Ok(())
     }
-    pub async fn handle_watch_remote_value(
+    pub(super) async fn handle_watch_remote_value(
         &mut self,
         key: TypedKey,
         subkeys: ValueSubkeyRangeSet,
@@ -614,7 +623,8 @@ impl StorageManagerInner {
     where
         D: fmt::Debug + Clone + Serialize,
     {
-        let compiled = record.descriptor().schema_data();
+        let descriptor = record.descriptor();
+        let compiled = descriptor.schema_data();
         let mut hash_data = Vec::<u8>::with_capacity(PUBLIC_KEY_LENGTH + 4 + compiled.len());
         hash_data.extend_from_slice(&vcrypto.kind().0);
         hash_data.extend_from_slice(&record.owner().bytes);

View File

@@ -5,7 +5,7 @@ use stop_token::future::FutureExt;
 impl StorageManager {
     // Flush records stores to disk and remove dead records and send watch notifications
     #[instrument(level = "trace", skip(self), err)]
-    pub(crate) async fn send_value_changes_task_routine(
+    pub(super) async fn send_value_changes_task_routine(
         self,
         stop_token: StopToken,
         _last_ts: Timestamp,
@@ -13,22 +13,31 @@ impl StorageManager {
     ) -> EyreResult<()> {
         let mut value_changes: Vec<ValueChangedInfo> = vec![];
-        let mut inner = self.inner.lock().await;
-        if let Some(local_record_store) = &mut inner.local_record_store {
-            local_record_store
-                .take_value_changes(&mut value_changes)
-                .await?;
+        {
+            let mut inner = self.inner.lock().await;
+            if let Some(local_record_store) = &mut inner.local_record_store {
+                local_record_store
+                    .take_value_changes(&mut value_changes)
+                    .await;
+            }
+            if let Some(remote_record_store) = &mut inner.remote_record_store {
+                remote_record_store
+                    .take_value_changes(&mut value_changes)
+                    .await;
+            }
         }
-        if let Some(remote_record_store) = &mut inner.remote_record_store {
-            remote_record_store
-                .take_value_changes(&mut value_changes)
-                .await?;
-        }
         // Send all value changes in parallel
         let mut unord = FuturesUnordered::new();
-        // xxx
+        // Add a future for each value change
+        for vc in value_changes {
+            let this = self.clone();
+            unord.push(async move {
+                if let Err(e) = this.send_value_change(vc).await {
+                    log_stor!(debug "Failed to send value change: {}", e);
+                }
+            });
+        }
         while !unord.is_empty() {
             match unord.next().timeout_at(stop_token.clone()).await {
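Note: the task routine above fans the collected notifications out concurrently with FuturesUnordered. A trimmed-down sketch of that pattern using the futures crate, with the actual RPC work stubbed out (send_one and the ids are placeholders, not Veilid APIs):

```rust
use futures::stream::{FuturesUnordered, StreamExt};

async fn send_one(id: u32) -> Result<(), String> {
    // Stand-in for send_value_change(); real code would make an RPC here.
    if id % 2 == 0 {
        Ok(())
    } else {
        Err(format!("node {id} unreachable"))
    }
}

async fn send_all(ids: Vec<u32>) {
    let mut unord = FuturesUnordered::new();
    for id in ids {
        unord.push(async move {
            // Failures are logged and swallowed so one bad peer does not
            // abort the whole batch.
            if let Err(e) = send_one(id).await {
                eprintln!("failed to send value change: {e}");
            }
        });
    }
    // Futures complete in whatever order they finish, not submission order.
    while unord.next().await.is_some() {}
}

fn main() {
    futures::executor::block_on(send_all(vec![1, 2, 3, 4]));
}
```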

View File

@@ -2,7 +2,7 @@ use super::*;
 /// Information required to handle locally opened records
 #[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
-pub struct LocalRecordDetail {
+pub(in crate::storage_manager) struct LocalRecordDetail {
     /// The last 'safety selection' used when creating/opening this record.
     /// Even when closed, this safety selection applies to re-publication attempts by the system.
     pub safety_selection: SafetySelection,

View File

@@ -8,10 +8,10 @@ mod signed_value_descriptor;
 use super::*;
-pub use local_record_detail::*;
-pub use opened_record::*;
-pub use record::*;
-pub use record_data::*;
-pub use remote_record_detail::*;
+pub(super) use local_record_detail::*;
+pub(super) use opened_record::*;
+pub(super) use record::*;
+pub(super) use record_data::*;
+pub(super) use remote_record_detail::*;
 pub use signed_value_data::*;
 pub use signed_value_descriptor::*;

View File

@@ -1,7 +1,7 @@
 use super::*;
 #[derive(Clone, Debug)]
-pub struct ActiveWatch {
+pub(in crate::storage_manager) struct ActiveWatch {
     /// The expiration of a successful watch
     pub expiration_ts: Timestamp,
     /// Which node accepted the watch
@@ -17,7 +17,7 @@ pub struct ActiveWatch {
 /// The state associated with a local record when it is opened
 /// This is not serialized to storage as it is ephemeral for the lifetime of the opened record
 #[derive(Clone, Debug, Default)]
-pub struct OpenedRecord {
+pub(in crate::storage_manager) struct OpenedRecord {
     /// The key pair used to perform writes to subkey on this opened record
     /// Without this, set_value() will fail regardless of which key or subkey is being written to
     /// as all writes are signed

View File

@@ -1,11 +1,11 @@
 use super::*;
-#[derive(Debug, PartialEq, Eq, Serialize, Deserialize)]
-pub struct Record<D>
+#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
+pub(in crate::storage_manager) struct Record<D>
 where
-    D: fmt::Debug + Serialize,
+    D: fmt::Debug + Serialize + Clone,
 {
-    descriptor: SignedValueDescriptor,
+    descriptor: Arc<SignedValueDescriptor>,
     subkey_count: usize,
     stored_subkeys: ValueSubkeyRangeSet,
     last_touched_ts: Timestamp,
@@ -15,11 +15,11 @@ where
 impl<D> Record<D>
 where
-    D: fmt::Debug + Serialize,
+    D: fmt::Debug + Serialize + Clone,
 {
     pub fn new(
         cur_ts: Timestamp,
-        descriptor: SignedValueDescriptor,
+        descriptor: Arc<SignedValueDescriptor>,
         detail: D,
     ) -> VeilidAPIResult<Self> {
         let schema = descriptor.schema()?;
@@ -34,8 +34,8 @@ where
         })
     }
-    pub fn descriptor(&self) -> &SignedValueDescriptor {
-        &self.descriptor
+    pub fn descriptor(&self) -> Arc<SignedValueDescriptor> {
+        self.descriptor.clone()
     }
     pub fn owner(&self) -> &PublicKey {
         self.descriptor.owner()
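Note: descriptor() now hands back a cloned Arc instead of a borrow, which is likely why the hash-computation helper earlier in this commit binds the result to a local before calling schema_data(): the Arc handle outlives any borrow of the record itself. A small stand-alone sketch of that design choice, with types invented for illustration:

```rust
use std::sync::Arc;

// Invented, simplified types; not the real Veilid definitions.
struct Descriptor {
    schema_data: Vec<u8>,
}

struct Record {
    descriptor: Arc<Descriptor>,
}

impl Record {
    // Returning a cloned Arc instead of &Descriptor means the caller holds its
    // own handle and no longer borrows the Record.
    fn descriptor(&self) -> Arc<Descriptor> {
        self.descriptor.clone()
    }
}

fn main() {
    let mut record = Record {
        descriptor: Arc::new(Descriptor { schema_data: vec![1, 2, 3] }),
    };
    // The handle stays valid even while the record is replaced or mutated.
    let descriptor = record.descriptor();
    record.descriptor = Arc::new(Descriptor { schema_data: vec![] });
    assert_eq!(descriptor.schema_data.len(), 3);
}
```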

View File

@@ -1,12 +1,10 @@
 use super::*;
 #[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize)]
-pub struct RecordData {
+pub(in crate::storage_manager) struct RecordData {
     signed_value_data: Arc<SignedValueData>,
 }
-xxx continue here, use arc everywhere to avoid copies
 impl RecordData {
     pub fn new(signed_value_data: Arc<SignedValueData>) -> Self {
         Self { signed_value_data }

View File

@@ -1,4 +1,4 @@
 use super::*;
 #[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
-pub struct RemoteRecordDetail {}
+pub(in crate::storage_manager) struct RemoteRecordDetail {}

View File

@@ -48,10 +48,6 @@ impl SignedValueData {
         &self.value_data
     }
-    pub fn into_value_data(self) -> ValueData {
-        self.value_data
-    }
     pub fn signature(&self) -> &Signature {
         &self.signature
     }

View File

@@ -8,7 +8,7 @@ struct OutboundWatchValueContext {
 /// The result of the outbound_watch_value operation
 #[derive(Debug, Clone)]
-struct OutboundWatchValueResult {
+pub(super) struct OutboundWatchValueResult {
     /// The expiration of a successful watch
     pub expiration_ts: Timestamp,
     /// Which node accepted the watch
@@ -19,7 +19,8 @@ struct OutboundWatchValueResult {
 impl StorageManager {
     /// Perform a 'watch value' query on the network
-    pub async fn outbound_watch_value(
+    #[allow(clippy::too_many_arguments)]
+    pub(super) async fn outbound_watch_value(
         &self,
         rpc_processor: RPCProcessor,
         key: TypedKey,
@@ -219,8 +220,30 @@ impl StorageManager {
         key: TypedKey,
         subkeys: ValueSubkeyRangeSet,
         count: u32,
-        value: SignedValueData,
+        value: Arc<SignedValueData>,
     ) -> VeilidAPIResult<()> {
-        //
+        // Update local record store with new value
+        let (res, opt_update_callback) = {
+            let mut inner = self.lock().await?;
+            let res = if let Some(first_subkey) = subkeys.first() {
+                inner
+                    .handle_set_local_value(key, first_subkey, value.clone())
+                    .await
+            } else {
+                VeilidAPIResult::Ok(())
+            };
+            (res, inner.update_callback.clone())
+        };
+        // Announce ValueChanged VeilidUpdate
+        if let Some(update_callback) = opt_update_callback {
+            update_callback(VeilidUpdate::ValueChange(Box::new(VeilidValueChange {
+                key,
+                subkeys,
+                count,
+                value: value.value_data().clone(),
+            })));
+        }
+        res
     }
 }

View File

@@ -123,11 +123,7 @@ impl RoutingContext {
     async fn get_destination(&self, target: Target) -> VeilidAPIResult<rpc_processor::Destination> {
         let rpc_processor = self.api.rpc_processor()?;
         rpc_processor
-            .resolve_target_to_destination(
-                target,
-                self.unlocked_inner.safety_selection,
-                self.sequencing(),
-            )
+            .resolve_target_to_destination(target, self.unlocked_inner.safety_selection)
             .await
             .map_err(VeilidAPIError::invalid_target)
     }

View File

@@ -208,7 +208,7 @@ pub fn fix_veilidconfiginner() -> VeilidConfigInner {
 pub fn fix_veilidvaluechange() -> VeilidValueChange {
     VeilidValueChange {
         key: fix_typedkey(),
-        subkeys: vec![1, 2, 3, 4],
+        subkeys: ValueSubkeyRangeSet::new(),
         count: 5,
         value: ValueData::new_with_seq(23, b"ValueData".to_vec(), fix_cryptokey()).unwrap(),
     }

View File

@@ -55,7 +55,7 @@ impl DHTSchemaDFLT {
     }
     /// Check if a key is a schema member
-    pub fn is_member(&self, key: &PublicKey) -> bool {
+    pub fn is_member(&self, _key: &PublicKey) -> bool {
         false
     }
 }

View File

@@ -30,21 +30,21 @@ impl ValueSubkeyRangeSet {
         Self { data }
     }
-    pub fn interset(&self, other: &ValueSubkeyRangeSet) -> ValueSubkeyRangeSet {
-        Self::new_with_data(self.data & other.data)
+    pub fn intersect(&self, other: &ValueSubkeyRangeSet) -> ValueSubkeyRangeSet {
+        Self::new_with_data(&self.data & &other.data)
     }
     pub fn difference(&self, other: &ValueSubkeyRangeSet) -> ValueSubkeyRangeSet {
-        Self::new_with_data(self.data - other.data)
+        Self::new_with_data(&self.data - &other.data)
     }
     pub fn union(&self, other: &ValueSubkeyRangeSet) -> ValueSubkeyRangeSet {
-        Self::new_with_data(self.data | other.data)
+        Self::new_with_data(&self.data | &other.data)
     }
-    pub fn data(&self) -> RangeSetBlaze<ValueSubkey> {
-        self.data().clone()
+    pub fn data(&self) -> &RangeSetBlaze<ValueSubkey> {
+        &self.data
     }
     pub fn into_data(self) -> RangeSetBlaze<ValueSubkey> {
-        self.data()
+        self.data
     }
 }
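Note: besides fixing the interset typo, the operators above now work on references, so the underlying RangeSetBlaze is neither moved nor cloned, and data() hands out a borrow. The same idea shown with std's BTreeSet as a self-contained illustration of set operators over references:

```rust
use std::collections::BTreeSet;

fn main() {
    let a: BTreeSet<u32> = [1, 2, 3, 4].into_iter().collect();
    let b: BTreeSet<u32> = [3, 4, 5].into_iter().collect();

    // The &-operators build new sets from borrowed operands.
    let intersection = &a & &b; // {3, 4}
    let difference = &a - &b;   // {1, 2}
    let union = &a | &b;        // {1, 2, 3, 4, 5}

    // a and b remain usable because only references were taken.
    assert_eq!(intersection.len(), 2);
    assert_eq!(difference.len(), 2);
    assert_eq!(union.len(), 5);
    assert!(a.contains(&1) && b.contains(&5));
}
```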

View File

@@ -99,7 +99,7 @@ pub struct VeilidStateConfig {
 pub struct VeilidValueChange {
     #[schemars(with = "String")]
     pub key: TypedKey,
-    pub subkeys: Vec<ValueSubkey>,
+    pub subkeys: ValueSubkeyRangeSet,
     pub count: u32,
     pub value: ValueData,
 }

View File

@@ -61,10 +61,10 @@ packages:
     dependency: transitive
     description:
       name: collection
-      sha256: f092b211a4319e98e5ff58223576de6c2803db36221657b46c82574721240687
+      sha256: ee67cb0715911d28db6bf4af1026078bd6f0128b07a5f66fb2ed94ec6783c09a
       url: "https://pub.dev"
     source: hosted
-    version: "1.17.2"
+    version: "1.18.0"
   convert:
     dependency: transitive
     description:
@@ -220,10 +220,10 @@ packages:
     dependency: transitive
     description:
       name: meta
-      sha256: "3c74dbf8763d36539f114c799d8a2d87343b5067e9d796ca22b5eb8437090ee3"
+      sha256: a6e590c838b18133bb482a2745ad77c5bb7715fb0451209e1a7567d416678b8e
       url: "https://pub.dev"
     source: hosted
-    version: "1.9.1"
+    version: "1.10.0"
   path:
     dependency: "direct main"
     description:
@@ -329,18 +329,18 @@ packages:
     dependency: transitive
     description:
       name: stack_trace
-      sha256: c3c7d8edb15bee7f0f74debd4b9c5f3c2ea86766fe4178eb2a18eb30a0bdaed5
+      sha256: "73713990125a6d93122541237550ee3352a2d84baad52d375a4cad2eb9b7ce0b"
       url: "https://pub.dev"
     source: hosted
-    version: "1.11.0"
+    version: "1.11.1"
   stream_channel:
     dependency: transitive
     description:
       name: stream_channel
-      sha256: "83615bee9045c1d322bbbd1ba209b7a749c2cbcdcb3fdd1df8eb488b3279c1c8"
+      sha256: ba2aa5d8cc609d96bbb2899c28934f9e1af5cddbd60a827822ea467161eb54e7
       url: "https://pub.dev"
     source: hosted
-    version: "2.1.1"
+    version: "2.1.2"
   string_scanner:
     dependency: transitive
     description:
@@ -377,10 +377,10 @@ packages:
     dependency: transitive
     description:
       name: test_api
-      sha256: "75760ffd7786fffdfb9597c35c5b27eaeec82be8edfb6d71d32651128ed7aab8"
+      sha256: "5c2f730018264d276c20e4f1503fd1308dfbbae39ec8ee63c5236311ac06954b"
      url: "https://pub.dev"
     source: hosted
-    version: "0.6.0"
+    version: "0.6.1"
   typed_data:
     dependency: transitive
     description:
@@ -408,10 +408,10 @@ packages:
     dependency: transitive
     description:
       name: web
-      sha256: dc8ccd225a2005c1be616fe02951e2e342092edf968cf0844220383757ef8f10
+      sha256: afe077240a270dcfd2aafe77602b4113645af95d0ad31128cc02bce5ac5d5152
       url: "https://pub.dev"
     source: hosted
-    version: "0.1.4-beta"
+    version: "0.3.0"
   win32:
     dependency: transitive
     description:
@@ -437,5 +437,5 @@ packages:
     source: hosted
     version: "3.5.0"
 sdks:
-  dart: ">=3.1.0-185.0.dev <4.0.0"
+  dart: ">=3.2.0-194.0.dev <4.0.0"
   flutter: ">=3.10.6"

View File

@@ -61,6 +61,17 @@ impl<E: Send + 'static> TickTask<E> {
         self.running.load(core::sync::atomic::Ordering::Acquire)
     }
+    pub fn last_timestamp_us(&self) -> Option<u64> {
+        let ts = self
+            .last_timestamp_us
+            .load(core::sync::atomic::Ordering::Acquire);
+        if ts == 0 {
+            None
+        } else {
+            Some(ts)
+        }
+    }
     pub async fn stop(&self) -> Result<(), E> {
         // drop the stop source if we have one
         let opt_stop_source = &mut *self.stop_source.lock().await;
@@ -89,6 +100,17 @@ impl<E: Send + 'static> TickTask<E> {
             return Ok(());
         }
+        self.internal_tick(now, last_timestamp_us).await.map(drop)
+    }
+    pub async fn try_tick_now(&self) -> Result<bool, E> {
+        let now = get_timestamp();
+        let last_timestamp_us = self.last_timestamp_us.load(Ordering::Acquire);
+        self.internal_tick(now, last_timestamp_us).await
+    }
+    async fn internal_tick(&self, now: u64, last_timestamp_us: u64) -> Result<bool, E> {
         // Lock the stop source, tells us if we have ever started this future
         let opt_stop_source = &mut *self.stop_source.lock().await;
         if opt_stop_source.is_some() {
@@ -104,12 +126,12 @@ impl<E: Send + 'static> TickTask<E> {
                 Ok(None) => {
                     // No prior result to return which means things are still running
                     // We can just return now, since the singlefuture will not run a second time
-                    return Ok(());
+                    return Ok(false);
                 }
                 Err(()) => {
                     // If we get this, it's because we are joining the singlefuture already
                     // Don't bother running but this is not an error in this case
-                    return Ok(());
+                    return Ok(false);
                 }
             };
         }
@@ -134,7 +156,7 @@ impl<E: Send + 'static> TickTask<E> {
                 self.last_timestamp_us.store(now, Ordering::Release);
                 // Save new stopper
                 *opt_stop_source = Some(stop_source);
-                Ok(())
+                Ok(true)
             }
             // All other conditions should not be reachable
             _ => {