checkpoint

This commit is contained in:
Christien Rioux 2025-09-28 15:53:56 -04:00
parent 9a4118501e
commit f7350e934f
39 changed files with 5290 additions and 2648 deletions

View file

@ -315,20 +315,23 @@ struct SignedValueDescriptor @0xf6ffa63ef36d0f73 {
struct OperationGetValueQ @0x83b34ce1e72afc7f {
key @0 :OpaqueRecordKey; # DHT Key = Hash(ownerKeyKind) of: [ ownerKeyValue, schema ]
subkey @1 :Subkey; # the index of the subkey
wantDescriptor @2 :Bool; # whether or not to include the descriptor for the key
transactionId @1 :UInt64; # transaction id if inside a transaction, 0 otherwise
subkey @2 :Subkey; # the index of the subkey
wantDescriptor @3 :Bool; # whether or not to include the descriptor for the key
}
struct OperationGetValueA @0xf97edb86a914d093 {
value @0 :SignedValueData; # optional: the value if successful, or if unset, no value returned
peers @1 :List(PeerInfo); # returned 'closer peer' information on either success or failure
descriptor @2 :SignedValueDescriptor; # optional: the descriptor if requested if the value is also returned
accepted @0 :Bool; # true if the operation was accepted by the distance metric
transactionValid @1 :Bool; # true if the transaction id requested was valid
value @2 :SignedValueData; # optional: the value if successful, or if unset, no value returned
peers @3 :List(PeerInfo); # returned 'closer peer' information on either success or failure
descriptor @4 :SignedValueDescriptor; # optional: the descriptor if requested if the value is also returned
}
struct OperationSetValueQ @0xb315a71cd3f555b3 {
key @0 :OpaqueRecordKey; # DHT Key = Hash(ownerKeyKind) of: [ ownerKeyValue, schema ]
transactionId @1 :UInt64; # optional: transaction id if inside a transaction
transactionId @1 :UInt64; # transaction id if inside a transaction, 0 otherwise
subkey @2 :Subkey; # the index of the subkey
value @3 :SignedValueData; # value or subvalue contents (older or equal seq number gets dropped)
descriptor @4 :SignedValueDescriptor; # optional: the descriptor if needed
@ -336,19 +339,24 @@ struct OperationSetValueQ @0xb315a71cd3f555b3 {
struct OperationSetValueA @0xb5ff5b18c0d7b918 {
accepted @0 :Bool; # true if the operation was accepted by the distance metric
needsDescriptor @1 :Bool; # true if the descriptor was not sent but it was needed
value @2 :SignedValueData; # optional: the current value at the key if the set seq number was lower or equal to what was there before
peers @3 :List(PeerInfo); # returned 'closer peer' information on either success or failure
transactionValid @1 :Bool; # true if the transaction id requested was valid
needsDescriptor @2 :Bool; # true if the descriptor was not sent but it was needed
value @3 :SignedValueData; # optional: the current value at the key if the set seq number was lower or equal to what was there before
peers @4 :List(PeerInfo); # returned 'closer peer' information on either success or failure
}
struct OperationWatchValueQ @0xddae6e08cea11e84 {
struct WatchValueParams @0xf9e7db439276842d {
key @0 :OpaqueRecordKey; # key for value to watch
subkeys @1 :List(SubkeyRange); # subkey range to watch (up to 512 subranges). An empty range here should not be specified unless cancelling a watch (count=0).
expiration @2 :UInt64; # requested timestamp when this watch will expire in usec since epoch (watch can return less, 0 for max)
count @3 :UInt32; # requested number of changes to watch for (0 = cancel, 1 = single shot, 2+ = counter, UINT32_MAX = continuous)
watchId @4 :UInt64; # if 0, request a new watch. if >0, existing watch id
watcher @5 :PublicKey; # the watcher performing the watch, can be the owner or a schema member, or a generated anonymous watch keypair
signature @6 :Signature; # signature of the watcher, signature covers: key, subkeys, expiration, count, watchId
}
struct OperationWatchValueQ @0xddae6e08cea11e84 {
watchValueParamsData @0 :Data; # the parameters in serialized capnp format
watcher @1 :PublicKey; # the watcher performing the watch, can be the owner or a schema member, or a generated anonymous watch keypair
signature @2 :Signature; # signature of the watcher, signature covers the watchValueParamsData blob
}
struct OperationWatchValueA @0xaeed4433b1c35108 {
@ -360,14 +368,17 @@ struct OperationWatchValueA @0xaeed4433b1c35108 {
struct OperationInspectValueQ @0xe4d014b5a2f6ffaf {
key @0 :OpaqueRecordKey; # DHT Key = Hash(ownerKeyKind) of: [ ownerKeyValue, schema ]
subkeys @1 :List(SubkeyRange); # subkey range to inspect (up to 512 total subkeys), if empty this implies 0..=511
wantDescriptor @2 :Bool; # whether or not to include the descriptor for the key
transactionId @1 :UInt64; # transaction id if inside a transaction, 0 otherwise
subkeys @2 :List(SubkeyRange); # subkey range to inspect (up to 1024 total subkeys), if empty this implies 0..=511
wantDescriptor @3 :Bool; # whether or not to include the descriptor for the key
}
struct OperationInspectValueA @0x8540edb633391b2a {
seqs @0 :List(ValueSeqNum); # the list of subkey value sequence numbers in ascending order for each subkey in the requested range. if a subkey has not been written to, it is given a value of UINT32_MAX. these are not signed, and may be immediately out of date, and must be verified by a GetValueQ request.
peers @1 :List(PeerInfo); # returned 'closer peer' information on either success or failure
descriptor @2 :SignedValueDescriptor; # optional: the descriptor if requested if the value is also returned
accepted @0 :Bool; # true if the operation was accepted by the distance metric
transactionValid @1 :Bool; # true if the transaction id requested was valid
seqs @2 :List(ValueSeqNum); # the list of subkey value sequence numbers in ascending order for each subkey in the requested range. if a subkey has not been written to, it is given a value of UINT32_MAX. these are not signed, and may be immediately out of date, and must be verified by a GetValueQ request.
peers @3 :List(PeerInfo); # returned 'closer peer' information on either success or failure
descriptor @4 :SignedValueDescriptor; # optional: the descriptor if requested if the value is also returned
}
struct OperationValueChanged @0xbf9d00e88fd96623 {
@ -386,26 +397,30 @@ enum TransactCommand @0xa841a757a9a7f946 {
rollback @3; # roll back all operations (called at any time after start)
}
struct OperationTransactValueQ @0xf8629eff87ac729d {
struct TransactValueParams @0xc349f8768e059533 {
key @0 :OpaqueRecordKey; # key for record to transact on
command @1 :TransactCommand; # transaction command to execute
transactionId @2 :UInt64; # transaction id for all commands
transactionId @1 :UInt64; # 0 for begin, transaction id for all other commands
command @2 :TransactCommand; # transaction command to execute
descriptor @3 :SignedValueDescriptor; # optional: the descriptor if needed
writer @4 :PublicKey; # the writer performing the transaction, can be the owner or a schema member
signature @5 :Signature; # signature of the writer, signature covers: key, writer, signature, count, watchId
}
struct OperationTransactValueQ @0xf8629eff87ac729d {
transactValueParamsData @0 :Data; # the parameters in serialized capnp format
writer @1 :PublicKey; # the writer performing the transaction, can be the owner or a schema member
signature @2 :Signature; # signature of the writer, signature covers the transactValueParamsData blob
}
struct OperationTransactValueA @0xd2b5a46f55268aa4 {
accepted @0 :Bool; # true if the operation was accepted by the distance metric
needsDescriptor @1 :Bool; # true if the descriptor was not sent but it was needed
transactionId @2 :UInt64; # transaction id if successful, 0 if operation failed
transactionId @2 :UInt64; # transaction id if successful, 0 if operation failed or transaction id was invalid
seqs @3 :List(ValueSeqNum); # optional: on begin command: the list of subkey value sequence numbers in ascending order for each subkey
peers @4 :List(PeerInfo); # optional: on begin command: returned 'closer peer' information on either success or failure
}
struct OperationSyncValueQ @0xee28e7a72302fef7 {
key @0 :OpaqueRecordKey; # key for record to sync
transactionId @1 :UInt64; # transaction id if inside a transaction
transactionId @1 :UInt64; # transaction id if inside a transaction, 0 otherwise
seqs @2 :List(ValueSeqNum); # the list of subkey value sequence numbers in ascending order for each subkey
subkey @3 :Subkey; # optional: the index of the subkey
value @4 :SignedValueData; # optional: value or subvalue contents (older or equal seq number gets dropped)
@ -414,10 +429,11 @@ struct OperationSyncValueQ @0xee28e7a72302fef7 {
struct OperationSyncValueA @0xe42e5bb79e2f9009 {
accepted @0 :Bool; # true if the sync was close enough to be accepted
needsDescriptor @1 :Bool; # true if the descriptor was not sent but it was needed
seqs @2 :List(ValueSeqNum); # the list of subkey value sequence numbers in ascending order for each subkey
subkey @3 :Subkey; # optional: the index of the subkey
value @4 :SignedValueData; # optional: value or subvalue contents (older or equal seq number gets dropped)
transactionValid @1 :Bool; # true if the transaction id requested was valid
needsDescriptor @2 :Bool; # true if the descriptor was not sent but it was needed
seqs @3 :List(ValueSeqNum); # the list of subkey value sequence numbers in ascending order for each subkey
subkey @4 :Subkey; # optional: the index of the subkey
value @5 :SignedValueData; # optional: value or subvalue contents (older or equal seq number gets dropped)
}
struct OperationSupplyBlockQ @0xe0d00fd8091dd2e0 {
@ -533,17 +549,19 @@ struct Question @0xcb35ddc42056db29 {
setValueQ @6 :OperationSetValueQ;
watchValueQ @7 :OperationWatchValueQ;
inspectValueQ @8 :OperationInspectValueQ;
transactValueQ @9 :OperationTransactValueQ;
syncValueQ @10 :OperationSyncValueQ;
# Blockstore operations
# #[cfg(feature="unstable-blockstore")]
# supplyBlockQ @9 :OperationSupplyBlockQ;
# findBlockQ @10 :OperationFindBlockQ;
# supplyBlockQ @11 :OperationSupplyBlockQ;
# findBlockQ @12 :OperationFindBlockQ;
# Tunnel operations
# #[cfg(feature="unstable-tunnels")]
# startTunnelQ @11 :OperationStartTunnelQ;
# completeTunnelQ @12 :OperationCompleteTunnelQ;
# cancelTunnelQ @13 :OperationCancelTunnelQ;
# startTunnelQ @13 :OperationStartTunnelQ;
# completeTunnelQ @14 :OperationCompleteTunnelQ;
# cancelTunnelQ @15 :OperationCancelTunnelQ;
}
}
@ -575,17 +593,19 @@ struct Answer @0x8edae77299061a3b {
setValueA @4 :OperationSetValueA;
watchValueA @5 :OperationWatchValueA;
inspectValueA @6 :OperationInspectValueA;
transactValueA @7 :OperationTransactValueA;
syncValueA @8 :OperationSyncValueA;
# Blockstore operations
# #[cfg(feature="unstable-blockstore")]
# supplyBlockA @7 :OperationSupplyBlockA;
# findBlockA @8 :OperationFindBlockA;
# supplyBlockA @9 :OperationSupplyBlockA;
# findBlockA @10 :OperationFindBlockA;
# Tunnel operations
# #[cfg(feature="unstable-tunnels")]
# startTunnelA @9 :OperationStartTunnelA;
# completeTunnelA @10 :OperationCompleteTunnelA;
# cancelTunnelA @11 :OperationCancelTunnelA;
# startTunnelA @11 :OperationStartTunnelA;
# completeTunnelA @12 :OperationCompleteTunnelA;
# cancelTunnelA @13 :OperationCancelTunnelA;
}
}

File diff suppressed because it is too large Load diff

View file

@ -1184,7 +1184,7 @@ impl RouteSpecStore {
let mut pr_message = ::capnp::message::Builder::new_default();
let mut pr_builder = pr_message.init_root::<veilid_capnp::private_route::Builder>();
encode_private_route(&private_route, &mut pr_builder)?;
let mut blob_data = message_builder_to_vec(pr_message)?;
let mut blob_data = canonical_message_builder_to_vec_packed(pr_message)?;
// append the private route tag so we know how to decode it later
blob_data.push(1u8);
@ -1261,7 +1261,7 @@ impl RouteSpecStore {
let mut rh_message = ::capnp::message::Builder::new_default();
let mut rh_builder = rh_message.init_root::<veilid_capnp::route_hop::Builder>();
encode_route_hop(&route_hop, &mut rh_builder)?;
let mut blob_data = message_builder_to_vec(rh_message)?;
let mut blob_data = canonical_message_builder_to_vec_packed(rh_message)?;
// Append the route hop tag so we know how to decode it later
blob_data.push(0u8);
@ -1503,7 +1503,7 @@ impl RouteSpecStore {
let mut rh_message = ::capnp::message::Builder::new_default();
let mut rh_builder = rh_message.init_root::<veilid_capnp::route_hop::Builder>();
encode_route_hop(&route_hop, &mut rh_builder)?;
message_builder_to_vec(rh_message)?
canonical_message_builder_to_vec_packed(rh_message)?
};
let dh_secret = vcrypto.cached_dh(&hop_public_key, &rsd.secret_key)?;
@ -1840,8 +1840,8 @@ impl RouteSpecStore {
encode_private_route(private_route, &mut pr_builder)
.map_err(VeilidAPIError::internal)?;
capnp::serialize_packed::write_message(&mut buffer, &pr_message)
.map_err(RPCError::internal)?;
canonical_message_builder_to_write_packed(&mut buffer, pr_message)
.map_err(VeilidAPIError::internal)?;
}
Ok(buffer)
}

View file

@ -78,7 +78,7 @@ impl PeerInfo {
let mut node_info_builder =
node_info_message_builder.init_root::<veilid_capnp::node_info::Builder>();
encode_node_info(&node_info, &mut node_info_builder)?;
let node_info_message = message_builder_to_vec(node_info_message_builder)?;
let node_info_message = canonical_message_builder_to_vec_packed(node_info_message_builder)?;
// Sign the message
let crypto = routing_table.crypto();
@ -205,7 +205,7 @@ impl PeerInfo {
let mut node_info_builder =
node_info_message_builder.init_root::<veilid_capnp::node_info::Builder>();
encode_node_info(&node_info, &mut node_info_builder)?;
let node_info_message = message_builder_to_vec(node_info_message_builder)?;
let node_info_message = canonical_message_builder_to_vec_packed(node_info_message_builder)?;
// Extract node ids for convenience
let mut node_ids = NodeIdGroup::new();

View file

@ -47,6 +47,7 @@ pub use socket_address::*;
pub use tunnel::*;
use super::*;
use capnp::message::ReaderSegments;
impl_veilid_log_facility!("rpc");
@ -56,6 +57,8 @@ pub enum QuestionContext {
GetValue(ValidateGetValueContext),
SetValue(ValidateSetValueContext),
InspectValue(ValidateInspectValueContext),
TransactValue(ValidateTransactValueContext),
SyncValue(ValidateSyncValueContext),
}
#[derive(Clone)]
@ -72,13 +75,88 @@ pub struct RPCDecodeContext {
}
#[instrument(level = "trace", target = "rpc", skip_all, err)]
pub fn message_builder_to_vec<'a, T>(
pub fn canonical_message_builder_to_vec_packed<'a, T>(
builder: capnp::message::Builder<T>,
) -> Result<Vec<u8>, RPCError>
where
T: capnp::message::Allocator + 'a,
{
let mut buffer = vec![];
capnp::serialize_packed::write_message(&mut buffer, &builder).map_err(RPCError::protocol)?;
// Canonicalize builder
let buffer = if builder.len() != 1 {
let root = builder
.get_root_as_reader::<capnp::any_pointer::Reader>()
.map_err(RPCError::protocol)?;
let size = root.target_size()?.word_count + 1;
let mut canonical_builder = capnp::message::Builder::new(
capnp::message::HeapAllocator::new().first_segment_words(size as u32),
);
canonical_builder.set_root_canonical(root)?;
let mut buffer = Vec::<u8>::with_capacity(canonical_builder.size_in_words());
capnp::serialize_packed::write_message(&mut buffer, &canonical_builder)
.map_err(RPCError::protocol)?;
buffer
} else {
let mut buffer = Vec::<u8>::with_capacity(builder.size_in_words());
capnp::serialize_packed::write_message(&mut buffer, &builder)
.map_err(RPCError::protocol)?;
buffer
};
Ok(buffer)
}
#[instrument(level = "trace", target = "rpc", skip_all, err)]
pub fn canonical_message_builder_to_write_packed<'a, T, W>(
write: W,
builder: capnp::message::Builder<T>,
) -> Result<(), RPCError>
where
T: capnp::message::Allocator + 'a,
W: capnp::io::Write,
{
// Canonicalize builder
if builder.len() != 1 {
let root = builder
.get_root_as_reader::<capnp::any_pointer::Reader>()
.map_err(RPCError::protocol)?;
let size = root.target_size()?.word_count + 1;
let mut canonical_builder = capnp::message::Builder::new(
capnp::message::HeapAllocator::new().first_segment_words(size as u32),
);
canonical_builder.set_root_canonical(root)?;
capnp::serialize_packed::write_message(write, &canonical_builder)
.map_err(RPCError::protocol)?;
} else {
capnp::serialize_packed::write_message(write, &builder).map_err(RPCError::protocol)?;
};
Ok(())
}
#[instrument(level = "trace", target = "rpc", skip_all, err)]
pub fn canonical_message_builder_to_vec_unpacked<'a, T>(
builder: capnp::message::Builder<T>,
) -> Result<Vec<u8>, RPCError>
where
T: capnp::message::Allocator + 'a,
{
// Canonicalize builder
if builder.len() != 1 {
let root = builder
.get_root_as_reader::<capnp::any_pointer::Reader>()
.map_err(RPCError::protocol)?;
let size = root.target_size()?.word_count + 1;
let mut canonical_builder = capnp::message::Builder::new(
capnp::message::HeapAllocator::new().first_segment_words(size as u32),
);
canonical_builder.set_root_canonical(root)?;
Ok(capnp::serialize::write_message_to_words(&canonical_builder))
} else {
Ok(capnp::serialize::write_message_to_words(&builder))
}
}

View file

@ -41,6 +41,8 @@ pub(in crate::rpc_processor) enum RPCAnswerDetail {
SetValueA(Box<RPCOperationSetValueA>),
WatchValueA(Box<RPCOperationWatchValueA>),
InspectValueA(Box<RPCOperationInspectValueA>),
TransactValueA(Box<RPCOperationTransactValueA>),
SyncValueA(Box<RPCOperationSyncValueA>),
#[cfg(feature = "unstable-blockstore")]
SupplyBlockA(Box<RPCOperationSupplyBlockA>),
#[cfg(feature = "unstable-blockstore")]
@ -63,6 +65,8 @@ impl RPCAnswerDetail {
RPCAnswerDetail::SetValueA(_) => "SetValueA",
RPCAnswerDetail::WatchValueA(_) => "WatchValueA",
RPCAnswerDetail::InspectValueA(_) => "InspectValueA",
RPCAnswerDetail::TransactValueA(_) => "TransactValueA",
RPCAnswerDetail::SyncValueA(_) => "SyncValueA",
#[cfg(feature = "unstable-blockstore")]
RPCAnswerDetail::SupplyBlockA(_) => "SupplyBlockA",
#[cfg(feature = "unstable-blockstore")]
@ -84,6 +88,8 @@ impl RPCAnswerDetail {
RPCAnswerDetail::SetValueA(r) => r.validate(validate_context),
RPCAnswerDetail::WatchValueA(r) => r.validate(validate_context),
RPCAnswerDetail::InspectValueA(r) => r.validate(validate_context),
RPCAnswerDetail::TransactValueA(r) => r.validate(validate_context),
RPCAnswerDetail::SyncValueA(r) => r.validate(validate_context),
#[cfg(feature = "unstable-blockstore")]
RPCAnswerDetail::SupplyBlockA(r) => r.validate(validate_context),
#[cfg(feature = "unstable-blockstore")]
@ -137,6 +143,16 @@ impl RPCAnswerDetail {
let out = RPCOperationInspectValueA::decode(decode_context, &op_reader)?;
RPCAnswerDetail::InspectValueA(Box::new(out))
}
veilid_capnp::answer::detail::TransactValueA(r) => {
let op_reader = r?;
let out = RPCOperationTransactValueA::decode(decode_context, &op_reader)?;
RPCAnswerDetail::TransactValueA(Box::new(out))
}
veilid_capnp::answer::detail::SyncValueA(r) => {
let op_reader = r?;
let out = RPCOperationSyncValueA::decode(decode_context, &op_reader)?;
RPCAnswerDetail::SyncValueA(Box::new(out))
}
#[cfg(feature = "unstable-blockstore")]
veilid_capnp::answer::detail::SupplyBlockA(r) => {
let op_reader = r?;
@ -186,6 +202,10 @@ impl RPCAnswerDetail {
RPCAnswerDetail::InspectValueA(d) => {
d.encode(&mut builder.reborrow().init_inspect_value_a())
}
RPCAnswerDetail::TransactValueA(d) => {
d.encode(&mut builder.reborrow().init_transact_value_a())
}
RPCAnswerDetail::SyncValueA(d) => d.encode(&mut builder.reborrow().init_sync_value_a()),
#[cfg(feature = "unstable-blockstore")]
RPCAnswerDetail::SupplyBlockA(d) => {
d.encode(&mut builder.reborrow().init_supply_block_a())

View file

@ -10,6 +10,8 @@ mod operation_route;
mod operation_set_value;
mod operation_signal;
mod operation_status;
mod operation_sync_value;
mod operation_transact_value;
mod operation_validate_dial_info;
mod operation_value_changed;
@ -31,6 +33,7 @@ mod operation_complete_tunnel;
mod operation_start_tunnel;
pub use operation_inspect_value::MAX_INSPECT_VALUE_A_SEQS_LEN;
pub use operation_transact_value::{TransactValueCommand, MAX_TRANSACT_VALUE_A_SEQS_LEN};
pub(in crate::rpc_processor) use answer::*;
pub(in crate::rpc_processor) use operation::*;
@ -44,6 +47,8 @@ pub(in crate::rpc_processor) use operation_route::*;
pub(in crate::rpc_processor) use operation_set_value::*;
pub(in crate::rpc_processor) use operation_signal::*;
pub(in crate::rpc_processor) use operation_status::*;
pub(in crate::rpc_processor) use operation_sync_value::*;
pub(in crate::rpc_processor) use operation_transact_value::*;
pub(in crate::rpc_processor) use operation_validate_dial_info::*;
pub(in crate::rpc_processor) use operation_value_changed::*;
pub(in crate::rpc_processor) use operation_watch_value::*;

View file

@ -8,39 +8,46 @@ pub(in crate::rpc_processor) struct ValidateGetValueContext {
pub opaque_record_key: OpaqueRecordKey,
pub last_descriptor: Option<SignedValueDescriptor>,
pub subkey: ValueSubkey,
pub crypto_kind: CryptoKind,
}
#[derive(Debug, Clone)]
pub(in crate::rpc_processor) struct RPCOperationGetValueQ {
key: OpaqueRecordKey,
transaction_id: Option<u64>,
subkey: ValueSubkey,
want_descriptor: bool,
}
impl RPCOperationGetValueQ {
pub fn new(key: OpaqueRecordKey, subkey: ValueSubkey, want_descriptor: bool) -> Self {
Self {
pub fn new(
key: OpaqueRecordKey,
transaction_id: Option<u64>,
subkey: ValueSubkey,
want_descriptor: bool,
) -> Result<Self, RPCError> {
// Transaction id should never be zero here as that is the sentinel for None
if transaction_id == Some(0u64) {
return Err(RPCError::protocol("invalid transaction id"));
}
Ok(Self {
key,
transaction_id,
subkey,
want_descriptor,
}
})
}
pub fn validate(&mut self, _validate_context: &RPCValidateContext) -> Result<(), RPCError> {
Ok(())
}
// pub fn key(&self) -> &PublicKey {
// &self.key
// }
// pub fn subkey(&self) -> ValueSubkey {
// self.subkey
// }
// pub fn want_descriptor(&self) -> bool {
// self.want_descriptor
// }
pub fn destructure(self) -> (OpaqueRecordKey, ValueSubkey, bool) {
(self.key, self.subkey, self.want_descriptor)
pub fn destructure(self) -> (OpaqueRecordKey, Option<u64>, ValueSubkey, bool) {
(
self.key,
self.transaction_id,
self.subkey,
self.want_descriptor,
)
}
pub fn decode(
@ -50,10 +57,17 @@ impl RPCOperationGetValueQ {
rpc_ignore_missing_property!(reader, key);
let k_reader = reader.get_key()?;
let key = decode_opaque_record_key(&k_reader)?;
let transaction_id = reader.get_transaction_id();
let transaction_id = if transaction_id == 0 {
None
} else {
Some(transaction_id)
};
let subkey = reader.get_subkey();
let want_descriptor = reader.get_want_descriptor();
Ok(Self {
key,
transaction_id,
subkey,
want_descriptor,
})
@ -64,6 +78,7 @@ impl RPCOperationGetValueQ {
) -> Result<(), RPCError> {
let mut k_builder = builder.reborrow().init_key();
encode_opaque_record_key(&self.key, &mut k_builder);
builder.set_transaction_id(self.transaction_id.unwrap_or(0));
builder.set_subkey(self.subkey);
builder.set_want_descriptor(self.want_descriptor);
Ok(())
@ -74,6 +89,8 @@ impl RPCOperationGetValueQ {
#[derive(Debug, Clone)]
pub(in crate::rpc_processor) struct RPCOperationGetValueA {
accepted: bool,
transaction_valid: bool,
value: Option<SignedValueData>,
peers: Vec<Arc<PeerInfo>>,
descriptor: Option<SignedValueDescriptor>,
@ -81,6 +98,8 @@ pub(in crate::rpc_processor) struct RPCOperationGetValueA {
impl RPCOperationGetValueA {
pub fn new(
accepted: bool,
transaction_valid: bool,
value: Option<SignedValueData>,
peers: Vec<Arc<PeerInfo>>,
descriptor: Option<SignedValueDescriptor>,
@ -91,6 +110,8 @@ impl RPCOperationGetValueA {
));
}
Ok(Self {
accepted,
transaction_valid,
value,
peers,
descriptor,
@ -107,7 +128,7 @@ impl RPCOperationGetValueA {
};
let crypto = validate_context.crypto();
let Some(vcrypto) = crypto.get(get_value_context.crypto_kind) else {
let Some(vcrypto) = crypto.get(get_value_context.opaque_record_key.kind()) else {
return Err(RPCError::protocol("unsupported cryptosystem"));
};
@ -153,29 +174,31 @@ impl RPCOperationGetValueA {
Ok(())
}
// pub fn value(&self) -> Option<&SignedValueData> {
// self.value.as_ref()
// }
// pub fn peers(&self) -> &[PeerInfo] {
// &self.peers
// }
// pub fn descriptor(&self) -> Option<&SignedValueDescriptor> {
// self.descriptor.as_ref()
// }
pub fn destructure(
self,
) -> (
bool,
bool,
Option<SignedValueData>,
Vec<Arc<PeerInfo>>,
Option<SignedValueDescriptor>,
) {
(self.value, self.peers, self.descriptor)
(
self.accepted,
self.transaction_valid,
self.value,
self.peers,
self.descriptor,
)
}
pub fn decode(
decode_context: &RPCDecodeContext,
reader: &veilid_capnp::operation_get_value_a::Reader,
) -> Result<Self, RPCError> {
let accepted = reader.get_accepted();
let transaction_valid = reader.get_transaction_valid();
let value = if reader.has_value() {
let value_reader = reader.get_value()?;
let value = decode_signed_value_data(&value_reader)?;
@ -204,6 +227,8 @@ impl RPCOperationGetValueA {
};
Ok(Self {
accepted,
transaction_valid,
value,
peers,
descriptor,
@ -213,6 +238,9 @@ impl RPCOperationGetValueA {
&self,
builder: &mut veilid_capnp::operation_get_value_a::Builder,
) -> Result<(), RPCError> {
builder.set_accepted(self.accepted);
builder.set_transaction_valid(self.transaction_valid);
if let Some(value) = &self.value {
let mut v_builder = builder.reborrow().init_value();
encode_signed_value_data(value, &mut v_builder)?;

View file

@ -1,8 +1,8 @@
use super::*;
use crate::storage_manager::SignedValueDescriptor;
const MAX_INSPECT_VALUE_Q_SUBKEY_RANGES_LEN: usize = 512;
pub const MAX_INSPECT_VALUE_A_SEQS_LEN: usize = 512;
const MAX_INSPECT_VALUE_Q_SUBKEY_RANGES_LEN: usize = DHTSchema::MAX_SUBKEY_COUNT / 2;
pub const MAX_INSPECT_VALUE_A_SEQS_LEN: usize = DHTSchema::MAX_SUBKEY_COUNT;
const MAX_INSPECT_VALUE_A_PEERS_LEN: usize = 20;
#[derive(Debug, Clone)]
@ -10,12 +10,12 @@ pub(in crate::rpc_processor) struct ValidateInspectValueContext {
pub opaque_record_key: OpaqueRecordKey,
pub last_descriptor: Option<SignedValueDescriptor>,
pub subkeys: ValueSubkeyRangeSet,
pub crypto_kind: CryptoKind,
}
#[derive(Debug, Clone)]
pub(in crate::rpc_processor) struct RPCOperationInspectValueQ {
key: OpaqueRecordKey,
transaction_id: Option<u64>,
subkeys: ValueSubkeyRangeSet,
want_descriptor: bool,
}
@ -23,11 +23,18 @@ pub(in crate::rpc_processor) struct RPCOperationInspectValueQ {
impl RPCOperationInspectValueQ {
pub fn new(
key: OpaqueRecordKey,
transaction_id: Option<u64>,
subkeys: ValueSubkeyRangeSet,
want_descriptor: bool,
) -> Result<Self, RPCError> {
// Transaction id should never be zero here as that is the sentinel for None
if transaction_id == Some(0u64) {
return Err(RPCError::protocol("invalid transaction id"));
}
Ok(Self {
key,
transaction_id,
subkeys,
want_descriptor,
})
@ -36,17 +43,13 @@ impl RPCOperationInspectValueQ {
Ok(())
}
// pub fn key(&self) -> &PublicKey {
// &self.key
// }
// pub fn subkeys(&self) -> &ValueSubkeyRangeSet {
// &self.subkeys
// }
// pub fn want_descriptor(&self) -> bool {
// self.want_descriptor
// }
pub fn destructure(self) -> (OpaqueRecordKey, ValueSubkeyRangeSet, bool) {
(self.key, self.subkeys, self.want_descriptor)
pub fn destructure(self) -> (OpaqueRecordKey, Option<u64>, ValueSubkeyRangeSet, bool) {
(
self.key,
self.transaction_id,
self.subkeys,
self.want_descriptor,
)
}
pub fn decode(
@ -57,6 +60,13 @@ impl RPCOperationInspectValueQ {
let k_reader = reader.get_key()?;
let key = decode_opaque_record_key(&k_reader)?;
let transaction_id = reader.get_transaction_id();
let transaction_id = if transaction_id == 0 {
None
} else {
Some(transaction_id)
};
rpc_ignore_missing_property!(reader, subkeys);
let sk_reader = reader.get_subkeys()?;
// Maximum number of ranges that can hold the maximum number of subkeys is one subkey per range
@ -81,6 +91,7 @@ impl RPCOperationInspectValueQ {
let want_descriptor = reader.get_want_descriptor();
Ok(Self {
key,
transaction_id,
subkeys,
want_descriptor,
})
@ -92,6 +103,8 @@ impl RPCOperationInspectValueQ {
let mut k_builder = builder.reborrow().init_key();
encode_opaque_record_key(&self.key, &mut k_builder);
builder.set_transaction_id(self.transaction_id.unwrap_or(0));
let mut sk_builder = builder.reborrow().init_subkeys(
self.subkeys
.ranges_len()
@ -112,6 +125,8 @@ impl RPCOperationInspectValueQ {
#[derive(Debug, Clone)]
pub(in crate::rpc_processor) struct RPCOperationInspectValueA {
accepted: bool,
transaction_valid: bool,
seqs: Vec<ValueSeqNum>,
peers: Vec<Arc<PeerInfo>>,
descriptor: Option<SignedValueDescriptor>,
@ -119,21 +134,27 @@ pub(in crate::rpc_processor) struct RPCOperationInspectValueA {
impl RPCOperationInspectValueA {
pub fn new(
accepted: bool,
transaction_valid: bool,
seqs: Vec<ValueSeqNum>,
peers: Vec<Arc<PeerInfo>>,
descriptor: Option<SignedValueDescriptor>,
) -> Result<Self, RPCError> {
// Validate length of seqs
if seqs.len() > MAX_INSPECT_VALUE_A_SEQS_LEN {
return Err(RPCError::protocol(
"encoded InspectValueA seqs length too long",
));
}
// Validate length of peers
if peers.len() > MAX_INSPECT_VALUE_A_PEERS_LEN {
return Err(RPCError::protocol(
"encoded InspectValueA peers length too long",
));
}
Ok(Self {
accepted,
transaction_valid,
seqs,
peers,
descriptor,
@ -150,7 +171,7 @@ impl RPCOperationInspectValueA {
};
let crypto = validate_context.crypto();
let Some(vcrypto) = crypto.get(inspect_value_context.crypto_kind) else {
let Some(vcrypto) = crypto.get(inspect_value_context.opaque_record_key.kind()) else {
return Err(RPCError::protocol("unsupported cryptosystem"));
};
@ -192,29 +213,31 @@ impl RPCOperationInspectValueA {
Ok(())
}
// pub fn seqs(&self) -> &[ValueSeqNum] {
// &self.seqs
// }
// pub fn peers(&self) -> &[PeerInfo] {
// &self.peers
// }
// pub fn descriptor(&self) -> Option<&SignedValueDescriptor> {
// self.descriptor.as_ref()
// }
pub fn destructure(
self,
) -> (
bool,
bool,
Vec<ValueSeqNum>,
Vec<Arc<PeerInfo>>,
Option<SignedValueDescriptor>,
) {
(self.seqs, self.peers, self.descriptor)
(
self.accepted,
self.transaction_valid,
self.seqs,
self.peers,
self.descriptor,
)
}
pub fn decode(
decode_context: &RPCDecodeContext,
reader: &veilid_capnp::operation_inspect_value_a::Reader,
) -> Result<Self, RPCError> {
let accepted = reader.get_accepted();
let transaction_valid = reader.get_transaction_valid();
rpc_ignore_missing_property!(reader, seqs);
let seqs = {
let seqs_reader = reader.get_seqs()?;
@ -245,6 +268,8 @@ impl RPCOperationInspectValueA {
};
Ok(Self {
accepted,
transaction_valid,
seqs,
peers,
descriptor,
@ -254,6 +279,9 @@ impl RPCOperationInspectValueA {
&self,
builder: &mut veilid_capnp::operation_inspect_value_a::Builder,
) -> Result<(), RPCError> {
builder.set_accepted(self.accepted);
builder.set_transaction_valid(self.transaction_valid);
let mut seqs_builder = builder.reborrow().init_seqs(
self.seqs
.len()

View file

@ -8,12 +8,12 @@ pub(in crate::rpc_processor) struct ValidateSetValueContext {
pub opaque_record_key: OpaqueRecordKey,
pub descriptor: SignedValueDescriptor,
pub subkey: ValueSubkey,
pub crypto_kind: CryptoKind,
}
#[derive(Debug, Clone)]
pub(in crate::rpc_processor) struct RPCOperationSetValueQ {
key: OpaqueRecordKey,
transaction_id: Option<u64>,
subkey: ValueSubkey,
value: SignedValueData,
descriptor: Option<SignedValueDescriptor>,
@ -22,45 +22,45 @@ pub(in crate::rpc_processor) struct RPCOperationSetValueQ {
impl RPCOperationSetValueQ {
pub fn new(
key: OpaqueRecordKey,
transaction_id: Option<u64>,
subkey: ValueSubkey,
value: SignedValueData,
descriptor: Option<SignedValueDescriptor>,
) -> Self {
Self {
) -> Result<Self, RPCError> {
// Transaction id should never be zero here as that is the sentinel for None
if transaction_id == Some(0u64) {
return Err(RPCError::protocol("invalid transaction id"));
}
Ok(Self {
key,
transaction_id,
subkey,
value,
descriptor,
}
})
}
pub fn validate(&mut self, _validate_context: &RPCValidateContext) -> Result<(), RPCError> {
// Validation is performed by StorageManager because descriptor is not always available here
Ok(())
}
// pub fn key(&self) -> &PublicKey {
// &self.key
// }
// pub fn subkey(&self) -> ValueSubkey {
// self.subkey
// }
// pub fn value(&self) -> &SignedValueData {
// &self.value
// }
// pub fn descriptor(&self) -> Option<&SignedValueDescriptor> {
// self.descriptor.as_ref()
// }
pub fn destructure(
self,
) -> (
OpaqueRecordKey,
Option<u64>,
ValueSubkey,
SignedValueData,
Option<SignedValueDescriptor>,
) {
(self.key, self.subkey, self.value, self.descriptor)
(
self.key,
self.transaction_id,
self.subkey,
self.value,
self.descriptor,
)
}
pub fn decode(
@ -71,6 +71,13 @@ impl RPCOperationSetValueQ {
let k_reader = reader.get_key()?;
let key = decode_opaque_record_key(&k_reader)?;
let transaction_id = reader.get_transaction_id();
// FIX: zero is the wire sentinel for "no transaction id" (encode() writes
// `unwrap_or(0)` and new() rejects Some(0)). The mapping was inverted:
// it produced Some(0) for the sentinel and dropped every real id.
let transaction_id = if transaction_id == 0 {
    None
} else {
    Some(transaction_id)
};
let subkey = reader.get_subkey();
rpc_ignore_missing_property!(reader, value);
@ -86,6 +93,7 @@ impl RPCOperationSetValueQ {
};
Ok(Self {
key,
transaction_id,
subkey,
value,
descriptor,
@ -97,6 +105,7 @@ impl RPCOperationSetValueQ {
) -> Result<(), RPCError> {
let mut k_builder = builder.reborrow().init_key();
encode_opaque_record_key(&self.key, &mut k_builder);
builder.set_transaction_id(self.transaction_id.unwrap_or(0u64));
builder.set_subkey(self.subkey);
let mut v_builder = builder.reborrow().init_value();
encode_signed_value_data(&self.value, &mut v_builder)?;
@ -113,6 +122,7 @@ impl RPCOperationSetValueQ {
#[derive(Debug, Clone)]
pub(in crate::rpc_processor) struct RPCOperationSetValueA {
accepted: bool,
transaction_valid: bool,
needs_descriptor: bool,
value: Option<SignedValueData>,
peers: Vec<Arc<PeerInfo>>,
@ -121,6 +131,7 @@ pub(in crate::rpc_processor) struct RPCOperationSetValueA {
impl RPCOperationSetValueA {
pub fn new(
accepted: bool,
transaction_valid: bool,
needs_descriptor: bool,
value: Option<SignedValueData>,
peers: Vec<Arc<PeerInfo>>,
@ -132,6 +143,7 @@ impl RPCOperationSetValueA {
}
Ok(Self {
accepted,
transaction_valid,
needs_descriptor,
value,
peers,
@ -148,7 +160,7 @@ impl RPCOperationSetValueA {
};
let crypto = validate_context.crypto();
let Some(vcrypto) = crypto.get(set_value_context.crypto_kind) else {
let Some(vcrypto) = crypto.get(set_value_context.opaque_record_key.kind()) else {
return Err(RPCError::protocol("unsupported cryptosystem"));
};
@ -175,17 +187,22 @@ impl RPCOperationSetValueA {
Ok(())
}
// pub fn accepted(&self) -> bool {
// self.accepted
// }
// pub fn value(&self) -> Option<&SignedValueData> {
// self.value.as_ref()
// }
// pub fn peers(&self) -> &[PeerInfo] {
// &self.peers
// }
pub fn destructure(self) -> (bool, bool, Option<SignedValueData>, Vec<Arc<PeerInfo>>) {
(self.accepted, self.needs_descriptor, self.value, self.peers)
pub fn destructure(
self,
) -> (
bool,
bool,
bool,
Option<SignedValueData>,
Vec<Arc<PeerInfo>>,
) {
(
self.accepted,
self.transaction_valid,
self.needs_descriptor,
self.value,
self.peers,
)
}
pub fn decode(
@ -194,6 +211,7 @@ impl RPCOperationSetValueA {
) -> Result<Self, RPCError> {
let accepted = reader.get_accepted();
let needs_descriptor = reader.get_needs_descriptor();
let transaction_valid = reader.get_transaction_valid();
let value = if reader.has_value() {
let v_reader = reader.get_value()?;
let value = decode_signed_value_data(&v_reader)?;
@ -213,6 +231,7 @@ impl RPCOperationSetValueA {
Ok(Self {
accepted,
transaction_valid,
needs_descriptor,
value,
peers,
@ -224,6 +243,8 @@ impl RPCOperationSetValueA {
) -> Result<(), RPCError> {
builder.set_accepted(self.accepted);
builder.set_needs_descriptor(self.needs_descriptor);
builder.set_transaction_valid(self.transaction_valid);
if let Some(value) = &self.value {
let mut v_builder = builder.reborrow().init_value();
encode_signed_value_data(value, &mut v_builder)?;

View file

@ -0,0 +1,336 @@
use super::*;
use crate::storage_manager::{SignedValueData, SignedValueDescriptor};
pub const MAX_SYNC_VALUE_Q_SEQS_LEN: usize = DHTSchema::MAX_SUBKEY_COUNT;
pub const MAX_SYNC_VALUE_A_SEQS_LEN: usize = DHTSchema::MAX_SUBKEY_COUNT;
/// Context required to validate a SyncValue answer: the opaque record key and
/// the record's signed descriptor (used to verify returned value signatures).
#[derive(Debug, Clone)]
pub(in crate::rpc_processor) struct ValidateSyncValueContext {
    pub opaque_record_key: OpaqueRecordKey,
    pub descriptor: SignedValueDescriptor,
}
/// SyncValue question payload.
#[derive(Debug, Clone)]
pub(in crate::rpc_processor) struct RPCOperationSyncValueQ {
    key: OpaqueRecordKey,
    // None is encoded as 0 on the wire (see encode/decode)
    transaction_id: Option<u64>,
    seqs: Vec<ValueSeqNum>,
    // None is encoded as ValueSubkey::MAX on the wire
    subkey: Option<ValueSubkey>,
    value: Option<SignedValueData>,
    descriptor: Option<SignedValueDescriptor>,
}
impl RPCOperationSyncValueQ {
pub fn new(
key: OpaqueRecordKey,
transaction_id: Option<u64>,
seqs: Vec<ValueSeqNum>,
subkey: Option<ValueSubkey>,
value: Option<SignedValueData>,
descriptor: Option<SignedValueDescriptor>,
) -> Result<Self, RPCError> {
// Transaction id should never be zero here as that is the sentinel for None
if transaction_id == Some(0u64) {
return Err(RPCError::protocol("invalid transaction id"));
}
// Validate length of seqs
if seqs.len() > MAX_SYNC_VALUE_Q_SEQS_LEN {
return Err(RPCError::protocol(
"encoded SyncValueQ seqs length too long",
));
}
// subkey should never be 0xFFFFFFFF here as that is the sentinel for None
if subkey == Some(ValueSubkey::MAX) {
return Err(RPCError::protocol("invalid subkey number"));
}
Ok(Self {
key,
transaction_id,
seqs,
subkey,
value,
descriptor,
})
}
pub fn validate(&mut self, _validate_context: &RPCValidateContext) -> Result<(), RPCError> {
// Validation is performed by StorageManager because descriptor is not always available here
Ok(())
}
pub fn destructure(
self,
) -> (
OpaqueRecordKey,
Option<u64>,
Vec<ValueSeqNum>,
Option<ValueSubkey>,
Option<SignedValueData>,
Option<SignedValueDescriptor>,
) {
(
self.key,
self.transaction_id,
self.seqs,
self.subkey,
self.value,
self.descriptor,
)
}
pub fn decode(
_decode_context: &RPCDecodeContext,
reader: &veilid_capnp::operation_sync_value_q::Reader,
) -> Result<Self, RPCError> {
rpc_ignore_missing_property!(reader, key);
let k_reader = reader.get_key()?;
let key = decode_opaque_record_key(&k_reader)?;
let transaction_id = reader.get_transaction_id();
let transaction_id = if transaction_id == 0 {
Some(transaction_id)
} else {
None
};
rpc_ignore_missing_property!(reader, seqs);
let seqs = {
let seqs_reader = reader.get_seqs()?;
rpc_ignore_max_len!(seqs_reader, MAX_SYNC_VALUE_Q_SEQS_LEN);
let Some(seqs) = seqs_reader.as_slice().map(|s| s.to_vec()) else {
return Err(RPCError::protocol("invalid decoded SyncValueQ seqs"));
};
seqs
};
let subkey = reader.get_subkey();
let subkey = if subkey == ValueSubkey::MAX {
None
} else {
Some(subkey)
};
let value = if reader.has_value() {
let v_reader = reader.get_value()?;
Some(decode_signed_value_data(&v_reader)?)
} else {
None
};
let descriptor = if reader.has_descriptor() {
let d_reader = reader.get_descriptor()?;
Some(decode_signed_value_descriptor(&d_reader)?)
} else {
None
};
Ok(Self {
key,
transaction_id,
seqs,
subkey,
value,
descriptor,
})
}
pub fn encode(
&self,
builder: &mut veilid_capnp::operation_sync_value_q::Builder,
) -> Result<(), RPCError> {
let mut k_builder = builder.reborrow().init_key();
encode_opaque_record_key(&self.key, &mut k_builder);
builder.set_transaction_id(self.transaction_id.unwrap_or(0u64));
let mut seqs_builder = builder.reborrow().init_seqs(
self.seqs
.len()
.try_into()
.map_err(RPCError::map_internal("invalid seqs list length"))?,
);
for (i, seq) in self.seqs.iter().enumerate() {
seqs_builder.set(i as u32, *seq);
}
builder.set_subkey(self.subkey.unwrap_or(ValueSubkey::MAX));
if let Some(value) = &self.value {
let mut v_builder = builder.reborrow().init_value();
encode_signed_value_data(value, &mut v_builder)?;
}
if let Some(descriptor) = &self.descriptor {
let mut d_builder = builder.reborrow().init_descriptor();
encode_signed_value_descriptor(descriptor, &mut d_builder)?;
}
Ok(())
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////
/// SyncValue answer payload.
#[derive(Debug, Clone)]
pub(in crate::rpc_processor) struct RPCOperationSyncValueA {
    accepted: bool,
    transaction_valid: bool,
    needs_descriptor: bool,
    seqs: Vec<ValueSeqNum>,
    // None is encoded as ValueSubkey::MAX on the wire
    subkey: Option<ValueSubkey>,
    value: Option<SignedValueData>,
}
impl RPCOperationSyncValueA {
    /// Build a new SyncValue answer, enforcing length and sentinel invariants.
    ///
    /// # Errors
    /// Protocol error if `seqs` exceeds `MAX_SYNC_VALUE_A_SEQS_LEN` or
    /// `subkey` is `Some(ValueSubkey::MAX)` (the wire sentinel for None).
    pub fn new(
        accepted: bool,
        transaction_valid: bool,
        needs_descriptor: bool,
        seqs: Vec<ValueSeqNum>,
        subkey: Option<ValueSubkey>,
        value: Option<SignedValueData>,
    ) -> Result<Self, RPCError> {
        // Validate length of seqs
        if seqs.len() > MAX_SYNC_VALUE_A_SEQS_LEN {
            return Err(RPCError::protocol(
                "encoded SyncValueA seqs length too long",
            ));
        }
        // Subkey should never be 0xFFFFFFFF here as that is the sentinel for None
        if subkey == Some(ValueSubkey::MAX) {
            return Err(RPCError::protocol("invalid subkey number"));
        }
        Ok(Self {
            accepted,
            transaction_valid,
            needs_descriptor,
            seqs,
            subkey,
            value,
        })
    }

    /// Validate the answer against the question context: checks the record
    /// descriptor and, when a value was returned, its signature for the
    /// specified subkey.
    pub fn validate(&mut self, validate_context: &RPCValidateContext) -> Result<(), RPCError> {
        let question_context = validate_context
            .question_context
            .as_ref()
            .expect("SyncValueA requires question context");
        let QuestionContext::SyncValue(sync_value_context) = question_context else {
            // FIX: message previously said "SetValueA" (copy-paste error)
            panic!("Wrong context type for SyncValueA");
        };
        let crypto = validate_context.crypto();
        let Some(vcrypto) = crypto.get(sync_value_context.opaque_record_key.kind()) else {
            return Err(RPCError::protocol("unsupported cryptosystem"));
        };

        // Ensure the descriptor itself validates
        sync_value_context
            .descriptor
            .validate(&vcrypto, &sync_value_context.opaque_record_key)
            .map_err(RPCError::protocol)?;

        if let Some(value) = &self.value {
            let Some(subkey) = self.subkey else {
                return Err(RPCError::protocol("subkey not specified for synced value"));
            };
            // And the signed value data
            if !value
                .validate(sync_value_context.descriptor.ref_owner(), subkey, &vcrypto)
                .map_err(RPCError::protocol)?
            {
                return Err(RPCError::protocol("signed value data did not validate"));
            }
        }
        Ok(())
    }

    /// Consume self and return all fields in declaration order.
    pub fn destructure(
        self,
    ) -> (
        bool,
        bool,
        bool,
        Vec<ValueSeqNum>,
        Option<ValueSubkey>,
        Option<SignedValueData>,
    ) {
        (
            self.accepted,
            self.transaction_valid,
            self.needs_descriptor,
            self.seqs,
            self.subkey,
            self.value,
        )
    }

    /// Decode a SyncValue answer from its capnp reader, mapping the
    /// ValueSubkey::MAX sentinel back to None.
    pub fn decode(
        _decode_context: &RPCDecodeContext,
        reader: &veilid_capnp::operation_sync_value_a::Reader,
    ) -> Result<Self, RPCError> {
        let accepted = reader.get_accepted();
        let transaction_valid = reader.get_transaction_valid();
        let needs_descriptor = reader.get_needs_descriptor();
        rpc_ignore_missing_property!(reader, seqs);
        let seqs = {
            let seqs_reader = reader.get_seqs()?;
            rpc_ignore_max_len!(seqs_reader, MAX_SYNC_VALUE_A_SEQS_LEN);
            let Some(seqs) = seqs_reader.as_slice().map(|s| s.to_vec()) else {
                return Err(RPCError::protocol("invalid decoded SyncValueA seqs"));
            };
            seqs
        };
        let subkey = reader.get_subkey();
        let subkey = if subkey == ValueSubkey::MAX {
            None
        } else {
            Some(subkey)
        };
        let value = if reader.has_value() {
            let v_reader = reader.get_value()?;
            let value = decode_signed_value_data(&v_reader)?;
            Some(value)
        } else {
            None
        };
        Ok(Self {
            accepted,
            transaction_valid,
            needs_descriptor,
            seqs,
            subkey,
            value,
        })
    }

    /// Encode this answer into its capnp builder.
    pub fn encode(
        &self,
        builder: &mut veilid_capnp::operation_sync_value_a::Builder,
    ) -> Result<(), RPCError> {
        builder.set_accepted(self.accepted);
        builder.set_needs_descriptor(self.needs_descriptor);
        builder.set_transaction_valid(self.transaction_valid);
        let mut seqs_builder = builder.reborrow().init_seqs(
            self.seqs
                .len()
                .try_into()
                .map_err(RPCError::map_internal("invalid seqs list length"))?,
        );
        for (i, seq) in self.seqs.iter().enumerate() {
            seqs_builder.set(i as u32, *seq);
        }
        builder.set_subkey(self.subkey.unwrap_or(ValueSubkey::MAX));
        if let Some(value) = &self.value {
            let mut v_builder = builder.reborrow().init_value();
            encode_signed_value_data(value, &mut v_builder)?;
        }
        Ok(())
    }
}

View file

@ -0,0 +1,398 @@
use super::*;
use crate::storage_manager::SignedValueDescriptor;
pub const MAX_TRANSACT_VALUE_A_SEQS_LEN: usize = DHTSchema::MAX_SUBKEY_COUNT;
const MAX_TRANSACT_VALUE_A_PEERS_LEN: usize = 20;
/// Lifecycle commands for a DHT value transaction.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum TransactValueCommand {
    Begin,
    End,
    Commit,
    Rollback,
}
/// The TransactValue parameters that are canonically serialized into a blob
/// and signed by the writer.
#[derive(Debug, Clone)]
struct TransactValueParams {
    key: OpaqueRecordKey,
    command: TransactValueCommand,
    // None is encoded as 0 on the wire; required for all commands except Begin
    transaction_id: Option<u64>,
    descriptor: Option<SignedValueDescriptor>,
}
/// Decode the signed TransactValue parameter blob, enforcing the
/// command/transaction-id pairing rules.
///
/// # Errors
/// Protocol error if Begin carries a transaction id, or any other command
/// lacks one.
fn decode_transact_value_params(
    reader: &veilid_capnp::transact_value_params::Reader,
) -> Result<TransactValueParams, RPCError> {
    rpc_ignore_missing_property!(reader, key);
    let k_reader = reader.get_key()?;
    let key = decode_opaque_record_key(&k_reader)?;

    let cmd_reader = reader.get_command()?;
    let command = match cmd_reader {
        veilid_capnp::TransactCommand::Begin => TransactValueCommand::Begin,
        veilid_capnp::TransactCommand::End => TransactValueCommand::End,
        veilid_capnp::TransactCommand::Commit => TransactValueCommand::Commit,
        veilid_capnp::TransactCommand::Rollback => TransactValueCommand::Rollback,
    };

    // Zero is the wire sentinel for "no transaction id"
    let transaction_id = if reader.get_transaction_id() != 0 {
        Some(reader.get_transaction_id())
    } else {
        None
    };

    // Transaction id should be present for everything except begin
    if matches!(command, TransactValueCommand::Begin) {
        if transaction_id.is_some() {
            return Err(RPCError::protocol("begin should not have transaction id"));
        }
    } else if transaction_id.is_none() {
        return Err(RPCError::protocol(
            "everything except begin should have transaction id",
        ));
    }

    // FIX: decode the descriptor when it is present. The condition was
    // inverted, which discarded present descriptors and attempted to read
    // absent ones.
    let descriptor = if reader.has_descriptor() {
        let svd_reader = reader.get_descriptor()?;
        Some(decode_signed_value_descriptor(&svd_reader)?)
    } else {
        None
    };

    Ok(TransactValueParams {
        key,
        command,
        transaction_id,
        descriptor,
    })
}
/// Encode the TransactValue parameter blob (the canonical bytes that the
/// writer signs). `None` transaction id is written as the 0 sentinel.
fn encode_transact_value_params(
    transact_value_params: &TransactValueParams,
    builder: &mut veilid_capnp::transact_value_params::Builder,
) -> Result<(), RPCError> {
    let mut k_builder = builder.reborrow().init_key();
    encode_opaque_record_key(&transact_value_params.key, &mut k_builder);
    builder
        .reborrow()
        .set_command(match transact_value_params.command {
            TransactValueCommand::Begin => veilid_capnp::TransactCommand::Begin,
            TransactValueCommand::End => veilid_capnp::TransactCommand::End,
            TransactValueCommand::Commit => veilid_capnp::TransactCommand::Commit,
            TransactValueCommand::Rollback => veilid_capnp::TransactCommand::Rollback,
        });
    builder.set_transaction_id(transact_value_params.transaction_id.unwrap_or(0u64));
    if let Some(descriptor) = &transact_value_params.descriptor {
        let mut d_builder = builder.reborrow().init_descriptor();
        // FIX: propagate encoding failure; the Result was previously dropped
        // (every other call site of this encoder uses `?`).
        encode_signed_value_descriptor(descriptor, &mut d_builder)?;
    }
    Ok(())
}
/// Context required to validate a TransactValue answer: the opaque record key
/// and the record's signed descriptor.
#[derive(Debug, Clone)]
pub(in crate::rpc_processor) struct ValidateTransactValueContext {
    pub opaque_record_key: OpaqueRecordKey,
    pub descriptor: SignedValueDescriptor,
}
/// TransactValue question payload.
///
/// The key/command/transaction_id/descriptor fields are also serialized into
/// `params_data`, which is the exact byte blob covered by `signature`.
#[derive(Debug, Clone)]
pub(in crate::rpc_processor) struct RPCOperationTransactValueQ {
    key: OpaqueRecordKey,
    command: TransactValueCommand,
    // None is encoded as 0 on the wire; required for all commands except Begin
    transaction_id: Option<u64>,
    descriptor: Option<SignedValueDescriptor>,
    // Canonical packed capnp encoding of TransactValueParams; signed bytes
    params_data: Vec<u8>,
    writer: PublicKey,
    signature: Signature,
}
impl RPCOperationTransactValueQ {
    /// Build a new TransactValue question: serializes the parameters into a
    /// canonical packed blob and signs that blob with the writer keypair.
    ///
    /// # Errors
    /// Protocol error if `transaction_id` is `Some(0)` (0 is the wire
    /// sentinel for None), if Begin carries a transaction id, if any other
    /// command lacks one, or if signing fails.
    pub fn new(
        key: OpaqueRecordKey,
        transaction_id: Option<u64>,
        command: TransactValueCommand,
        descriptor: Option<SignedValueDescriptor>,
        writer: KeyPair,
        vcrypto: &CryptoSystemGuard<'_>,
    ) -> Result<Self, RPCError> {
        // Transaction id should never be zero here as that is the sentinel for None
        if transaction_id == Some(0u64) {
            return Err(RPCError::protocol("invalid transaction id"));
        }
        // Transaction id should be present for everything except begin
        if matches!(command, TransactValueCommand::Begin) {
            if transaction_id.is_some() {
                return Err(RPCError::protocol("begin should not have transaction id"));
            }
        } else {
            if transaction_id.is_none() {
                return Err(RPCError::protocol(
                    "everything except begin should have transaction id",
                ));
            }
        }

        // Make parameter blob to sign
        let transact_value_params = TransactValueParams {
            key,
            command,
            transaction_id,
            descriptor,
        };

        // Canonical packed serialization so signer and verifier agree on bytes
        let mut message_builder = ::capnp::message::Builder::new_default();
        let mut builder =
            message_builder.init_root::<veilid_capnp::transact_value_params::Builder>();
        encode_transact_value_params(&transact_value_params, &mut builder)?;
        let params_data = canonical_message_builder_to_vec_packed(message_builder)?;

        // Sign the blob
        let signature = vcrypto
            .sign(&writer.key(), &writer.secret(), &params_data)
            .map_err(RPCError::protocol)?;

        Ok(Self {
            key: transact_value_params.key,
            command: transact_value_params.command,
            transaction_id: transact_value_params.transaction_id,
            descriptor: transact_value_params.descriptor,
            params_data,
            writer: writer.key(),
            signature,
        })
    }

    /// Verify the writer's signature over the canonical parameter blob.
    /// Descriptor validation is deferred to the StorageManager.
    pub fn validate(&mut self, validate_context: &RPCValidateContext) -> Result<(), RPCError> {
        let crypto = validate_context.crypto();
        let Some(vcrypto) = crypto.get(self.writer.kind()) else {
            return Err(RPCError::protocol("unsupported cryptosystem"));
        };
        if !vcrypto
            .verify(&self.writer, &self.params_data, &self.signature)
            .map_err(RPCError::protocol)?
        {
            return Err(RPCError::protocol("failed to validate writer signature"));
        }
        // SignedValueDescriptor validation is performed by StorageManager
        Ok(())
    }

    /// Consume self and return (key, transaction_id, command, descriptor, writer).
    pub fn destructure(
        self,
    ) -> (
        OpaqueRecordKey,
        Option<u64>,
        TransactValueCommand,
        Option<SignedValueDescriptor>,
        PublicKey,
    ) {
        (
            self.key,
            self.transaction_id,
            self.command,
            self.descriptor,
            self.writer,
        )
    }

    /// Decode a TransactValue question: the signed blob is kept verbatim in
    /// `params_data` (so the signature can be verified over the exact bytes)
    /// and also parsed into the individual fields.
    pub fn decode(
        _decode_context: &RPCDecodeContext,
        reader: &veilid_capnp::operation_transact_value_q::Reader,
    ) -> Result<Self, RPCError> {
        rpc_ignore_missing_property!(reader, transact_value_params_data);
        let params_data = reader.get_transact_value_params_data()?.to_vec();
        let mut params_data_cursor = &mut &params_data[..];
        let tmp_reader = capnp::serialize_packed::read_message(
            &mut params_data_cursor,
            capnp::message::ReaderOptions::new(),
        )?;
        let params_reader = tmp_reader.get_root::<veilid_capnp::transact_value_params::Reader>()?;
        let params = decode_transact_value_params(&params_reader)?;

        rpc_ignore_missing_property!(reader, writer);
        let w_reader = reader.get_writer()?;
        let writer = decode_public_key(&w_reader)?;

        rpc_ignore_missing_property!(reader, signature);
        let s_reader = reader.get_signature()?;
        let signature = decode_signature(&s_reader)?;

        Ok(Self {
            key: params.key,
            transaction_id: params.transaction_id,
            command: params.command,
            descriptor: params.descriptor,
            params_data,
            writer,
            signature,
        })
    }

    /// Encode this question; only the pre-serialized signed blob, the writer
    /// key, and the signature go on the wire.
    pub fn encode(
        &self,
        builder: &mut veilid_capnp::operation_transact_value_q::Builder,
    ) -> Result<(), RPCError> {
        builder
            .reborrow()
            .set_transact_value_params_data(&self.params_data);
        let mut w_builder = builder.reborrow().init_writer();
        encode_public_key(&self.writer, &mut w_builder);
        let mut s_builder = builder.reborrow().init_signature();
        encode_signature(&self.signature, &mut s_builder);
        Ok(())
    }
}
////////////////////////////////////////////////////////////////////////////////////////////////////
/// TransactValue answer payload.
#[derive(Debug, Clone)]
pub(in crate::rpc_processor) struct RPCOperationTransactValueA {
    accepted: bool,
    needs_descriptor: bool,
    // None is encoded as 0 on the wire
    transaction_id: Option<u64>,
    seqs: Vec<ValueSeqNum>,
    peers: Vec<Arc<PeerInfo>>,
}
impl RPCOperationTransactValueA {
    /// Build a new TransactValue answer, enforcing sentinel and length
    /// invariants.
    ///
    /// # Errors
    /// Protocol error if `transaction_id` is `Some(0)` (0 is the wire
    /// sentinel for None), or if `seqs`/`peers` exceed their maximum lengths.
    pub fn new(
        accepted: bool,
        needs_descriptor: bool,
        transaction_id: Option<u64>,
        seqs: Vec<ValueSeqNum>,
        peers: Vec<Arc<PeerInfo>>,
    ) -> Result<Self, RPCError> {
        // Transaction id should never be zero here as that is the sentinel for None
        if transaction_id == Some(0u64) {
            return Err(RPCError::protocol("invalid transaction id"));
        }
        if seqs.len() > MAX_TRANSACT_VALUE_A_SEQS_LEN {
            // FIX: message previously said "InspectValueA" (copy-paste error)
            return Err(RPCError::protocol(
                "encoded TransactValueA seqs length too long",
            ));
        }
        if peers.len() > MAX_TRANSACT_VALUE_A_PEERS_LEN {
            return Err(RPCError::protocol(
                "encoded TransactValueA peers length too long",
            ));
        }
        Ok(Self {
            accepted,
            needs_descriptor,
            transaction_id,
            seqs,
            peers,
        })
    }

    pub fn validate(&mut self, _validate_context: &RPCValidateContext) -> Result<(), RPCError> {
        Ok(())
    }

    /// Consume self and return all fields in declaration order.
    pub fn destructure(
        self,
    ) -> (
        bool,
        bool,
        Option<u64>,
        Vec<ValueSeqNum>,
        Vec<Arc<PeerInfo>>,
    ) {
        (
            self.accepted,
            self.needs_descriptor,
            self.transaction_id,
            self.seqs,
            self.peers,
        )
    }

    /// Decode a TransactValue answer, mapping the 0 sentinel back to None and
    /// skipping undecodable peers.
    pub fn decode(
        decode_context: &RPCDecodeContext,
        reader: &veilid_capnp::operation_transact_value_a::Reader,
    ) -> Result<Self, RPCError> {
        let accepted = reader.get_accepted();
        let needs_descriptor = reader.get_needs_descriptor();
        let transaction_id = reader.get_transaction_id();
        // Zero is the wire sentinel for "no transaction id"
        let transaction_id = if transaction_id == 0 {
            None
        } else {
            Some(transaction_id)
        };
        rpc_ignore_missing_property!(reader, seqs);
        let seqs = {
            let seqs_reader = reader.get_seqs()?;
            rpc_ignore_max_len!(seqs_reader, MAX_TRANSACT_VALUE_A_SEQS_LEN);
            let Some(seqs) = seqs_reader.as_slice().map(|s| s.to_vec()) else {
                return Err(RPCError::protocol("invalid decoded TransactValueA seqs"));
            };
            seqs
        };
        let peers_reader = reader.get_peers()?;
        let peers_len = rpc_ignore_max_len!(peers_reader, MAX_TRANSACT_VALUE_A_PEERS_LEN);
        let mut peers = Vec::<Arc<PeerInfo>>::with_capacity(peers_len);
        for p in peers_reader.iter() {
            // Silently drop peers that fail to decode rather than failing the answer
            let Some(peer_info) = decode_peer_info(decode_context, &p).ignore_ok()? else {
                continue;
            };
            peers.push(Arc::new(peer_info));
        }
        Ok(Self {
            accepted,
            needs_descriptor,
            transaction_id,
            seqs,
            peers,
        })
    }

    /// Encode this answer into its capnp builder.
    pub fn encode(
        &self,
        builder: &mut veilid_capnp::operation_transact_value_a::Builder,
    ) -> Result<(), RPCError> {
        builder.set_accepted(self.accepted);
        builder.set_needs_descriptor(self.needs_descriptor);
        builder
            .reborrow()
            .set_transaction_id(self.transaction_id.unwrap_or(0));
        let mut seqs_builder = builder.reborrow().init_seqs(
            self.seqs
                .len()
                .try_into()
                .map_err(RPCError::map_internal("invalid seqs list length"))?,
        );
        for (i, seq) in self.seqs.iter().enumerate() {
            seqs_builder.set(i as u32, *seq);
        }
        let mut peers_builder = builder.reborrow().init_peers(
            self.peers
                .len()
                .try_into()
                .map_err(RPCError::map_internal("invalid peers list length"))?,
        );
        for (i, peer) in self.peers.iter().enumerate() {
            let mut pi_builder = peers_builder.reborrow().get(i as u32);
            encode_peer_info(peer, &mut pi_builder)?;
        }
        Ok(())
    }
}

View file

@ -3,6 +3,83 @@ use super::*;
const MAX_WATCH_VALUE_Q_SUBKEY_RANGES_LEN: usize = 512;
const MAX_WATCH_VALUE_A_PEERS_LEN: usize = 20;
#[derive(Debug, Clone)]
/// The WatchValue parameters that are canonically serialized into a blob and
/// signed by the watcher.
struct WatchValueParams {
    key: OpaqueRecordKey,
    subkeys: ValueSubkeyRangeSet,
    expiration: u64,
    count: u32,
    // None is encoded as 0 on the wire
    watch_id: Option<u64>,
}
/// Decode the signed WatchValue parameter blob.
///
/// Subkey ranges must be well-formed (start <= end), strictly ascending, and
/// pre-merged; a watch id of 0 decodes to None.
fn decode_watch_value_params(
    reader: &veilid_capnp::watch_value_params::Reader,
) -> Result<WatchValueParams, RPCError> {
    rpc_ignore_missing_property!(reader, key);
    let k_reader = reader.get_key()?;
    let key = decode_opaque_record_key(&k_reader)?;
    rpc_ignore_missing_property!(reader, subkeys);
    let sk_reader = reader.get_subkeys()?;
    rpc_ignore_max_len!(sk_reader, MAX_WATCH_VALUE_Q_SUBKEY_RANGES_LEN);
    let mut subkeys = ValueSubkeyRangeSet::new();
    for skr in sk_reader.iter() {
        let vskr = (skr.get_start(), skr.get_end());
        // Ranges are inclusive; start must not exceed end
        if vskr.0 > vskr.1 {
            return Err(RPCError::protocol("invalid subkey range"));
        }
        // Enforce ascending, non-adjacent (pre-merged) ranges
        if let Some(lvskr) = subkeys.last() {
            if lvskr >= vskr.0 {
                return Err(RPCError::protocol(
                    "subkey range out of order or not merged",
                ));
            }
        }
        subkeys.ranges_insert(vskr.0..=vskr.1);
    }
    let expiration = reader.get_expiration();
    let count = reader.get_count();
    // Zero is the wire sentinel for "no watch id"
    let watch_id = if reader.get_watch_id() != 0 {
        Some(reader.get_watch_id())
    } else {
        None
    };
    Ok(WatchValueParams {
        key,
        subkeys,
        expiration,
        count,
        watch_id,
    })
}
/// Encode the WatchValue parameter blob (the canonical bytes that the watcher
/// signs). `None` watch id is written as the 0 sentinel.
fn encode_watch_value_params(
    watch_value_params: &WatchValueParams,
    builder: &mut veilid_capnp::watch_value_params::Builder,
) -> Result<(), RPCError> {
    let mut k_builder = builder.reborrow().init_key();
    encode_opaque_record_key(&watch_value_params.key, &mut k_builder);
    let mut sk_builder = builder.reborrow().init_subkeys(
        watch_value_params
            .subkeys
            .ranges_len()
            .try_into()
            .map_err(RPCError::map_internal("invalid subkey range list length"))?,
    );
    for (i, skr) in watch_value_params.subkeys.ranges().enumerate() {
        let mut skr_builder = sk_builder.reborrow().get(i as u32);
        skr_builder.set_start(*skr.start());
        skr_builder.set_end(*skr.end());
    }
    builder.set_expiration(watch_value_params.expiration);
    builder.set_count(watch_value_params.count);
    builder.set_watch_id(watch_value_params.watch_id.unwrap_or(0u64));
    Ok(())
}
#[derive(Debug, Clone)]
pub(in crate::rpc_processor) struct RPCOperationWatchValueQ {
key: OpaqueRecordKey,
@ -10,6 +87,7 @@ pub(in crate::rpc_processor) struct RPCOperationWatchValueQ {
expiration: u64,
count: u32,
watch_id: Option<u64>,
params_data: Vec<u8>,
watcher: PublicKey,
signature: Signature,
}
@ -33,73 +111,50 @@ impl RPCOperationWatchValueQ {
return Err(RPCError::protocol("can't cancel zero watch id"));
}
let signature_data = Self::make_signature_data(&key, &subkeys, expiration, count, watch_id);
let signature = vcrypto
.sign(&watcher.key(), &watcher.secret(), &signature_data)
.map_err(RPCError::protocol)?;
Ok(Self {
// Make parameter blob to sign
let watch_value_params = WatchValueParams {
key,
subkeys,
expiration,
count,
watch_id,
};
let mut message_builder = ::capnp::message::Builder::new_default();
let mut builder = message_builder.init_root::<veilid_capnp::watch_value_params::Builder>();
encode_watch_value_params(&watch_value_params, &mut builder)?;
let params_data = canonical_message_builder_to_vec_packed(message_builder)?;
// Sign the blob
let signature = vcrypto
.sign(&watcher.key(), &watcher.secret(), &params_data)
.map_err(RPCError::protocol)?;
Ok(Self {
key: watch_value_params.key,
subkeys: watch_value_params.subkeys,
expiration: watch_value_params.expiration,
count: watch_value_params.count,
watch_id: watch_value_params.watch_id,
params_data,
watcher: watcher.key(),
signature,
})
}
// signature covers: key, subkeys, expiration, count, using watcher key
fn make_signature_data(
key: &OpaqueRecordKey,
subkeys: &ValueSubkeyRangeSet,
expiration: u64,
count: u32,
watch_id: Option<u64>,
) -> Vec<u8> {
let subkeys_ranges_len = subkeys.ranges_len();
let mut sig_data =
Vec::with_capacity(key.ref_value().len() + 4 + (subkeys_ranges_len * 8) + 8 + 8);
sig_data.extend_from_slice(key.kind().bytes());
sig_data.extend_from_slice(key.ref_value());
for sk in subkeys.ranges() {
sig_data.extend_from_slice(&sk.start().to_le_bytes());
sig_data.extend_from_slice(&sk.end().to_le_bytes());
}
sig_data.extend_from_slice(&expiration.to_le_bytes());
sig_data.extend_from_slice(&count.to_le_bytes());
if let Some(watch_id) = watch_id {
sig_data.extend_from_slice(&watch_id.to_le_bytes());
}
sig_data
}
pub fn validate(&mut self, validate_context: &RPCValidateContext) -> Result<(), RPCError> {
let crypto = validate_context.crypto();
let Some(vcrypto) = crypto.get(self.watcher.kind()) else {
return Err(RPCError::protocol("unsupported cryptosystem"));
};
let sig_data = Self::make_signature_data(
&self.key,
&self.subkeys,
self.expiration,
self.count,
self.watch_id,
);
if !vcrypto
.verify(&self.watcher, &sig_data, &self.signature)
.verify(&self.watcher, &self.params_data, &self.signature)
.map_err(RPCError::protocol)?
{
return Err(RPCError::protocol("failed to validate watcher signature"));
}
// Count is zero means cancelling, so there should always be a watch id in this case
if self.count == 0 && self.watch_id.is_none() {
return Err(RPCError::protocol("can't cancel zero watch id"));
}
Ok(())
}
@ -162,37 +217,21 @@ impl RPCOperationWatchValueQ {
_decode_context: &RPCDecodeContext,
reader: &veilid_capnp::operation_watch_value_q::Reader,
) -> Result<Self, RPCError> {
rpc_ignore_missing_property!(reader, key);
let k_reader = reader.get_key()?;
let key = decode_opaque_record_key(&k_reader)?;
rpc_ignore_missing_property!(reader, watch_value_params_data);
let params_data = reader.get_watch_value_params_data()?.to_vec();
let mut params_data_cursor = &mut &params_data[..];
let tmp_reader = capnp::serialize_packed::read_message(
&mut params_data_cursor,
capnp::message::ReaderOptions::new(),
)?;
let params_reader = tmp_reader.get_root::<veilid_capnp::watch_value_params::Reader>()?;
let params = decode_watch_value_params(&params_reader)?;
rpc_ignore_missing_property!(reader, subkeys);
let sk_reader = reader.get_subkeys()?;
rpc_ignore_max_len!(sk_reader, MAX_WATCH_VALUE_Q_SUBKEY_RANGES_LEN);
let mut subkeys = ValueSubkeyRangeSet::new();
for skr in sk_reader.iter() {
let vskr = (skr.get_start(), skr.get_end());
if vskr.0 > vskr.1 {
return Err(RPCError::protocol("invalid subkey range"));
}
if let Some(lvskr) = subkeys.last() {
if lvskr >= vskr.0 {
return Err(RPCError::protocol(
"subkey range out of order or not merged",
));
}
}
subkeys.ranges_insert(vskr.0..=vskr.1);
// Count is zero means cancelling, so there should always be a watch id in this case
if params.count == 0 && params.watch_id.is_none() {
return Err(RPCError::protocol("can't cancel zero watch id"));
}
let expiration = reader.get_expiration();
let count = reader.get_count();
let watch_id = if reader.get_watch_id() != 0 {
Some(reader.get_watch_id())
} else {
None
};
rpc_ignore_missing_property!(reader, watcher);
let w_reader = reader.get_watcher()?;
let watcher = decode_public_key(&w_reader)?;
@ -202,11 +241,12 @@ impl RPCOperationWatchValueQ {
let signature = decode_signature(&s_reader)?;
Ok(Self {
key,
subkeys,
expiration,
count,
watch_id,
key: params.key,
subkeys: params.subkeys,
expiration: params.expiration,
count: params.count,
watch_id: params.watch_id,
params_data,
watcher,
signature,
})
@ -216,23 +256,9 @@ impl RPCOperationWatchValueQ {
&self,
builder: &mut veilid_capnp::operation_watch_value_q::Builder,
) -> Result<(), RPCError> {
let mut k_builder = builder.reborrow().init_key();
encode_opaque_record_key(&self.key, &mut k_builder);
let mut sk_builder = builder.reborrow().init_subkeys(
self.subkeys
.ranges_len()
.try_into()
.map_err(RPCError::map_internal("invalid subkey range list length"))?,
);
for (i, skr) in self.subkeys.ranges().enumerate() {
let mut skr_builder = sk_builder.reborrow().get(i as u32);
skr_builder.set_start(*skr.start());
skr_builder.set_end(*skr.end());
}
builder.set_expiration(self.expiration);
builder.set_count(self.count);
builder.set_watch_id(self.watch_id.unwrap_or(0u64));
builder
.reborrow()
.set_watch_value_params_data(&self.params_data);
let mut w_builder = builder.reborrow().init_watcher();
encode_public_key(&self.watcher, &mut w_builder);

View file

@ -52,6 +52,8 @@ pub(in crate::rpc_processor) enum RPCQuestionDetail {
SetValueQ(Box<RPCOperationSetValueQ>),
WatchValueQ(Box<RPCOperationWatchValueQ>),
InspectValueQ(Box<RPCOperationInspectValueQ>),
TransactValueQ(Box<RPCOperationTransactValueQ>),
SyncValueQ(Box<RPCOperationSyncValueQ>),
#[cfg(feature = "unstable-blockstore")]
SupplyBlockQ(Box<RPCOperationSupplyBlockQ>),
#[cfg(feature = "unstable-blockstore")]
@ -74,6 +76,8 @@ impl RPCQuestionDetail {
RPCQuestionDetail::SetValueQ(_) => "SetValueQ",
RPCQuestionDetail::WatchValueQ(_) => "WatchValueQ",
RPCQuestionDetail::InspectValueQ(_) => "InspectValueQ",
RPCQuestionDetail::TransactValueQ(_) => "TransactValueQ",
RPCQuestionDetail::SyncValueQ(_) => "SyncValueQ",
#[cfg(feature = "unstable-blockstore")]
RPCQuestionDetail::SupplyBlockQ(_) => "SupplyBlockQ",
#[cfg(feature = "unstable-blockstore")]
@ -95,6 +99,8 @@ impl RPCQuestionDetail {
RPCQuestionDetail::SetValueQ(r) => r.validate(validate_context),
RPCQuestionDetail::WatchValueQ(r) => r.validate(validate_context),
RPCQuestionDetail::InspectValueQ(r) => r.validate(validate_context),
RPCQuestionDetail::TransactValueQ(r) => r.validate(validate_context),
RPCQuestionDetail::SyncValueQ(r) => r.validate(validate_context),
#[cfg(feature = "unstable-blockstore")]
RPCQuestionDetail::SupplyBlockQ(r) => r.validate(validate_context),
#[cfg(feature = "unstable-blockstore")]
@ -149,6 +155,16 @@ impl RPCQuestionDetail {
let out = RPCOperationInspectValueQ::decode(decode_context, &op_reader)?;
RPCQuestionDetail::InspectValueQ(Box::new(out))
}
veilid_capnp::question::detail::TransactValueQ(r) => {
let op_reader = r?;
let out = RPCOperationTransactValueQ::decode(decode_context, &op_reader)?;
RPCQuestionDetail::TransactValueQ(Box::new(out))
}
veilid_capnp::question::detail::SyncValueQ(r) => {
let op_reader = r?;
let out = RPCOperationSyncValueQ::decode(decode_context, &op_reader)?;
RPCQuestionDetail::SyncValueQ(Box::new(out))
}
#[cfg(feature = "unstable-blockstore")]
veilid_capnp::question::detail::SupplyBlockQ(r) => {
let op_reader = r?;
@ -198,6 +214,12 @@ impl RPCQuestionDetail {
RPCQuestionDetail::InspectValueQ(d) => {
d.encode(&mut builder.reborrow().init_inspect_value_q())
}
RPCQuestionDetail::TransactValueQ(d) => {
d.encode(&mut builder.reborrow().init_transact_value_q())
}
RPCQuestionDetail::SyncValueQ(d) => {
d.encode(&mut builder.reborrow().init_sync_value_q())
}
#[cfg(feature = "unstable-blockstore")]
RPCQuestionDetail::SupplyBlockQ(d) => {
d.encode(&mut builder.reborrow().init_supply_block_q())

View file

@ -65,6 +65,7 @@ pub fn encode_signed_value_data(
mod tests {
use super::{decode_signed_value_data, encode_signed_value_data};
use crate::crypto::tests::fixtures::*;
use crate::rpc_processor::canonical_message_builder_to_vec_packed;
use crate::storage_manager::SignedValueData;
use crate::{veilid_capnp, BareSignature, EncryptedValueData, Nonce, Signature};
@ -88,8 +89,8 @@ mod tests {
Signature::new(keypair.kind(), BareSignature::new(&fake_signature)),
);
encode_signed_value_data(&signed_value_data, &mut builder).unwrap();
let mut buffer = Vec::with_capacity(32768 + 4096);
capnp::serialize_packed::write_message(&mut buffer, &message_builder).unwrap();
let buffer = canonical_message_builder_to_vec_packed(message_builder).unwrap();
println!("buffer[{}] = {:02x?}", buffer.len(), &buffer);

View file

@ -1,36 +0,0 @@
use super::*;
pub fn decode_signed_value_data(
reader: &veilid_capnp::signed_value_data::Reader,
) -> Result<ValueData, RPCError> {
let seq = reader.get_seq();
rpc_ignore_missing_property!(reader, data);
let data = reader.get_data()?.to_vec();
rpc_ignore_missing_property!(reader, writer);
let wr = reader.get_writer()?;
let writer = decode_key256(&wr);
rpc_ignore_missing_property!(reader, signature);
let sr = reader.get_signature()?;
let signature = decode_signature512(&sr);
Ok(SignedValueData {
value_data: ValueData { seq, data, writer },
signature,
})
}
/// Encode a `SignedValueData` into a Cap'n Proto builder.
///
/// Writes the sequence number and data payload directly, then initializes
/// the nested `writer` key and `signature` sub-builders and fills them in.
pub fn encode_signed_value_data(
    signed_value_data: &SignedValueData,
    builder: &mut veilid_capnp::signed_value_data::Builder,
) -> Result<(), RPCError> {
    // Hoist the inner value-data accessor so each field read is explicit
    let value_data = signed_value_data.value_data();
    builder.set_seq(value_data.seq());
    builder.set_data(value_data.data());
    // Nested structs must be init'd on the builder before they can be written
    let mut writer_builder = builder.reborrow().init_writer();
    encode_key256(value_data.writer(), &mut writer_builder);
    let mut signature_builder = builder.reborrow().init_signature();
    encode_signature512(signed_value_data.signature(), &mut signature_builder);
    Ok(())
}

View file

@ -20,6 +20,7 @@ mod rpc_route;
mod rpc_set_value;
mod rpc_signal;
mod rpc_status;
mod rpc_transact_value;
mod rpc_validate_dial_info;
mod rpc_value_changed;
mod rpc_watch_value;
@ -44,8 +45,10 @@ mod rpc_start_tunnel;
pub(crate) use answer::*;
pub(crate) use coders::{
decode_node_info, decode_private_route, encode_node_info, encode_private_route,
encode_route_hop, message_builder_to_vec, RPCDecodeContext, MAX_INSPECT_VALUE_A_SEQS_LEN,
canonical_message_builder_to_vec_packed, canonical_message_builder_to_vec_unpacked,
canonical_message_builder_to_write_packed, decode_node_info, decode_private_route,
encode_node_info, encode_private_route, encode_route_hop, RPCDecodeContext,
TransactValueCommand, MAX_INSPECT_VALUE_A_SEQS_LEN,
};
pub(crate) use destination::*;
pub(crate) use error::*;
@ -625,7 +628,7 @@ impl RPCProcessor {
let mut route_msg = ::capnp::message::Builder::new_default();
let mut route_operation = route_msg.init_root::<veilid_capnp::operation::Builder>();
operation.encode(&mut route_operation)?;
let out_message = message_builder_to_vec(route_msg)?;
let out_message = canonical_message_builder_to_vec_packed(route_msg)?;
let out = RenderedOperation {
message: out_message,
@ -656,7 +659,7 @@ impl RPCProcessor {
let mut msg_builder = ::capnp::message::Builder::new_default();
let mut op_builder = msg_builder.init_root::<veilid_capnp::operation::Builder>();
operation.encode(&mut op_builder)?;
message_builder_to_vec(msg_builder)?
canonical_message_builder_to_vec_packed(msg_builder)?
};
// Get reply private route if we are asking for one to be used in our 'respond to'
@ -1561,6 +1564,12 @@ impl RPCProcessor {
RPCQuestionDetail::InspectValueQ(_) => {
pin_dyn_future_closure!(self.process_inspect_value_q(msg))
}
RPCQuestionDetail::TransactValueQ(_) => {
pin_dyn_future_closure!(self.process_transact_value_q(msg))
}
RPCQuestionDetail::SyncValueQ(_) => {
pin_dyn_future_closure!(self.process_sync_value_q(msg))
}
#[cfg(feature = "unstable-blockstore")]
RPCQuestionDetail::SupplyBlockQ(_) => {
pin_dyn_future_closure!(self.process_supply_block_q(msg))

View file

@ -5,6 +5,8 @@ impl_veilid_log_facility!("rpc");
#[derive(Clone, Debug)]
pub struct GetValueAnswer {
pub accepted: bool,
pub transaction_valid: bool,
pub value: Option<SignedValueData>,
pub peers: Vec<Arc<PeerInfo>>,
pub descriptor: Option<SignedValueDescriptor>,
@ -29,6 +31,7 @@ impl RPCProcessor {
&self,
dest: Destination,
opaque_record_key: OpaqueRecordKey,
transaction_id: Option<u64>,
subkey: ValueSubkey,
last_descriptor: Option<SignedValueDescriptor>,
) -> RPCNetworkResult<Answer<GetValueAnswer>> {
@ -47,17 +50,19 @@ impl RPCProcessor {
};
// Get the target node id
let crypto = self.crypto();
let Some(vcrypto) = crypto.get(opaque_record_key.kind()) else {
return Err(RPCError::internal("unsupported cryptosystem"));
};
Crypto::validate_crypto_kind(opaque_record_key.kind()).map_err(RPCError::internal)?;
let Some(target_node_id) = target_node_ids.get(opaque_record_key.kind()) else {
return Err(RPCError::internal("No node id for crypto kind"));
};
let debug_string = format!(
"OUT ==> GetValueQ({} #{}{}) => {}",
"OUT ==> GetValueQ({} {}#{}{}) => {}",
opaque_record_key,
if let Some(xid) = transaction_id {
format!("xid={} ", xid)
} else {
"".to_string()
},
subkey,
if last_descriptor.is_some() {
" +lastdesc"
@ -70,9 +75,10 @@ impl RPCProcessor {
// Send the getvalue question
let get_value_q = RPCOperationGetValueQ::new(
opaque_record_key.clone(),
transaction_id,
subkey,
last_descriptor.is_none(),
);
)?;
let question = RPCQuestion::new(
network_result_try!(self.get_destination_respond_to(&dest)?),
RPCQuestionDetail::GetValueQ(Box::new(get_value_q)),
@ -82,7 +88,6 @@ impl RPCProcessor {
opaque_record_key: opaque_record_key.clone(),
last_descriptor,
subkey,
crypto_kind: vcrypto.kind(),
});
veilid_log!(self debug target: "dht", "{}", debug_string);
@ -111,7 +116,7 @@ impl RPCProcessor {
_ => return Ok(NetworkResult::invalid_message("not an answer")),
};
let (value, peers, descriptor) = get_value_a.destructure();
let (accepted, transaction_valid, value, peers, descriptor) = get_value_a.destructure();
if debug_target_enabled!("dht") {
let debug_string_value = value
.as_ref()
@ -126,10 +131,12 @@ impl RPCProcessor {
.unwrap_or_default();
let debug_string_answer = format!(
"OUT <== GetValueA({} #{}{}{} peers={}) <= {}",
"OUT <== GetValueA({} #{}{}{}{}{} peers={}) <= {}",
opaque_record_key,
subkey,
debug_string_value,
if accepted { " +accept" } else { "" },
if transaction_valid { " +xvalid" } else { "" },
if descriptor.is_some() { " +desc" } else { "" },
peers.len(),
dest
@ -184,6 +191,8 @@ impl RPCProcessor {
latency,
reply_private_route,
GetValueAnswer {
accepted,
transaction_valid,
value,
peers,
descriptor,
@ -227,7 +236,8 @@ impl RPCProcessor {
};
// Destructure
let (opaque_record_key, subkey, want_descriptor) = get_value_q.destructure();
let (opaque_record_key, transaction_id, subkey, want_descriptor) =
get_value_q.destructure();
// Get the nodes that we know about that are closer to the the key than our own node
let closer_to_key_peers = network_result_try!(routing_table
@ -251,20 +261,31 @@ impl RPCProcessor {
// See if this is within the consensus width
let consensus_width = self.config().network.dht.consensus_width as usize;
let (get_result_value, get_result_descriptor) =
let (accepted, transaction_valid, get_result_value, get_result_descriptor) =
if closer_to_key_peers.len() >= consensus_width {
// Not close enough
(None, None)
(false, false, None, None)
} else {
// Close enough, lets get it
// See if we have this record ourselves
let storage_manager = self.storage_manager();
let get_result = network_result_try!(storage_manager
.inbound_get_value(opaque_record_key.clone(), subkey, want_descriptor)
let inbound_get_value_result = network_result_try!(storage_manager
.inbound_get_value(
opaque_record_key.clone(),
transaction_id,
subkey,
want_descriptor
)
.await
.map_err(RPCError::internal)?);
(get_result.opt_value, get_result.opt_descriptor)
match inbound_get_value_result {
InboundGetValueResult::Success(get_result) => {
(true, true, get_result.opt_value, get_result.opt_descriptor)
}
InboundGetValueResult::InvalidTransaction => (true, false, None, None),
}
};
if debug_target_enabled!("dht") {
@ -281,10 +302,12 @@ impl RPCProcessor {
.unwrap_or_default();
let debug_string_answer = format!(
"IN ===> GetValueA({} #{}{}{} peers={}) ==> {}",
"IN ===> GetValueA({} #{}{}{}{}{} peers={}) ==> {}",
opaque_record_key,
subkey,
debug_string_value,
if accepted { " +accept" } else { "" },
if transaction_valid { " +xvalid" } else { "" },
if get_result_descriptor.is_some() {
" +desc"
} else {
@ -299,6 +322,8 @@ impl RPCProcessor {
// Make GetValue answer
let get_value_a = RPCOperationGetValueA::new(
accepted,
transaction_valid,
get_result_value.map(|x| (*x).clone()),
closer_to_key_peers,
get_result_descriptor.map(|x| (*x).clone()),

View file

@ -5,6 +5,8 @@ impl_veilid_log_facility!("rpc");
#[derive(Clone, Debug)]
pub struct InspectValueAnswer {
pub accepted: bool,
pub transaction_valid: bool,
pub seqs: Vec<Option<ValueSeqNum>>,
pub peers: Vec<Arc<PeerInfo>>,
pub descriptor: Option<SignedValueDescriptor>,
@ -31,6 +33,7 @@ impl RPCProcessor {
&self,
dest: Destination,
opaque_record_key: OpaqueRecordKey,
transaction_id: Option<u64>,
subkeys: ValueSubkeyRangeSet,
last_descriptor: Option<SignedValueDescriptor>,
) -> RPCNetworkResult<Answer<InspectValueAnswer>> {
@ -44,22 +47,24 @@ impl RPCProcessor {
// and get the target noderef so we can validate the response
let Some(target_node_ids) = dest.get_target_node_ids() else {
return Err(RPCError::internal(
"Never send get value requests over private routes",
"Never send inspect value requests over private routes",
));
};
// Get the target node id
let crypto = self.crypto();
let Some(vcrypto) = crypto.get(opaque_record_key.kind()) else {
return Err(RPCError::internal("unsupported cryptosystem"));
};
Crypto::validate_crypto_kind(opaque_record_key.kind()).map_err(RPCError::internal)?;
let Some(target_node_id) = target_node_ids.get(opaque_record_key.kind()) else {
return Err(RPCError::internal("No node id for crypto kind"));
};
let debug_string = format!(
"OUT ==> InspectValueQ({} #{}{}) => {}",
"OUT ==> InspectValueQ({} {}#{}{}) => {}",
opaque_record_key,
if let Some(transaction_id) = transaction_id {
format!("xid={} ", transaction_id)
} else {
"".to_string()
},
&subkeys,
if last_descriptor.is_some() {
" +lastdesc"
@ -72,6 +77,7 @@ impl RPCProcessor {
// Send the inspectvalue question
let inspect_value_q = RPCOperationInspectValueQ::new(
opaque_record_key.clone(),
transaction_id,
subkeys.clone(),
last_descriptor.is_none(),
)?;
@ -84,7 +90,6 @@ impl RPCProcessor {
opaque_record_key: opaque_record_key.clone(),
last_descriptor,
subkeys,
crypto_kind: vcrypto.kind(),
});
veilid_log!(self debug target: "dht", "{}", debug_string);
@ -113,7 +118,7 @@ impl RPCProcessor {
_ => return Ok(NetworkResult::invalid_message("not an answer")),
};
let (seqs, peers, descriptor) = inspect_value_a.destructure();
let (accepted, transaction_valid, seqs, peers, descriptor) = inspect_value_a.destructure();
let seqs = seqs
.into_iter()
.map(|x| if x == ValueSeqNum::MAX { None } else { Some(x) })
@ -121,8 +126,10 @@ impl RPCProcessor {
if debug_target_enabled!("dht") {
let debug_string_answer = format!(
"OUT <== InspectValueA({} {} peers={}) <= {} seqs:\n{}",
"OUT <== InspectValueA({} {}{}{} peers={}) <= {} seqs:\n{}",
opaque_record_key,
if accepted { " +accept" } else { "" },
if transaction_valid { " +xvalid" } else { "" },
if descriptor.is_some() { " +desc" } else { "" },
peers.len(),
dest,
@ -169,6 +176,8 @@ impl RPCProcessor {
latency,
reply_private_route,
InspectValueAnswer {
accepted,
transaction_valid,
seqs,
peers,
descriptor,
@ -212,7 +221,8 @@ impl RPCProcessor {
};
// Destructure
let (opaque_record_key, subkeys, want_descriptor) = inspect_value_q.destructure();
let (opaque_record_key, transaction_id, subkeys, want_descriptor) =
inspect_value_q.destructure();
// Get the nodes that we know about that are closer to the the key than our own node
let closer_to_key_peers = network_result_try!(routing_table
@ -224,8 +234,13 @@ impl RPCProcessor {
if debug_target_enabled!("dht") {
let debug_string = format!(
"IN <=== InspectValueQ({} {}{}) <== {}",
"IN <=== InspectValueQ({} {}{}{}) <== {}",
opaque_record_key,
if let Some(xid) = transaction_id {
format!("xid={} ", xid)
} else {
"".to_string()
},
subkeys,
if want_descriptor { " +wantdesc" } else { "" },
msg.header.direct_sender_node_id()
@ -237,23 +252,34 @@ impl RPCProcessor {
// See if this is within the consensus width
let consensus_width = self.config().network.dht.consensus_width as usize;
let (inspect_result_seqs, inspect_result_descriptor) =
let (accepted, transaction_valid, inspect_result_seqs, inspect_result_descriptor) =
if closer_to_key_peers.len() >= consensus_width {
// Not close enough
(Vec::new(), None)
(false, false, vec![], None)
} else {
// Close enough, lets get it
// See if we have this record ourselves
let storage_manager = self.storage_manager();
let inspect_result = network_result_try!(storage_manager
.inbound_inspect_value(opaque_record_key.clone(), subkeys, want_descriptor)
let inbound_inspect_value_result = network_result_try!(storage_manager
.inbound_inspect_value(
opaque_record_key.clone(),
transaction_id,
subkeys,
want_descriptor
)
.await
.map_err(RPCError::internal)?);
(
inspect_result.seqs().to_vec(),
inspect_result.opt_descriptor(),
)
match inbound_inspect_value_result {
InboundInspectValueResult::Success(inspect_result) => (
true,
true,
inspect_result.seqs().to_vec(),
inspect_result.opt_descriptor(),
),
InboundInspectValueResult::InvalidTransaction => (true, false, vec![], None),
}
};
let inspect_result_seqs = inspect_result_seqs
.into_iter()
@ -262,9 +288,11 @@ impl RPCProcessor {
if debug_target_enabled!("dht") {
let debug_string_answer = format!(
"IN ===> InspectValueA({} {:?}{} peers={}) ==> {}",
"IN ===> InspectValueA({} {:?}{}{}{} peers={}) ==> {}",
opaque_record_key,
inspect_result_seqs,
if accepted { " +accept" } else { "" },
if transaction_valid { " +xvalid" } else { "" },
if inspect_result_descriptor.is_some() {
" +desc"
} else {
@ -279,6 +307,8 @@ impl RPCProcessor {
// Make InspectValue answer
let inspect_value_a = RPCOperationInspectValueA::new(
accepted,
transaction_valid,
inspect_result_seqs,
closer_to_key_peers,
inspect_result_descriptor.map(|x| (*x).clone()),

View file

@ -5,6 +5,7 @@ impl_veilid_log_facility!("rpc");
#[derive(Clone, Debug)]
pub struct SetValueAnswer {
pub accepted: bool,
pub transaction_valid: bool,
pub needs_descriptor: bool,
pub value: Option<SignedValueData>,
pub peers: Vec<Arc<PeerInfo>>,
@ -32,6 +33,7 @@ impl RPCProcessor {
&self,
dest: Destination,
opaque_record_key: OpaqueRecordKey,
transaction_id: Option<u64>,
subkey: ValueSubkey,
value: SignedValueData,
descriptor: SignedValueDescriptor,
@ -52,17 +54,19 @@ impl RPCProcessor {
};
// Get the target node id
let crypto = self.crypto();
let Some(vcrypto) = crypto.get(opaque_record_key.kind()) else {
return Err(RPCError::internal("unsupported cryptosystem"));
};
Crypto::validate_crypto_kind(opaque_record_key.kind()).map_err(RPCError::internal)?;
let Some(target_node_id) = target_node_ids.get(opaque_record_key.kind()) else {
return Err(RPCError::internal("No node id for crypto kind"));
};
let debug_string = format!(
"OUT ==> SetValueQ({} #{} len={} seq={} writer={}{}) => {}",
"OUT ==> SetValueQ({} {}#{} len={} seq={} writer={}{}) => {}",
opaque_record_key,
if let Some(transaction_id) = transaction_id {
format!("xid={} ", transaction_id)
} else {
"".to_string()
},
subkey,
value.value_data().data().len(),
value.value_data().seq(),
@ -74,6 +78,7 @@ impl RPCProcessor {
// Send the setvalue question
let set_value_q = RPCOperationSetValueQ::new(
opaque_record_key.clone(),
transaction_id,
subkey,
value,
if send_descriptor {
@ -81,7 +86,7 @@ impl RPCProcessor {
} else {
None
},
);
)?;
let question = RPCQuestion::new(
network_result_try!(self.get_destination_respond_to(&dest)?),
RPCQuestionDetail::SetValueQ(Box::new(set_value_q)),
@ -90,7 +95,6 @@ impl RPCProcessor {
opaque_record_key: opaque_record_key.clone(),
descriptor,
subkey,
crypto_kind: vcrypto.kind(),
});
if debug_target_enabled!("dht") {
@ -121,7 +125,8 @@ impl RPCProcessor {
_ => return Ok(NetworkResult::invalid_message("not an answer")),
};
let (accepted, needs_descriptor, value, peers) = set_value_a.destructure();
let (accepted, transaction_valid, needs_descriptor, value, peers) =
set_value_a.destructure();
if debug_target_enabled!("dht") {
let debug_string_value = value
@ -137,10 +142,11 @@ impl RPCProcessor {
.unwrap_or_default();
let debug_string_answer = format!(
"OUT <== SetValueA({} #{}{}{}{} peers={}) <= {}",
"OUT <== SetValueA({} #{}{}{}{}{} peers={}) <= {}",
opaque_record_key,
subkey,
if accepted { " +set" } else { "" },
if accepted { " +accept" } else { "" },
if transaction_valid { " +xvalid" } else { "" },
if needs_descriptor { " +set" } else { "" },
debug_string_value,
peers.len(),
@ -199,6 +205,7 @@ impl RPCProcessor {
reply_private_route,
SetValueAnswer {
accepted,
transaction_valid,
needs_descriptor,
value,
peers,
@ -242,7 +249,8 @@ impl RPCProcessor {
};
// Destructure
let (opaque_record_key, subkey, value, descriptor) = set_value_q.destructure();
let (opaque_record_key, transaction_id, subkey, value, descriptor) =
set_value_q.destructure();
// Get target for ValueChanged notifications
let dest = network_result_try!(self.get_respond_to_destination(&msg));
@ -257,8 +265,13 @@ impl RPCProcessor {
));
let debug_string = format!(
"IN <=== SetValueQ({} #{} len={} seq={} writer={}{}) <== {}",
"IN <=== SetValueQ({} {}#{} len={} seq={} writer={}{}) <== {}",
opaque_record_key,
if let Some(xid) = transaction_id {
format!("xid={} ", xid)
} else {
"".to_string()
},
subkey,
value.value_data().data().len(),
value.value_data().seq(),
@ -272,10 +285,10 @@ impl RPCProcessor {
// If there are less than 'consensus_width' peers that are closer, then store here too
let consensus_width = self.config().network.dht.consensus_width as usize;
let (accepted, needs_descriptor, return_value) =
let (accepted, transaction_valid, needs_descriptor, return_value) =
if closer_to_key_peers.len() >= consensus_width {
// Not close enough
(false, false, None)
(false, false, false, None)
} else {
// Close enough, lets set it
@ -284,6 +297,7 @@ impl RPCProcessor {
let result = network_result_try!(storage_manager
.inbound_set_value(
opaque_record_key.clone(),
transaction_id,
subkey,
Arc::new(value),
descriptor.map(Arc::new),
@ -292,13 +306,14 @@ impl RPCProcessor {
.await
.map_err(RPCError::internal)?);
let (needs_descriptor, return_value) = match result {
InboundSetValueResult::Success => (false, None),
InboundSetValueResult::Ignored(old_value) => (false, Some(old_value)),
InboundSetValueResult::NeedsDescriptor => (true, None),
let (transaction_valid, needs_descriptor, return_value) = match result {
InboundSetValueResult::Success => (true, false, None),
InboundSetValueResult::Ignored(old_value) => (true, false, Some(old_value)),
InboundSetValueResult::InvalidTransaction => (false, false, None),
InboundSetValueResult::NeedsDescriptor => (true, true, None),
};
(true, needs_descriptor, return_value)
(true, transaction_valid, needs_descriptor, return_value)
};
if debug_target_enabled!("dht") {
@ -315,10 +330,11 @@ impl RPCProcessor {
.unwrap_or_default();
let debug_string_answer = format!(
"IN ===> SetValueA({} #{}{}{}{} peers={}) ==> {}",
"IN ===> SetValueA({} #{}{}{}{}{} peers={}) ==> {}",
opaque_record_key,
subkey,
if accepted { " +accepted" } else { "" },
if accepted { " +accept" } else { "" },
if transaction_valid { " +xvalid" } else { "" },
if needs_descriptor { " +needdesc" } else { "" },
debug_string_value,
closer_to_key_peers.len(),
@ -331,6 +347,7 @@ impl RPCProcessor {
// Make SetValue answer
let set_value_a = RPCOperationSetValueA::new(
accepted,
transaction_valid,
needs_descriptor,
return_value.map(|x| (*x).clone()),
closer_to_key_peers,

View file

@ -0,0 +1,351 @@
use super::*;
use crate::storage_manager::SignedValueDescriptor;
impl_veilid_log_facility!("rpc");
/// Result payload of a TransactValue RPC answer, as returned to the caller
/// of `rpc_call_transact_value`.
#[derive(Clone, Debug)]
pub struct TransactValueAnswer {
    /// True if the remote node was within the distance metric and processed the request
    pub accepted: bool,
    /// True if the remote node requires the record descriptor to be sent
    pub needs_descriptor: bool,
    /// Transaction id returned by the remote node, if one is active
    pub transaction_id: Option<u64>,
    /// Per-subkey sequence numbers; `None` marks a subkey with no value
    /// (encoded on the wire as the MAX sentinel)
    pub seqs: Vec<Option<ValueSeqNum>>,
    /// Returned 'closer peer' information on either success or failure
    pub peers: Vec<Arc<PeerInfo>>,
}
impl RPCProcessor {
    /// Sends a transact value request and waits for a response.
    /// Can be sent via all methods including relays.
    /// Safety routes may be used, but never private routes.
    /// Because this leaks information about the identity of the node itself,
    /// replying to this request received over a private route would leak
    /// the identity of the node and defeat the private route.
    /// The number of subkey sequence numbers returned may either be:
    /// * the amount requested
    /// * an amount truncated to MAX_TRANSACT_VALUE_A_SEQS_LEN subkeys
    /// * zero if nothing was found
    #[
    instrument(level = "trace", target = "rpc", skip(self, descriptor),
    fields(ret.peers.len,
    ret.latency
    ),err(level=Level::DEBUG))
    ]
    pub async fn rpc_call_transact_value(
        &self,
        dest: Destination,
        opaque_record_key: OpaqueRecordKey,
        transaction_id: Option<u64>,
        command: TransactValueCommand,
        descriptor: SignedValueDescriptor,
        send_descriptor: bool,
        writer: KeyPair,
    ) -> RPCNetworkResult<Answer<TransactValueAnswer>> {
        // Hold the startup lock so the processor cannot shut down mid-call
        let _guard = self
            .startup_context
            .startup_lock
            .enter()
            .map_err(RPCError::map_try_again("not started up"))?;

        // Ensure destination never has a private route
        // and get the target noderef so we can validate the response
        let Some(target_node_ids) = dest.get_target_node_ids() else {
            return Err(RPCError::internal(
                "Never send transact value requests over private routes",
            ));
        };

        // Get the cryptosystem and target node id for the record's crypto kind
        let crypto = self.crypto();
        let Some(vcrypto) = crypto.get(opaque_record_key.kind()) else {
            return Err(RPCError::internal("unsupported cryptosystem"));
        };
        let Some(target_node_id) = target_node_ids.get(opaque_record_key.kind()) else {
            return Err(RPCError::internal("No node id for crypto kind"));
        };

        // NOTE(review): the "#{}" slot below is filled by the send_descriptor flag;
        // this format appears inherited from the subkey-based calls — verify intended output.
        let debug_string = format!(
            "OUT ==> TransactValueQ({}{} {}#{}) => {} (writer={}) ",
            opaque_record_key,
            match command {
                TransactValueCommand::Begin => " begin",
                TransactValueCommand::End => " end",
                TransactValueCommand::Commit => " commit",
                TransactValueCommand::Rollback => " rollback",
            },
            if let Some(transaction_id) = transaction_id {
                format!("xid={} ", transaction_id)
            } else {
                "".to_string()
            },
            if send_descriptor { " +senddesc" } else { "" },
            dest,
            writer,
        );

        // Send the transactvalue question
        let transact_value_q = RPCOperationTransactValueQ::new(
            opaque_record_key.clone(),
            transaction_id,
            command,
            if send_descriptor {
                Some(descriptor.clone())
            } else {
                None
            },
            writer,
            &vcrypto,
        )?;
        let question = RPCQuestion::new(
            network_result_try!(self.get_destination_respond_to(&dest)?),
            RPCQuestionDetail::TransactValueQ(Box::new(transact_value_q)),
        );
        // Context used to validate the answer against the descriptor we hold
        let question_context = QuestionContext::TransactValue(ValidateTransactValueContext {
            opaque_record_key: opaque_record_key.clone(),
            descriptor,
        });

        veilid_log!(self debug target: "dht", "{}", debug_string);

        let waitable_reply = network_result_try!(
            self.question(dest.clone(), question, Some(question_context))
                .await?
        );

        // Keep the reply private route that was used to return with the answer
        let reply_private_route = waitable_reply.context.reply_private_route.clone();

        // Wait for reply
        let (msg, latency) = match self.wait_for_reply(waitable_reply, debug_string).await? {
            TimeoutOr::Timeout => return Ok(NetworkResult::Timeout),
            TimeoutOr::Value(v) => v,
        };

        // Get the right answer type
        let (_, _, kind) = msg.operation.destructure();
        let transact_value_a = match kind {
            RPCOperationKind::Answer(a) => match a.destructure() {
                RPCAnswerDetail::TransactValueA(a) => a,
                _ => return Ok(NetworkResult::invalid_message("not a transactvalue answer")),
            },
            _ => return Ok(NetworkResult::invalid_message("not an answer")),
        };
        // Note: this shadows the `transaction_id` parameter with the answer's id
        let (accepted, needs_descriptor, transaction_id, seqs, peers) =
            transact_value_a.destructure();
        // Convert the wire MAX sentinel back into None for "no value at subkey"
        let seqs = seqs
            .into_iter()
            .map(|x| if x == ValueSeqNum::MAX { None } else { Some(x) })
            .collect::<Vec<_>>();

        if debug_target_enabled!("dht") {
            let debug_string_answer = format!(
                "OUT <== TransactValueA({} {}{}{} peers={}) <= {} seqs:\n{}",
                opaque_record_key,
                if let Some(transaction_id) = transaction_id {
                    format!("xid={} ", transaction_id)
                } else {
                    "".to_string()
                },
                if accepted { " +accept" } else { "" },
                if needs_descriptor { " +needdesc" } else { "" },
                peers.len(),
                dest,
                debug_seqs(&seqs)
            );

            veilid_log!(self debug target: "dht", "{}", debug_string_answer);

            let peer_ids: Vec<String> = peers
                .iter()
                .filter_map(|p| {
                    p.node_ids()
                        .get(opaque_record_key.kind())
                        .map(|k| k.to_string())
                })
                .collect();
            veilid_log!(self debug target: "dht", "Peers: {:#?}", peer_ids);
        }

        // Validate peers returned are, in fact, closer to the key than the node we sent this to
        let valid = match self.routing_table().verify_peers_closer(
            target_node_id.to_hash_coordinate(),
            opaque_record_key.to_hash_coordinate(),
            &peers,
        ) {
            Ok(v) => v,
            Err(e) => {
                return Ok(NetworkResult::invalid_message(format!(
                    "missing cryptosystem in peers node ids: {}",
                    e
                )));
            }
        };
        if !valid {
            return Ok(NetworkResult::invalid_message("non-closer peers returned"));
        }

        #[cfg(feature = "verbose-tracing")]
        tracing::Span::current().record("ret.latency", latency.as_u64());
        #[cfg(feature = "verbose-tracing")]
        tracing::Span::current().record("ret.peers.len", peers.len());

        Ok(NetworkResult::value(Answer::new(
            latency,
            reply_private_route,
            TransactValueAnswer {
                accepted,
                needs_descriptor,
                transaction_id,
                seqs,
                peers,
            },
        )))
    }

    ////////////////////////////////////////////////////////////////////////////////////////////////

    /// Process an incoming TransactValueQ question: validate routing constraints,
    /// check DHT capability, hand the request to the storage manager if this node
    /// is within the consensus width for the key, and send back a TransactValueA.
    #[instrument(level = "trace", target = "rpc", skip(self, msg), fields(msg.operation.op_id), ret, err)]
    pub(super) async fn process_transact_value_q(&self, msg: Message) -> RPCNetworkResult<()> {
        // Ensure this never came over a private route, safety route is okay though
        match &msg.header.detail {
            RPCMessageHeaderDetail::Direct(_) | RPCMessageHeaderDetail::SafetyRouted(_) => {}
            RPCMessageHeaderDetail::PrivateRouted(_) => {
                return Ok(NetworkResult::invalid_message(
                    "not processing transact value request over private route",
                ))
            }
        }
        let routing_table = self.routing_table();
        let routing_domain = msg.header.routing_domain();

        // Ignore if disabled
        let has_capability_dht = routing_table
            .get_published_peer_info(msg.header.routing_domain())
            .map(|ppi| ppi.node_info().has_capability(CAP_DHT))
            .unwrap_or(false);
        if !has_capability_dht {
            return Ok(NetworkResult::service_unavailable("dht is not available"));
        }

        // Get the question
        // The dispatcher guarantees this is a TransactValueQ question, hence the panics
        let kind = msg.operation.kind().clone();
        let transact_value_q = match kind {
            RPCOperationKind::Question(q) => match q.destructure() {
                (_, RPCQuestionDetail::TransactValueQ(q)) => q,
                _ => panic!("not a transactvalue question"),
            },
            _ => panic!("not a question"),
        };

        // Destructure
        let (opaque_record_key, transaction_id, command, descriptor, writer) =
            transact_value_q.destructure();

        // Get the nodes that we know about that are closer to the the key than our own node
        let closer_to_key_peers = network_result_try!(routing_table
            .find_preferred_peers_closer_to_key(
                routing_domain,
                opaque_record_key.to_hash_coordinate(),
                vec![CAP_DHT]
            ));

        if debug_target_enabled!("dht") {
            let debug_string = format!(
                "IN <=== TransactValueQ({}{} {}{}) <== {} (writer={})",
                opaque_record_key,
                match command {
                    TransactValueCommand::Begin => " begin",
                    TransactValueCommand::End => " end",
                    TransactValueCommand::Commit => " commit",
                    TransactValueCommand::Rollback => " rollback",
                },
                if let Some(xid) = transaction_id {
                    format!("xid={} ", xid)
                } else {
                    "".to_string()
                },
                if descriptor.is_some() { " +desc" } else { "" },
                msg.header.direct_sender_node_id(),
                writer,
            );

            veilid_log!(self debug target: "dht", "{}", debug_string);
        }

        // See if this is within the consensus width
        let consensus_width = self.config().network.dht.consensus_width as usize;
        let (accepted, needs_descriptor, transaction_id, transact_result_seqs) =
            if closer_to_key_peers.len() >= consensus_width {
                // Not close enough
                (false, false, None, vec![])
            } else {
                // Close enough, lets get it
                // See if we have this record ourselves
                let storage_manager = self.storage_manager();
                let inbound_transact_value_result = network_result_try!(storage_manager
                    .inbound_transact_value(
                        opaque_record_key.clone(),
                        transaction_id,
                        command,
                        descriptor,
                        writer,
                    )
                    .await
                    .map_err(RPCError::internal)?);
                // NOTE(review): Success always reports needs_descriptor=true here while
                // InvalidTransaction reports false — confirm this is intended and not a
                // placeholder left over from this checkpoint commit (compare the
                // InboundSetValueResult::NeedsDescriptor handling in rpc_set_value).
                match inbound_transact_value_result {
                    InboundTransactValueResult::Success(transact_result) => (
                        true,
                        true,
                        transact_result.transaction_id,
                        transact_result.seqs().to_vec(),
                    ),
                    InboundTransactValueResult::InvalidTransaction => (true, false, None, vec![]),
                }
            };

        if debug_target_enabled!("dht") {
            let debug_string_answer = format!(
                "IN ===> TransactValueA({} {}{} peers={}) ==> {} (seqs={})",
                opaque_record_key,
                if accepted { " +accept" } else { "" },
                if needs_descriptor { " +needdesc" } else { "" },
                closer_to_key_peers.len(),
                msg.header.direct_sender_node_id(),
                debug_seqs(&transact_result_seqs)
            );

            veilid_log!(self debug target: "dht", "{}", debug_string_answer);
        }

        // Encode missing subkey values as the wire MAX sentinel
        // NOTE(review): the sentinel here is ValueSubkey::MAX but the outbound decode
        // above (and the GetValue/InspectValue paths) use ValueSeqNum::MAX — confirm
        // these resolve to the same underlying value.
        let transact_result_seqs = transact_result_seqs
            .into_iter()
            .map(|x| {
                if let Some(s) = x {
                    *s
                } else {
                    ValueSubkey::MAX
                }
            })
            .collect::<Vec<_>>();

        // Make TransactValue answer
        let transact_value_a = RPCOperationTransactValueA::new(
            accepted,
            needs_descriptor,
            transaction_id,
            transact_result_seqs,
            closer_to_key_peers,
        )?;

        // Send TransactValue answer
        self.answer(
            msg,
            RPCAnswer::new(RPCAnswerDetail::TransactValueA(Box::new(transact_value_a))),
        )
        .await
    }
}

View file

@ -0,0 +1,71 @@
use super::*;
impl StorageManager {
/// Close an opened local record
#[instrument(level = "trace", target = "stor", skip_all)]
pub async fn close_record(&self, record_key: RecordKey) -> VeilidAPIResult<()> {
let Ok(_guard) = self.startup_lock.enter() else {
apibail_not_initialized!();
};
// Attempt to close the record, returning the opened record if it wasn't already closed
let mut inner = self.inner.lock().await;
Self::close_record_inner(&mut inner, record_key)?;
Ok(())
}
/// Close all opened records
#[instrument(level = "trace", target = "stor", skip_all)]
pub async fn close_all_records(&self) -> VeilidAPIResult<()> {
let Ok(_guard) = self.startup_lock.enter() else {
apibail_not_initialized!();
};
// Attempt to close the record, returning the opened record if it wasn't already closed
let mut inner = self.inner.lock().await;
let keys = inner
.opened_records
.iter()
.map(|(k, v)| {
RecordKey::new(
k.kind(),
BareRecordKey::new(k.value(), v.encryption_key().cloned()),
)
})
.collect::<Vec<_>>();
for key in keys {
Self::close_record_inner(&mut inner, key)?;
}
Ok(())
}
////////////////////////////////////////////////////////////////////////
pub(super) fn close_record_inner(
inner: &mut StorageManagerInner,
record_key: RecordKey,
) -> VeilidAPIResult<()> {
let Some(local_record_store) = inner.local_record_store.as_mut() else {
apibail_not_initialized!();
};
let opaque_record_key = record_key.opaque();
if local_record_store
.peek_record(&opaque_record_key, |_| {})
.is_none()
{
apibail_key_not_found!(opaque_record_key);
}
if inner.opened_records.remove(&opaque_record_key).is_some() {
// Set the watch to cancelled if we have one
// Will process cancellation in the background
inner
.outbound_watch_manager
.set_desired_watch(record_key, None);
}
Ok(())
}
}

View file

@ -0,0 +1,122 @@
use super::*;
impl StorageManager {
/// Create a local record from scratch with a new owner key, open it, and return the opened descriptor
///
/// * `kind` - cryptosystem to use for the record's keys and hash
/// * `schema` - DHT schema for the record; validated before use
/// * `owner` - optional owner keypair; a fresh one is generated when `None`
/// * `safety_selection` - safety/privacy routing selection to open the record with
pub async fn create_record(
&self,
kind: CryptoKind,
schema: DHTSchema,
owner: Option<KeyPair>,
safety_selection: SafetySelection,
) -> VeilidAPIResult<DHTRecordDescriptor> {
let Ok(_guard) = self.startup_lock.enter() else {
apibail_not_initialized!();
};
// Validate schema
schema.validate()?;
// Lock access to the record stores
let mut inner = self.inner.lock().await;
// Create a new owned local record from scratch
let (key, owner) = self
.create_new_owned_local_record_inner(
&mut inner,
kind,
schema,
owner,
safety_selection.clone(),
)
.await?;
// Now that the record is made we should always succeed to open the existing record
// The initial writer is the owner of the record
// (unwrap is safe here: the record was just inserted above, so
// open_existing_record_inner must find it and return Some)
self.open_existing_record_inner(&mut inner, key, Some(owner), safety_selection)
.await
.map(|r| r.unwrap())
}
////////////////////////////////////////////////////////////////////////
/// Build and store a brand-new owned local record, returning its record
/// key and the owner keypair (generated here if one was not supplied).
#[instrument(level = "trace", target = "stor", skip_all, err)]
async fn create_new_owned_local_record_inner(
&self,
inner: &mut StorageManagerInner,
kind: CryptoKind,
schema: DHTSchema,
owner: Option<KeyPair>,
safety_selection: SafetySelection,
) -> VeilidAPIResult<(RecordKey, KeyPair)> {
// Get cryptosystem
let crypto = self.crypto();
let Some(vcrypto) = crypto.get(kind) else {
apibail_generic!("unsupported cryptosystem");
};
// Get local record store
let Some(local_record_store) = inner.local_record_store.as_mut() else {
apibail_not_initialized!();
};
// Verify the dht schema does not contain the node id
// (prevents this node's own identity key from being a schema member)
{
let config = self.config();
if let Some(node_id) = config.network.routing_table.public_keys.get(kind) {
let node_member_id = BareMemberId::new(node_id.ref_value());
if schema.is_member(&node_member_id) {
apibail_invalid_argument!(
"node id can not be schema member",
"schema",
node_id.value()
);
}
}
}
// Compile the dht schema
let schema_data = schema.compile();
// New values require a new owner key if not given
let owner = if let Some(owner) = owner {
if owner.kind() != vcrypto.kind() {
apibail_invalid_argument!("owner is wrong crypto kind", "owner", owner);
}
owner
} else {
vcrypto.generate_keypair()
};
// Always create a new encryption key
let encryption_key = Some(vcrypto.random_shared_secret().into_value());
// Calculate dht key
let record_key = Self::make_record_key(
&vcrypto,
owner.ref_value().ref_key(),
&schema_data,
encryption_key,
);
// Make a signed value descriptor for this dht value
let signed_value_descriptor = Arc::new(SignedValueDescriptor::make_signature(
owner.key(),
schema_data,
&vcrypto,
owner.secret(),
)?);
// Add new local value record
let cur_ts = Timestamp::now();
let local_record_detail = LocalRecordDetail::new(safety_selection);
let record =
Record::<LocalRecordDetail>::new(cur_ts, signed_value_descriptor, local_record_detail)?;
let opaque_record_key = record_key.opaque();
local_record_store
.new_record(opaque_record_key, record)
.await?;
Ok((record_key, owner))
}
}

View file

@ -0,0 +1,24 @@
use super::*;
impl StorageManager {
/// Delete a local record
#[instrument(level = "trace", target = "stor", skip_all)]
pub async fn delete_record(&self, record_key: RecordKey) -> VeilidAPIResult<()> {
let Ok(_guard) = self.startup_lock.enter() else {
apibail_not_initialized!();
};
// Ensure the record is closed
let mut inner = self.inner.lock().await;
Self::close_record_inner(&mut inner, record_key.clone())?;
// Get record from the local store
let Some(local_record_store) = inner.local_record_store.as_mut() else {
apibail_not_initialized!();
};
let opaque_record_key = record_key.opaque();
// Remove the record from the local store
local_record_store.delete_record(opaque_record_key).await
}
}

View file

@ -23,8 +23,147 @@ pub(super) struct OutboundGetValueResult {
pub get_result: GetResult,
}
/// The result of the inbound_get_value operation
#[derive(Clone, Debug)]
pub(crate) enum InboundGetValueResult {
/// Value got successfully, or there was no value
Success(GetResult),
/// Invalid transaction id
// NOTE(review): not yet constructed by inbound_get_value in this checkpoint;
// presumably produced once DHT transaction validation is wired up — confirm
InvalidTransaction,
}
impl StorageManager {
/// Get the value of a subkey from an opened local record
///
/// Returns the (decrypted, if the record key carries an encryption key)
/// value data, or `None` if no value exists for the subkey. Unless
/// `force_refresh` is set, a locally cached value is returned without
/// touching the network. When offline, a cached value is returned if
/// available, otherwise 'try again' is raised.
#[instrument(level = "trace", target = "stor", skip_all)]
pub async fn get_value(
&self,
record_key: RecordKey,
subkey: ValueSubkey,
force_refresh: bool,
) -> VeilidAPIResult<Option<ValueData>> {
let Ok(_guard) = self.startup_lock.enter() else {
apibail_not_initialized!();
};
let opaque_record_key = record_key.opaque();
let mut inner = self.inner.lock().await;
// The record must be open; use the safety selection it was opened with
let safety_selection = {
let Some(opened_record) = inner.opened_records.get(&opaque_record_key) else {
apibail_generic!("record not open");
};
opened_record.safety_selection()
};
// See if the requested subkey is our local record store
let last_get_result = self
.handle_get_local_value_inner(&mut inner, opaque_record_key.clone(), subkey, true)
.await?;
// Return the existing value if we have one unless we are forcing a refresh
if !force_refresh {
if let Some(last_get_result_value) = last_get_result.opt_value {
return Ok(Some(self.maybe_decrypt_value_data(
&record_key,
last_get_result_value.value_data(),
)?));
}
}
// Refresh if we can
if !self.dht_is_online() {
// Return the existing value if we have one if we aren't online
if let Some(last_get_result_value) = last_get_result.opt_value {
return Ok(Some(self.maybe_decrypt_value_data(
&record_key,
last_get_result_value.value_data(),
)?));
}
apibail_try_again!("offline, try again later");
};
// Drop the lock for network access
drop(inner);
// May have last descriptor / value
// Use the safety selection we opened the record with
// Remember the last sequence number we had so newer results can be detected
let opt_last_seq = last_get_result
.opt_value
.as_ref()
.map(|v| v.value_data().seq());
let res_rx = self
.outbound_get_value(
opaque_record_key.clone(),
subkey,
safety_selection,
last_get_result,
)
.await?;
// Wait for the first result
let Ok(result) = res_rx.recv_async().await else {
apibail_internal!("failed to receive results");
};
let result = result?;
// An incomplete fanout means more results may arrive on res_rx later
let partial = result.fanout_result.kind.is_incomplete();
// Process the returned result
let out_encrypted = self
.process_outbound_get_value_result(
opaque_record_key.clone(),
subkey,
opt_last_seq,
result,
)
.await?;
// Decrypt for the caller if the record key carries an encryption key
let out = if let Some(vd) = out_encrypted {
Some(self.maybe_decrypt_value_data(&record_key, &vd)?)
} else {
None
};
if let Some(out) = &out {
// If there's more to process, do it in the background
if partial {
self.process_deferred_outbound_get_value_result(
res_rx,
record_key.clone(),
subkey,
out.seq(),
);
}
}
Ok(out)
}
/// Handle a received 'Get Value' query
///
/// Answers from the remote record store for the requested subkey.
// NOTE(review): this replaces an implementation that also consulted the
// local record store; confirm serving only remote values is intentional.
#[instrument(level = "trace", target = "dht", skip_all)]
pub async fn inbound_get_value(
&self,
opaque_record_key: OpaqueRecordKey,
// NOTE(review): currently unused; presumably validated once DHT
// transactions are fully wired up (see InvalidTransaction) — confirm
transaction_id: Option<u64>,
subkey: ValueSubkey,
want_descriptor: bool,
) -> VeilidAPIResult<NetworkResult<InboundGetValueResult>> {
let mut inner = self.inner.lock().await;
// See if the subkey we are getting has a last known remote value
let last_get_result = Self::handle_get_remote_value_inner(
&mut inner,
opaque_record_key,
subkey,
want_descriptor,
)
.await?;
Ok(NetworkResult::value(InboundGetValueResult::Success(
last_get_result,
)))
}
////////////////////////////////////////////////////////////////////////
/// Perform a 'get value' query on the network
/// Performs the work without a transaction
#[instrument(level = "trace", target = "dht", skip_all, err)]
pub(super) async fn outbound_get_value(
&self,
@ -95,6 +234,7 @@ impl StorageManager {
Destination::direct(next_node.routing_domain_filtered(routing_domain))
.with_safety(safety_selection),
opaque_record_key.clone(),
None,
subkey,
last_descriptor.map(|x| (*x).clone()),
)
@ -408,42 +548,4 @@ impl StorageManager {
}
Ok(Some(get_result_value.value_data().clone()))
}
/// Handle a received 'Get Value' query
#[instrument(level = "trace", target = "dht", skip_all)]
pub async fn inbound_get_value(
&self,
opaque_record_key: OpaqueRecordKey,
subkey: ValueSubkey,
want_descriptor: bool,
) -> VeilidAPIResult<NetworkResult<GetResult>> {
let mut inner = self.inner.lock().await;
// See if this is a remote or local value
let (_is_local, last_get_result) = {
// See if the subkey we are getting has a last known local value
let mut last_get_result = self
.handle_get_local_value_inner(&mut inner, opaque_record_key.clone(), subkey, true)
.await?;
// If this is local, it must have a descriptor already
if last_get_result.opt_descriptor.is_some() {
if !want_descriptor {
last_get_result.opt_descriptor = None;
}
(true, last_get_result)
} else {
// See if the subkey we are getting has a last known remote value
let last_get_result = Self::handle_get_remote_value_inner(
&mut inner,
opaque_record_key,
subkey,
want_descriptor,
)
.await?;
(false, last_get_result)
}
};
Ok(NetworkResult::value(last_get_result))
}
}

View file

@ -35,7 +35,7 @@ struct SubkeySeqCount {
pub value_nodes: Vec<NodeRef>,
}
/// The context of the outbound_get_value operation
/// The context of the outbound_inspect_value operation
struct OutboundInspectValueContext {
/// The combined sequence numbers and result counts so far
pub seqcounts: Vec<SubkeySeqCount>,
@ -43,7 +43,7 @@ struct OutboundInspectValueContext {
pub opt_descriptor_info: Option<DescriptorInfo>,
}
/// The result of the outbound_get_value operation
/// The result of the outbound_inspect_value operation
#[derive(Debug, Clone)]
pub(super) struct OutboundInspectValueResult {
/// Fanout results for each subkey
@ -52,8 +52,159 @@ pub(super) struct OutboundInspectValueResult {
pub inspect_result: InspectResult,
}
/// The result of the inbound_inspect_value operation
#[derive(Clone, Debug)]
pub(crate) enum InboundInspectValueResult {
/// Value inspected successfully
Success(InspectResult),
/// Invalid transaction id
// NOTE(review): not yet constructed by inbound_inspect_value in this
// checkpoint; presumably produced once transaction validation lands — confirm
InvalidTransaction,
}
impl StorageManager {
/// Inspect an opened DHT record for its subkey sequence numbers
///
/// * `subkeys` - subkey ranges to inspect; an empty set means the full range
/// * `scope` - how far to take the inspection (local only, network get/set sync, or simulated update)
///
/// Returns a report combining local sequence numbers, offline/in-flight
/// subkey writes, and (for network scopes) network sequence numbers.
#[instrument(level = "trace", target = "stor", skip_all)]
pub async fn inspect_record(
&self,
record_key: RecordKey,
subkeys: ValueSubkeyRangeSet,
scope: DHTReportScope,
) -> VeilidAPIResult<DHTRecordReport> {
let Ok(_guard) = self.startup_lock.enter() else {
apibail_not_initialized!();
};
let opaque_record_key = record_key.opaque();
// Empty subkey set means 'inspect everything'
let subkeys = if subkeys.is_empty() {
ValueSubkeyRangeSet::full()
} else {
subkeys
};
let mut inner = self.inner.lock().await;
// The record must be open; use the safety selection it was opened with
let safety_selection = {
let Some(opened_record) = inner.opened_records.get(&opaque_record_key) else {
apibail_generic!("record not open");
};
opened_record.safety_selection()
};
// See if the requested record is our local record store
let mut local_inspect_result = self
.handle_inspect_local_value_inner(
&mut inner,
opaque_record_key.clone(),
subkeys.clone(),
true,
)
.await?;
// Get the offline subkeys for this record still only returning the ones we're inspecting
// Merge in the currently offline in-flight records and the actively written records as well
let active_subkey_writes = inner
.active_subkey_writes
.get(&opaque_record_key)
.cloned()
.unwrap_or_default();
let offline_subkey_writes = inner
.offline_subkey_writes
.get(&opaque_record_key)
.map(|o| o.subkeys.union(&o.subkeys_in_flight))
.unwrap_or_default()
.union(&active_subkey_writes)
.intersect(&subkeys);
// If this is the maximum scope we're interested in, return the report
// (network seqs are all None for a local-only report)
if matches!(scope, DHTReportScope::Local) {
return DHTRecordReport::new(
local_inspect_result.subkeys().clone(),
offline_subkey_writes,
local_inspect_result.seqs().to_vec(),
vec![None; local_inspect_result.seqs().len()],
)
.inspect_err(|e| {
veilid_log!(self error "invalid record report generated: {}", e);
});
}
// Get rpc processor and drop mutex so we don't block while getting the value from the network
if !self.dht_is_online() {
apibail_try_again!("offline, try again later");
};
// Drop the lock for network access
drop(inner);
// If we're simulating a set, increase the previous sequence number we have by 1
// (a missing seq becomes 0; overflow makes the seq unknown again)
if matches!(scope, DHTReportScope::UpdateSet) {
for seq in local_inspect_result.seqs_mut() {
*seq = if let Some(s) = seq {
let (v, ov) = s.overflowing_add(1);
// NOTE(review): compares a sequence number against
// ValueSubkey::MAX — verify this is the intended MAX constant
if ov || v == ValueSubkey::MAX {
None
} else {
Some(v)
}
} else {
Some(0)
};
}
}
// Get the inspect record report from the network
// Sync scopes inspect from scratch; other scopes seed with the local result
let result = self
.outbound_inspect_value(
opaque_record_key.clone(),
subkeys,
safety_selection,
if matches!(scope, DHTReportScope::SyncGet | DHTReportScope::SyncSet) {
InspectResult::default()
} else {
local_inspect_result.clone()
},
matches!(scope, DHTReportScope::UpdateSet | DHTReportScope::SyncSet),
)
.await?;
// Keep the list of nodes that returned a value for later reference
let mut inner = self.inner.lock().await;
let results_iter = result
.inspect_result
.subkeys()
.iter()
.map(ValueSubkeyRangeSet::single)
.zip(result.subkey_fanout_results.into_iter());
Self::process_fanout_results_inner(
&mut inner,
opaque_record_key.clone(),
results_iter,
false,
self.config().network.dht.consensus_width as usize,
);
// If the network returned nothing, fall back to a local-style report
if result.inspect_result.subkeys().is_empty() {
DHTRecordReport::new(
local_inspect_result.subkeys().clone(),
offline_subkey_writes,
local_inspect_result.seqs().to_vec(),
vec![None; local_inspect_result.seqs().len()],
)
} else {
DHTRecordReport::new(
result.inspect_result.subkeys().clone(),
offline_subkey_writes,
local_inspect_result.seqs().to_vec(),
result.inspect_result.seqs().to_vec(),
)
}
}
////////////////////////////////////////////////////////////////////////
/// Perform a 'inspect value' query on the network
/// Performs the work without a transaction
#[instrument(level = "trace", target = "dht", skip_all, err)]
pub(super) async fn outbound_inspect_value(
&self,
@ -143,6 +294,7 @@ impl StorageManager {
.rpc_call_inspect_value(
Destination::direct(next_node.routing_domain_filtered(routing_domain)).with_safety(safety_selection),
opaque_record_key.clone(),
None,
subkeys.clone(),
opt_descriptor.map(|x| (*x).clone()),
)
@ -363,9 +515,10 @@ impl StorageManager {
pub async fn inbound_inspect_value(
&self,
opaque_record_key: OpaqueRecordKey,
transaction_id: Option<u64>,
subkeys: ValueSubkeyRangeSet,
want_descriptor: bool,
) -> VeilidAPIResult<NetworkResult<InspectResult>> {
) -> VeilidAPIResult<NetworkResult<InboundInspectValueResult>> {
let mut inner = self.inner.lock().await;
let subkeys = if subkeys.is_empty() {
ValueSubkeyRangeSet::full()
@ -373,37 +526,18 @@ impl StorageManager {
subkeys
};
// See if this is a remote or local value
let (_is_local, inspect_result) = {
// See if the subkey we are getting has a last known local value
let mut local_inspect_result = self
.handle_inspect_local_value_inner(
&mut inner,
opaque_record_key.clone(),
subkeys.clone(),
true,
)
.await?;
// If this is local, it must have a descriptor already
if local_inspect_result.opt_descriptor().is_some() {
if !want_descriptor {
local_inspect_result.drop_descriptor();
}
(true, local_inspect_result)
} else {
// See if the subkey we are getting has a last known remote value
let remote_inspect_result = self
.handle_inspect_remote_value_inner(
&mut inner,
opaque_record_key,
subkeys,
want_descriptor,
)
.await?;
(false, remote_inspect_result)
}
};
// See if the subkey we are getting has a last known remote value
let inspect_result = self
.handle_inspect_remote_value_inner(
&mut inner,
opaque_record_key,
subkeys,
want_descriptor,
)
.await?;
Ok(NetworkResult::value(inspect_result))
Ok(NetworkResult::value(InboundInspectValueResult::Success(
inspect_result,
)))
}
}

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,244 @@
use super::*;
impl StorageManager {
/// Open an existing local record if it exists, and if it doesn't exist locally, try to pull it from the network and open it and return the opened descriptor
///
/// * `writer` - optional keypair used for subkey writes; if it matches the
///   record owner, the returned descriptor carries the owner secret
/// * `safety_selection` - safety/privacy routing selection to open with
#[instrument(level = "trace", target = "stor", skip_all)]
pub async fn open_record(
&self,
record_key: RecordKey,
writer: Option<KeyPair>,
safety_selection: SafetySelection,
) -> VeilidAPIResult<DHTRecordDescriptor> {
let Ok(_guard) = self.startup_lock.enter() else {
apibail_not_initialized!();
};
let mut inner = self.inner.lock().await;
// See if we have a local record already or not
if let Some(res) = self
.open_existing_record_inner(
&mut inner,
record_key.clone(),
writer.clone(),
safety_selection.clone(),
)
.await?
{
drop(inner);
// We had an existing record, so check the network to see if we should
// update it with what we have here
let set_consensus = self.config().network.dht.set_value_count as usize;
self.add_rehydration_request(
record_key.opaque(),
ValueSubkeyRangeSet::full(),
set_consensus,
)
.await;
return Ok(res);
}
// No record yet, try to get it from the network
if !self.dht_is_online() {
apibail_try_again!("offline, try again later");
};
// Drop the mutex so we don't block during network access
drop(inner);
// No last descriptor, no last value
// Use the safety selection we opened the record with
// (inspecting subkey 0 is enough to fetch the record's descriptor)
let result = self
.outbound_inspect_value(
record_key.opaque(),
ValueSubkeyRangeSet::single(0),
safety_selection.clone(),
InspectResult::default(),
false,
)
.await?;
// If we got nothing back, the key wasn't found
if result.inspect_result.opt_descriptor().is_none() {
// No result
apibail_key_not_found!(record_key.opaque());
};
// Check again to see if we have a local record already or not
// because waiting for the outbound_inspect_value action could result in the key being opened
// via some parallel process
let mut inner = self.inner.lock().await;
if let Some(res) = self
.open_existing_record_inner(
&mut inner,
record_key.clone(),
writer.clone(),
safety_selection.clone(),
)
.await?
{
// Don't bother to rehydrate in this edge case
// We already checked above and won't have anything better than what
// is on the network in this case
return Ok(res);
}
// Open the new record
self.open_new_record_inner(
&mut inner,
record_key,
writer,
result.inspect_result,
safety_selection,
)
.await
}
////////////////////////////////////////////////////////////////////////
/// Open a record that already exists in the local record store.
///
/// Returns `Ok(None)` when no local record exists for the key. Updates or
/// inserts the opened-record entry (writer, safety selection, encryption key).
#[instrument(level = "trace", target = "stor", skip_all, err)]
pub(super) async fn open_existing_record_inner(
&self,
inner: &mut StorageManagerInner,
record_key: RecordKey,
writer: Option<KeyPair>,
safety_selection: SafetySelection,
) -> VeilidAPIResult<Option<DHTRecordDescriptor>> {
// Get local record store
let Some(local_record_store) = inner.local_record_store.as_mut() else {
apibail_not_initialized!();
};
// See if we have a local record already or not
let cb = |r: &mut Record<LocalRecordDetail>| {
// Process local record
// Keep the safety selection we opened the record with
r.detail_mut().safety_selection = safety_selection.clone();
// Return record details
(r.owner(), r.schema())
};
let opaque_record_key = record_key.opaque();
let (owner, schema) = match local_record_store.with_record_mut(&opaque_record_key, cb) {
Some(v) => v,
None => {
return Ok(None);
}
};
// Had local record
// If the writer we chose is also the owner, we have the owner secret
// Otherwise this is just another subkey writer
let owner_secret = if let Some(writer) = writer.clone() {
if writer.key() == owner {
Some(writer.secret())
} else {
None
}
} else {
None
};
// If the record key carries an encryption key, the cryptosystem for the
// record's kind must be available to use it
let crypto = self.crypto();
let mut crypto_with_key: Option<(CryptoSystemGuard, BareSharedSecret)> = None;
if let Some(k) = record_key.ref_value().encryption_key() {
let Some(value_crypto) = crypto.get(record_key.kind()) else {
apibail_generic!("unsupported cryptosystem for record encryption key");
};
crypto_with_key = Some((value_crypto, k));
}
// Write open record
// (update an existing opened entry in place, or insert a new one)
inner
.opened_records
.entry(opaque_record_key)
.and_modify(|e| {
e.set_writer(writer.clone());
e.set_safety_selection(safety_selection.clone());
e.set_encryption_key(crypto_with_key.as_ref().map(|(_, k)| k.clone()));
})
.or_insert_with(|| {
OpenedRecord::new(
writer.clone(),
safety_selection.clone(),
crypto_with_key.map(|(_, k)| k),
)
});
// Make DHT Record Descriptor to return
let descriptor = DHTRecordDescriptor::new(record_key, owner, owner_secret, schema);
Ok(Some(descriptor))
}
/// Open a record that was just fetched from the network (not yet stored
/// locally), storing it in the local record store first.
///
/// Panics if the record is already open — callers must have checked via
/// open_existing_record_inner before calling this.
#[instrument(level = "trace", target = "stor", skip_all, err)]
pub(super) async fn open_new_record_inner(
&self,
inner: &mut StorageManagerInner,
record_key: RecordKey,
writer: Option<KeyPair>,
inspect_result: InspectResult,
safety_selection: SafetySelection,
) -> VeilidAPIResult<DHTRecordDescriptor> {
// Ensure the record is closed
let opaque_record_key = record_key.opaque();
if inner.opened_records.contains_key(&opaque_record_key) {
panic!("new record should never be opened at this point");
}
// Must have descriptor
let Some(signed_value_descriptor) = inspect_result.opt_descriptor() else {
// No descriptor for new record, can't store this
apibail_generic!("no descriptor");
};
// Get owner
let owner = signed_value_descriptor.owner();
// If the writer we chose is also the owner, we have the owner secret
// Otherwise this is just another subkey writer
let owner_secret = if let Some(writer) = &writer {
if writer.key() == owner {
Some(writer.secret())
} else {
None
}
} else {
None
};
let schema = signed_value_descriptor.schema()?;
// Get local record store
let Some(local_record_store) = inner.local_record_store.as_mut() else {
apibail_not_initialized!();
};
// Make and store a new record for this descriptor
let record = Record::<LocalRecordDetail>::new(
Timestamp::now(),
signed_value_descriptor,
LocalRecordDetail::new(safety_selection.clone()),
)?;
local_record_store
.new_record(opaque_record_key.clone(), record)
.await?;
let encryption_key = record_key.ref_value().encryption_key();
// Write open record
inner.opened_records.insert(
opaque_record_key,
OpenedRecord::new(writer, safety_selection, encryption_key),
);
// Make DHT Record Descriptor to return
let descriptor = DHTRecordDescriptor::new(record_key, owner, owner_secret, schema);
Ok(descriptor)
}
}

View file

@ -0,0 +1,97 @@
use super::*;
impl StorageManager {
/// Get the encryption key for an opened OpaqueRecordKey
/// Opaque record keys must have been opened with their full record key in order to be read
pub(super) async fn get_encryption_key_for_opaque_record_key(
&self,
opaque_record_key: &OpaqueRecordKey,
) -> VeilidAPIResult<Option<BareSharedSecret>> {
let inner = self.inner.lock().await;
let Some(opened_record) = inner.opened_records.get(opaque_record_key) else {
apibail_generic!("decrypt_value_data: opened_records does not contain an expected key");
};
Ok(opened_record.encryption_key().cloned())
}
/// Encrypt value data if the record key contains an encryption key.
/// Leave it unchanged otherwise.
pub(super) fn maybe_encrypt_value_data(
&self,
record_key: &RecordKey,
value_data: &ValueData,
) -> VeilidAPIResult<EncryptedValueData> {
if let Some(encryption_key) = record_key.ref_value().ref_encryption_key() {
let crypto = self.registry.crypto();
let Some(vcrypto) = crypto.get(record_key.kind()) else {
apibail_generic!("decrypt_value_data: unsupported crypto kind")
};
let mut data = value_data.data().to_vec();
let nonce = vcrypto.random_nonce();
let encryption_key = SharedSecret::new(record_key.kind(), encryption_key.clone());
vcrypto.crypt_in_place_no_auth(&mut data, &nonce, &encryption_key)?;
Ok(EncryptedValueData::new_with_seq(
value_data.seq(),
data,
value_data.writer(),
Some(nonce),
)?)
} else {
Ok(EncryptedValueData::new_with_seq(
value_data.seq(),
value_data.data().to_vec(),
value_data.writer(),
None,
)?)
}
}
/// Decrypt value data if the record key contains an encryption key and value data contains nonce.
/// Leave data unchanged if both are none.
/// Returns error if either encryption key or nonce is None.
pub(super) fn maybe_decrypt_value_data(
&self,
record_key: &RecordKey,
encrypted_value_data: &EncryptedValueData,
) -> VeilidAPIResult<ValueData> {
match (
record_key.ref_value().ref_encryption_key(),
encrypted_value_data.nonce(),
) {
(Some(encryption_key), Some(nonce)) => {
let crypto = self.registry.crypto();
let Some(vcrypto) = crypto.get(record_key.kind()) else {
apibail_generic!("cannot decrypt value data: unsupported crypto kind")
};
let mut data = encrypted_value_data.data().to_vec();
let encryption_key = SharedSecret::new(record_key.kind(), encryption_key.clone());
vcrypto.crypt_in_place_no_auth(&mut data, &nonce, &encryption_key)?;
Ok(ValueData::new_with_seq(
encrypted_value_data.seq(),
data,
encrypted_value_data.writer(),
)?)
}
(None, None) => Ok(ValueData::new_with_seq(
encrypted_value_data.seq(),
encrypted_value_data.data().to_vec(),
encrypted_value_data.writer(),
)?),
(Some(_), None) => {
// Should not happen in normal circumstances
apibail_generic!("cannot decrypt value data: missing nonce")
}
(None, Some(_)) => {
// Should not happen in normal circumstances
apibail_generic!("cannot decrypt value data: missing encryption key")
}
}
}
}

View file

@ -0,0 +1,91 @@
use super::*;
impl StorageManager {
/// Builds the record key for a given schema and owner
#[instrument(level = "trace", target = "stor", skip_all)]
pub fn get_record_key(
&self,
schema: DHTSchema,
owner_key: &PublicKey,
encryption_key: Option<SharedSecret>,
) -> VeilidAPIResult<RecordKey> {
// Get cryptosystem
let crypto = self.crypto();
let Some(vcrypto) = crypto.get(owner_key.kind()) else {
apibail_generic!("unsupported cryptosystem");
};
// Encryption key must match owner key
if let Some(ek) = &encryption_key {
vcrypto.check_shared_secret(ek)?;
}
// Validate schema
schema.validate()?;
let schema_data = schema.compile();
Ok(Self::make_record_key(
&vcrypto,
owner_key.ref_value(),
&schema_data,
encryption_key.map(|x| x.into_value()),
))
}
/// Validate a record key
pub fn check_record_key(&self, record_key: &RecordKey) -> VeilidAPIResult<()> {
let crypto = self.crypto();
let Some(vcrypto) = crypto.get(record_key.kind()) else {
apibail_generic!("unsupported record key kind");
};
if record_key.value().key().len() != HASH_COORDINATE_LENGTH {
apibail_generic!(format!(
"invalid record key length: {} != {}",
record_key.value().key().len(),
HASH_COORDINATE_LENGTH
));
}
if let Some(encryption_key) = record_key.value().encryption_key() {
if encryption_key.len() != vcrypto.shared_secret_length() {
apibail_generic!(format!(
"invalid encryption key length: {} != {}",
encryption_key.len(),
vcrypto.shared_secret_length()
));
}
}
Ok(())
}
////////////////////////////////////////////////////////////////////////
pub(super) fn make_opaque_record_key(
vcrypto: &CryptoSystemGuard<'_>,
owner_key: &BarePublicKey,
schema_data: &[u8],
) -> OpaqueRecordKey {
let mut hash_data = Vec::<u8>::with_capacity(owner_key.len() + 4 + schema_data.len());
hash_data.extend_from_slice(vcrypto.kind().bytes());
hash_data.extend_from_slice(owner_key);
hash_data.extend_from_slice(schema_data);
let hash = vcrypto.generate_hash(&hash_data);
OpaqueRecordKey::new(vcrypto.kind(), BareOpaqueRecordKey::new(hash.ref_value()))
}
pub(super) fn make_record_key(
vcrypto: &CryptoSystemGuard<'_>,
owner_key: &BarePublicKey,
schema_data: &[u8],
encryption_key: Option<BareSharedSecret>,
) -> RecordKey {
let opaque = Self::make_opaque_record_key(vcrypto, owner_key, schema_data);
RecordKey::new(
vcrypto.kind(),
BareRecordKey::new(opaque.into_value(), encryption_key),
)
}
}

View file

@ -14,6 +14,9 @@ pub(in crate::storage_manager) struct OpenedRecord {
/// Encryption key, for newer records
encryption_key: Option<BareSharedSecret>,
/// Outbound transaction state
outbound_transaction: Option<OutboundTransactionState>,
}
impl OpenedRecord {

View file

@ -0,0 +1,305 @@
use super::*;
impl StorageManager {
/// Look up a subkey's value in local state, checking pending offline
/// writes first, then the local record store. Returns an empty GetResult
/// (no value, no descriptor) when nothing is found.
#[instrument(level = "trace", target = "stor", skip_all, err)]
pub(super) async fn handle_get_local_value_inner(
&self,
inner: &mut StorageManagerInner,
opaque_record_key: OpaqueRecordKey,
subkey: ValueSubkey,
want_descriptor: bool,
) -> VeilidAPIResult<GetResult> {
// See if the value is in the offline subkey writes first,
// since it may not have been committed yet to the local record store
if let Some(get_result) = self.get_offline_subkey_writes_subkey(
inner,
&opaque_record_key,
subkey,
want_descriptor,
)? {
return Ok(get_result);
}
// See if it's in the local record store
let Some(local_record_store) = inner.local_record_store.as_mut() else {
apibail_not_initialized!();
};
if let Some(get_result) = local_record_store
.get_subkey(opaque_record_key, subkey, want_descriptor)
.await?
{
return Ok(get_result);
}
// Not found anywhere locally
Ok(GetResult {
opt_value: None,
opt_descriptor: None,
})
}
/// Write a signed subkey value into the local record store, retiring any
/// offline subkey writes this newer data supersedes.
#[instrument(level = "trace", target = "stor", skip_all, err)]
pub(super) async fn handle_set_local_value_inner(
&self,
inner: &mut StorageManagerInner,
opaque_record_key: OpaqueRecordKey,
subkey: ValueSubkey,
signed_value_data: Arc<SignedValueData>,
watch_update_mode: InboundWatchUpdateMode,
) -> VeilidAPIResult<()> {
// See if this new data supercedes any offline subkey writes
self.remove_old_offline_subkey_writes_inner(
inner,
opaque_record_key.clone(),
subkey,
signed_value_data.clone(),
);
// See if it's in the local record store
let Some(local_record_store) = inner.local_record_store.as_mut() else {
apibail_not_initialized!();
};
// Write subkey to local store
local_record_store
.set_subkey(
opaque_record_key,
subkey,
signed_value_data,
watch_update_mode,
)
.await?;
Ok(())
}
/// Inspect subkey sequence numbers in the local record store. Returns an
/// empty InspectResult when the record is not stored locally.
#[instrument(level = "trace", target = "stor", skip_all, err)]
pub(super) async fn handle_inspect_local_value_inner(
&self,
inner: &mut StorageManagerInner,
opaque_record_key: OpaqueRecordKey,
subkeys: ValueSubkeyRangeSet,
want_descriptor: bool,
) -> VeilidAPIResult<InspectResult> {
// See if it's in the local record store
let Some(local_record_store) = inner.local_record_store.as_mut() else {
apibail_not_initialized!();
};
if let Some(inspect_result) = local_record_store
.inspect_record(opaque_record_key, &subkeys, want_descriptor)
.await?
{
return Ok(inspect_result);
}
// Record not stored locally: empty result (no subkeys, no seqs, no descriptor)
InspectResult::new(
self,
subkeys,
"handle_inspect_local_value_inner",
ValueSubkeyRangeSet::new(),
vec![],
None,
)
}
#[instrument(level = "trace", target = "stor", skip_all, err)]
pub(super) async fn handle_get_remote_value_inner(
inner: &mut StorageManagerInner,
opaque_record_key: OpaqueRecordKey,
subkey: ValueSubkey,
want_descriptor: bool,
) -> VeilidAPIResult<GetResult> {
// See if it's in the remote record store
let Some(remote_record_store) = inner.remote_record_store.as_mut() else {
apibail_not_initialized!();
};
if let Some(get_result) = remote_record_store
.get_subkey(opaque_record_key, subkey, want_descriptor)
.await?
{
return Ok(get_result);
}
Ok(GetResult {
opt_value: None,
opt_descriptor: None,
})
}
/// Write a signed subkey value into the remote record store, creating the
/// remote record from the supplied descriptor if it does not exist yet.
#[instrument(level = "trace", target = "stor", skip_all, err)]
pub(super) async fn handle_set_remote_value_inner(
inner: &mut StorageManagerInner,
opaque_record_key: OpaqueRecordKey,
subkey: ValueSubkey,
signed_value_data: Arc<SignedValueData>,
signed_value_descriptor: Arc<SignedValueDescriptor>,
watch_update_mode: InboundWatchUpdateMode,
) -> VeilidAPIResult<()> {
// See if it's in the remote record store
let Some(remote_record_store) = inner.remote_record_store.as_mut() else {
apibail_not_initialized!();
};
// See if we have a remote record already or not
if remote_record_store
.with_record(&opaque_record_key, |_| {})
.is_none()
{
// record didn't exist, make it
let cur_ts = Timestamp::now();
let remote_record_detail = RemoteRecordDetail {};
let record = Record::<RemoteRecordDetail>::new(
cur_ts,
signed_value_descriptor,
remote_record_detail,
)?;
remote_record_store
.new_record(opaque_record_key.clone(), record)
.await?
};
// Write subkey to remote store
remote_record_store
.set_subkey(
opaque_record_key,
subkey,
signed_value_data,
watch_update_mode,
)
.await?;
Ok(())
}
/// Inspect subkey sequence numbers in the remote record store. Returns an
/// empty InspectResult when the record is not stored remotely.
#[instrument(level = "trace", target = "stor", skip_all, err)]
pub(super) async fn handle_inspect_remote_value_inner(
&self,
inner: &mut StorageManagerInner,
opaque_record_key: OpaqueRecordKey,
subkeys: ValueSubkeyRangeSet,
want_descriptor: bool,
) -> VeilidAPIResult<InspectResult> {
// See if it's in the remote record store
let Some(remote_record_store) = inner.remote_record_store.as_mut() else {
apibail_not_initialized!();
};
if let Some(inspect_result) = remote_record_store
.inspect_record(opaque_record_key, &subkeys, want_descriptor)
.await?
{
return Ok(inspect_result);
}
// Record not stored remotely: empty result (no subkeys, no seqs, no descriptor)
InspectResult::new(
self,
subkeys,
"handle_inspect_remote_value_inner",
ValueSubkeyRangeSet::new(),
vec![],
None,
)
}
/// Get the nodes known to hold a record's value, filtered to those the
/// routing table can still resolve. Returns `None` when the record is not
/// in the local record store.
#[instrument(level = "trace", target = "stor", skip_all, err)]
pub(super) async fn get_value_nodes(
&self,
opaque_record_key: OpaqueRecordKey,
) -> VeilidAPIResult<Option<Vec<NodeRef>>> {
let inner = self.inner.lock().await;
// Get local record store
let Some(local_record_store) = inner.local_record_store.as_ref() else {
apibail_not_initialized!();
};
// Get routing table to see if we still know about these nodes
let routing_table = self.routing_table();
// Nodes whose lookup fails (no longer in the routing table) are dropped
let opt_value_nodes = local_record_store.peek_record(&opaque_record_key, |r| {
let d = r.detail();
d.nodes
.keys()
.cloned()
.filter_map(|nr| routing_table.lookup_node_ref(nr).ok().flatten())
.collect()
});
Ok(opt_value_nodes)
}
// #[instrument(level = "trace", target = "stor", skip_all, err)]
// async fn move_remote_record_to_local_inner(
// &self,
// inner: &mut StorageManagerInner,
// record_key: RecordKey,
// safety_selection: SafetySelection,
// ) -> VeilidAPIResult<Option<(PublicKey, DHTSchema)>> {
// // Get local record store
// let Some(local_record_store) = inner.local_record_store.as_mut() else {
// apibail_not_initialized!();
// };
// // Get remote record store
// let Some(remote_record_store) = inner.remote_record_store.as_mut() else {
// apibail_not_initialized!();
// };
// let rcb = |r: &Record<RemoteRecordDetail>| {
// // Return record details
// r.clone()
// };
// let opaque_record_key = record_key.opaque();
// let Some(remote_record) = remote_record_store.with_record(&opaque_record_key, rcb) else {
// // No local or remote record found, return None
// return Ok(None);
// };
// // Make local record
// let cur_ts = Timestamp::now();
// let local_record = Record::new(
// cur_ts,
// remote_record.descriptor().clone(),
// LocalRecordDetail::new(safety_selection),
// )?;
// local_record_store
// .new_record(opaque_record_key.clone(), local_record)
// .await?;
// // Move copy subkey data from remote to local store
// for subkey in remote_record.stored_subkeys().iter() {
// let Some(get_result) = remote_record_store
// .get_subkey(opaque_record_key.clone(), subkey, false)
// .await?
// else {
// // Subkey was missing
// veilid_log!(self warn "Subkey was missing: {} #{}", record_key, subkey);
// continue;
// };
// let Some(subkey_data) = get_result.opt_value else {
// // Subkey was missing
// veilid_log!(self warn "Subkey data was missing: {} #{}", record_key, subkey);
// continue;
// };
// local_record_store
// .set_subkey(
// opaque_record_key.clone(),
// subkey,
// subkey_data,
// InboundWatchUpdateMode::NoUpdate,
// )
// .await?;
// }
// // Move watches
// local_record_store.move_watches(
// opaque_record_key.clone(),
// remote_record_store.move_watches(opaque_record_key.clone(), None),
// );
// // Delete remote record from store
// remote_record_store
// .delete_record(opaque_record_key.clone())
// .await?;
// // Return record information as transferred to local record
// Ok(Some((remote_record.owner(), remote_record.schema())))
// }
}

View file

@ -30,12 +30,321 @@ pub(crate) enum InboundSetValueResult {
Success,
/// Newer value or conflicting value present
Ignored(Arc<SignedValueData>),
/// Invalid transaction id
InvalidTransaction,
/// Descriptor is needed for first set
NeedsDescriptor,
}
impl StorageManager {
/// Set the value of a subkey on an opened local record
///
/// The record referred to by `record_key` must currently be open, and a
/// writer keypair must be available, either via `options` or from when the
/// record was opened. Returns `Ok(None)` when no network write was needed
/// (value unchanged), when the write was queued for offline flushing, or when
/// the network reported a transient 'try again'; otherwise returns whatever
/// value data resulted from processing the fanout
/// (see `process_outbound_set_value_result`).
#[instrument(level = "trace", target = "stor", skip_all)]
pub async fn set_value(
    &self,
    record_key: RecordKey,
    subkey: ValueSubkey,
    data: Vec<u8>,
    options: Option<SetDHTValueOptions>,
) -> VeilidAPIResult<Option<ValueData>> {
    let Ok(_guard) = self.startup_lock.enter() else {
        apibail_not_initialized!();
    };
    let opaque_record_key = record_key.opaque();
    let mut inner = self.inner.lock().await;
    // Get cryptosystem
    let crypto = self.crypto();
    let Some(vcrypto) = crypto.get(record_key.kind()) else {
        apibail_generic!("unsupported cryptosystem for record key");
    };
    let (safety_selection, opt_writer) = {
        let Some(opened_record) = inner.opened_records.get(&opaque_record_key) else {
            apibail_generic!("record not open");
        };
        (
            opened_record.safety_selection(),
            opened_record.writer().cloned(),
        )
    };
    // Use the specified writer, or if not specified, the default writer when the record was opened
    let opt_writer = options
        .as_ref()
        .and_then(|o| o.writer.clone())
        .or(opt_writer);
    let allow_offline = options
        .unwrap_or_default()
        .allow_offline
        .unwrap_or_default();
    // If we don't have a writer then we can't write
    let Some(writer) = opt_writer else {
        apibail_generic!("value is not writable");
    };
    // See if the subkey we are modifying has a last known local value
    let last_get_result = self
        .handle_get_local_value_inner(&mut inner, opaque_record_key.clone(), subkey, true)
        .await?;
    // Get the descriptor and schema for the key
    let Some(descriptor) = last_get_result.opt_descriptor else {
        apibail_generic!("must have a descriptor");
    };
    let schema = descriptor.schema()?;
    // First write of a subkey uses sequence number zero
    let mut seq = 0;
    // Check if the subkey value already exists
    if let Some(last_signed_value_data) = last_get_result.opt_value {
        let decrypted =
            self.maybe_decrypt_value_data(&record_key, last_signed_value_data.value_data())?;
        if decrypted.data() == data
            && last_signed_value_data.value_data().writer() == writer.key()
        {
            // Data and writer is the same, nothing is changing,
            // just return that we set it, but no network activity needs to happen
            return Ok(None);
        }
        // New value is different, increment sequence number
        seq = last_signed_value_data.value_data().seq() + 1;
    };
    // Make new subkey data
    let value_data = ValueData::new_with_seq(seq, data, writer.key())?;
    let encrypted_value_data = self.maybe_encrypt_value_data(&record_key, &value_data)?;
    // Validate with schema
    if let Err(e) = self.check_subkey_value_data(
        &schema,
        descriptor.ref_owner(),
        subkey,
        &encrypted_value_data,
    ) {
        veilid_log!(self debug "schema validation error: {}", e);
        // Validation failed, ignore this value
        apibail_generic!(format!(
            "failed schema validation: {}:{}",
            record_key, subkey
        ));
    }
    // Sign the new value data with the writer
    let signed_value_data = Arc::new(SignedValueData::make_signature(
        encrypted_value_data,
        &descriptor.owner(),
        subkey,
        &vcrypto,
        &writer.secret(),
    )?);
    // Check if we are offline
    // This is a race, but an optimization to avoid fanout if it is likely to fail
    if !self.dht_is_online() {
        if allow_offline == AllowOffline(false) {
            apibail_try_again!("offline, try again later");
        }
        veilid_log!(self debug "Writing subkey offline because we are offline: {}:{} len={}", opaque_record_key, subkey, signed_value_data.value_data().data().len() );
        // Add to offline writes to flush
        self.add_offline_subkey_write_inner(
            &mut inner,
            opaque_record_key,
            subkey,
            safety_selection,
            signed_value_data,
        );
        return Ok(None);
    };
    // Note that we are writing this subkey in the foreground
    // If it appears we are already doing this, then put it to the background/offline queue
    let opt_guard =
        self.mark_active_subkey_write_inner(&mut inner, opaque_record_key.clone(), subkey);
    if opt_guard.is_none() {
        if allow_offline == AllowOffline(false) {
            apibail_try_again!("offline, try again later");
        }
        veilid_log!(self debug "Writing subkey offline due to concurrent foreground write: {}:{} len={}", opaque_record_key, subkey, signed_value_data.value_data().data().len() );
        // Add to offline writes to flush
        self.add_offline_subkey_write_inner(
            &mut inner,
            opaque_record_key,
            subkey,
            safety_selection,
            signed_value_data,
        );
        return Ok(None);
    }
    let guard = opt_guard.unwrap();
    // Drop the lock for network access
    drop(inner);
    veilid_log!(self debug "Writing subkey to the network: {}:{} len={}", opaque_record_key, subkey, signed_value_data.value_data().data().len() );
    // Use the safety selection we opened the record with
    let res_rx = match self
        .outbound_set_value(
            opaque_record_key.clone(),
            subkey,
            safety_selection.clone(),
            signed_value_data.clone(),
            descriptor,
        )
        .await
    {
        Ok(v) => v,
        Err(e) => {
            // Failed to write, try again later
            let mut inner = self.inner.lock().await;
            // Remove from active subkey writes
            self.unmark_active_subkey_write_inner(&mut inner, guard);
            // NOTE: the offline enqueue (or TryAgain bail) happens before the
            // error kind is inspected, so an AllowOffline(true) caller gets
            // the write queued even when the error was TryAgain
            if allow_offline == AllowOffline(true) {
                self.add_offline_subkey_write_inner(
                    &mut inner,
                    opaque_record_key.clone(),
                    subkey,
                    safety_selection,
                    signed_value_data.clone(),
                );
            } else {
                apibail_try_again!("offline, try again later");
            }
            if matches!(e, VeilidAPIError::TryAgain { message: _ }) {
                return Ok(None);
            }
            return Err(e);
        }
    };
    let out = if allow_offline == AllowOffline(true) {
        // Process one fanout result in the foreground, and if necessary, more in the background
        // This trades off possibly having a consensus conflict, which requires watching for ValueChanged
        // for lower latency. Can only be done if we are allowing offline processing because
        // the network could go down after the first fanout result is processed and before we complete fanout.
        self.background_process_set_value_results(
            res_rx,
            record_key,
            subkey,
            value_data,
            safety_selection,
        )
        .await
    } else {
        // Process all fanout results in the foreground.
        // Takes longer but ensures the value is fully committed to the network.
        self.foreground_process_set_value_results(
            res_rx,
            record_key,
            subkey,
            value_data,
            safety_selection,
        )
        .await
    };
    // Remove active subkey write
    let mut inner = self.inner.lock().await;
    // Remove from active subkey writes
    self.unmark_active_subkey_write_inner(&mut inner, guard);
    // Transient failures surface to the caller as a silent no-op
    if matches!(out, Err(VeilidAPIError::TryAgain { message: _ })) {
        return Ok(None);
    }
    out
}
////////////////////////////////////////////////////////////////////////
async fn background_process_set_value_results(
&self,
res_rx: flume::Receiver<VeilidAPIResult<set_value::OutboundSetValueResult>>,
record_key: RecordKey,
subkey: ValueSubkey,
value_data: ValueData,
safety_selection: SafetySelection,
) -> VeilidAPIResult<Option<ValueData>> {
// Wait for the first result
let Ok(result) = res_rx.recv_async().await else {
apibail_internal!("failed to receive results");
};
let result = result?;
let partial = result.fanout_result.kind.is_incomplete();
// Process the returned result
let out = self
.process_outbound_set_value_result(
record_key.clone(),
subkey,
value_data.clone(),
safety_selection.clone(),
result,
)
.await?;
// If there's more to process, do it in the background
if partial {
self.process_deferred_outbound_set_value_result(
res_rx,
record_key,
subkey,
value_data,
safety_selection,
);
}
Ok(out)
}
async fn foreground_process_set_value_results(
&self,
res_rx: flume::Receiver<VeilidAPIResult<set_value::OutboundSetValueResult>>,
record_key: RecordKey,
subkey: ValueSubkey,
value_data: ValueData,
safety_selection: SafetySelection,
) -> VeilidAPIResult<Option<ValueData>> {
let Some(stop_token) = self.startup_lock.stop_token() else {
apibail_not_initialized!();
};
loop {
let timeout_res = res_rx.recv_async().timeout_at(stop_token.clone()).await;
let Ok(res) = timeout_res else {
apibail_not_initialized!();
};
let Ok(result) = res else {
apibail_internal!("failed to receive results");
};
let result = result?;
let is_incomplete = result.fanout_result.kind.is_incomplete();
let opt_value_data = self
.process_outbound_set_value_result(
record_key.clone(),
subkey,
value_data.clone(),
safety_selection.clone(),
result,
)
.await?;
if !is_incomplete {
return Ok(opt_value_data);
}
}
}
/// Perform a 'set value' query on the network
/// Performs the work without a transaction
#[instrument(level = "trace", target = "dht", skip_all, err)]
pub(super) async fn outbound_set_value(
&self,
@ -120,6 +429,7 @@ impl StorageManager {
Destination::direct(next_node.routing_domain_filtered(routing_domain))
.with_safety(safety_selection.clone()),
opaque_record_key.clone(),
None,
subkey,
(*value).clone(),
(*descriptor).clone(),
@ -450,6 +760,7 @@ impl StorageManager {
pub async fn inbound_set_value(
&self,
opaque_record_key: OpaqueRecordKey,
transaction_id: Option<u64>,
subkey: ValueSubkey,
value: Arc<SignedValueData>,
descriptor: Option<Arc<SignedValueDescriptor>>,
@ -457,27 +768,14 @@ impl StorageManager {
) -> VeilidAPIResult<NetworkResult<InboundSetValueResult>> {
let mut inner = self.inner.lock().await;
// See if this is a remote or local value
let (is_local, last_get_result) = {
// See if the subkey we are modifying has a last known local value
let last_get_result = self
.handle_get_local_value_inner(&mut inner, opaque_record_key.clone(), subkey, true)
.await?;
// If this is local, it must have a descriptor already
if last_get_result.opt_descriptor.is_some() {
(true, last_get_result)
} else {
// See if the subkey we are modifying has a last known remote value
let last_get_result = Self::handle_get_remote_value_inner(
&mut inner,
opaque_record_key.clone(),
subkey,
true,
)
.await?;
(false, last_get_result)
}
};
// See if the subkey we are modifying has a last known remote value
let last_get_result = Self::handle_get_remote_value_inner(
&mut inner,
opaque_record_key.clone(),
subkey,
true,
)
.await?;
// Make sure this value would actually be newer
if let Some(last_value) = &last_get_result.opt_value {
@ -543,26 +841,15 @@ impl StorageManager {
}
// Do the set and return no new value
let res = if is_local {
self.handle_set_local_value_inner(
&mut inner,
opaque_record_key.clone(),
subkey,
value,
InboundWatchUpdateMode::ExcludeTarget(target),
)
.await
} else {
Self::handle_set_remote_value_inner(
&mut inner,
opaque_record_key.clone(),
subkey,
value,
actual_descriptor,
InboundWatchUpdateMode::ExcludeTarget(target),
)
.await
};
let res = Self::handle_set_remote_value_inner(
&mut inner,
opaque_record_key.clone(),
subkey,
value,
actual_descriptor,
InboundWatchUpdateMode::ExcludeTarget(target),
)
.await;
match res {
Ok(()) => {}
Err(VeilidAPIError::Internal { message }) => {

View file

@ -66,4 +66,29 @@ impl StorageManager {
Ok(())
}
// Send single value change out to the network
// Send single value change out to the network
#[instrument(level = "trace", target = "stor", skip(self), err)]
async fn send_value_change(&self, vc: ValueChangedInfo) -> VeilidAPIResult<()> {
    // Bail early with a retryable error if the network is down
    if !self.dht_is_online() {
        apibail_try_again!("network is not available");
    };
    let rpc_processor = self.rpc_processor();
    // Resolve the watcher to a routable destination; the call is made without
    // a privacy route (Unsafe) but prefers ordered transports
    let dest = rpc_processor
        .resolve_target_to_destination(
            vc.target.clone(),
            SafetySelection::Unsafe(Sequencing::PreferOrdered),
        )
        .await
        .map_err(VeilidAPIError::from)?;
    // Issue the ValueChanged rpc; network-level (non-fatal) results are
    // logged by the macro rather than returned to the caller
    network_result_value_or_log!(self rpc_processor
        .rpc_call_value_changed(dest, vc.record_key.clone(), vc.subkeys.clone(), vc.count, vc.watch_id, vc.value.map(|v| (*v).clone()) )
        .await
        .map_err(VeilidAPIError::from)? => [format!(": dest={:?} vc={:?}", dest, vc)] {});
    Ok(())
}
}

View file

@ -0,0 +1,417 @@
use super::*;
impl_veilid_log_facility!("stor");
/// All of the transaction ids and state for an outbound transaction on a single record
#[derive(Debug, Clone)]
pub(super) struct OutboundTransactionState {
    // Per-node transaction state; presumably keyed by the node the BEGIN was
    // issued to — TODO confirm once the transaction machinery is wired up
    per_node_state: HashMap<NodeId, PerNodeOutboundTransactionState>,
}
/// The transaction state per node
#[derive(Debug, Clone)]
pub(super) struct PerNodeOutboundTransactionState {
    /// The transaction id from the BEGIN operation
    xid: u64,
}
// /// The fully parsed descriptor
// struct DescriptorInfo {
// /// The descriptor itself
// descriptor: Arc<SignedValueDescriptor>,
// /// The in-schema subkeys that overlap the inspected range
// subkeys: ValueSubkeyRangeSet,
// }
// impl DescriptorInfo {
// pub fn new(
// descriptor: Arc<SignedValueDescriptor>,
// subkeys: &ValueSubkeyRangeSet,
// ) -> VeilidAPIResult<Self> {
// let schema = descriptor.schema().map_err(RPCError::invalid_format)?;
// let subkeys = schema.truncate_subkeys(subkeys, Some(MAX_INSPECT_VALUE_A_SEQS_LEN));
// Ok(Self {
// descriptor,
// subkeys,
// })
// }
// // }
// /// Info tracked per subkey
// struct SubkeySeqCount {
// /// The newest sequence number found for a subkey
// pub seq: Option<ValueSeqNum>,
// /// The set of nodes that had the most recent value for this subkey
// pub consensus_nodes: Vec<NodeRef>,
// /// The set of nodes that had any value for this subkey
// pub value_nodes: Vec<NodeRef>,
// }
/// The context of the outbound_transact_value operation
///
/// NOTE(review): currently an empty placeholder — the tracking fields below
/// are still commented out pending the transaction implementation.
struct OutboundTransactValueContext {
    // /// The combined sequence numbers and result counts so far
    // pub seqcounts: Vec<SubkeySeqCount>,
    // /// The descriptor if we got a fresh one or empty if no descriptor was needed
    // pub opt_descriptor_info: Option<DescriptorInfo>,
}
/// The result of the outbound_transact_value operation
#[derive(Debug, Clone)]
pub(super) struct OutboundTransactValueBeginResult {
    /// The aggregated fanout result for the BEGIN operation
    /// (original comment said 'for each subkey' but this is a single result —
    /// TODO confirm intended shape)
    pub fanout_result: FanoutResult,
    /// The transactions that were retrieved
    pub transact_result: TransactResult,
}
/// The result of the inbound_transact_value operation
#[derive(Clone, Debug)]
pub(crate) enum InboundTransactValueResult {
    /// Value transacted successfully; carries the transaction result payload
    Success(TransactResult),
    /// Invalid transaction id
    InvalidTransaction,
}
impl StorageManager {
/// Perform a begin transact value query on the network for a single record
/// This routine uses fanout and stores the fanout result and individual transaction ids in xxxx
#[instrument(level = "trace", target = "dht", skip_all, err)]
pub(super) async fn outbound_begin_transact_value(
&self,
opaque_record_key: OpaqueRecordKey,
descriptor: Option<SignedValueDescriptor,
safety_selection: SafetySelection,
) -> VeilidAPIResult<OutboundTransactValueResult> {
let routing_domain = RoutingDomain::PublicInternet;
let requested_subkeys = subkeys.clone();
// Get the DHT parameters for 'TransactValue'
let config = self.config();
let (key_count, consensus_count, fanout, timeout_us) = (
config.network.dht.max_find_node_count as usize,
config.network.dht.set_value_count as usize,
config.network.dht.set_value_fanout as usize,
TimestampDuration::from(ms_to_us(config.network.dht.set_value_timeout_ms)),
);
// Get the nodes we know are caching this value to seed the fanout
let init_fanout_queue = {
self.get_value_nodes(opaque_record_key.clone())
.await?
.unwrap_or_default()
.into_iter()
.filter(|x| {
x.node_info(routing_domain)
.map(|ni| ni.has_all_capabilities(&[CAP_DHT]))
.unwrap_or_default()
})
.collect()
};
// Make do-inspect-value answer context
let opt_descriptor_info = if let Some(descriptor) = local_inspect_result.opt_descriptor() {
// Get the descriptor info. This also truncates the subkeys list to what can be returned from the network.
Some(DescriptorInfo::new(descriptor, &subkeys)?)
} else {
None
};
let context = Arc::new(Mutex::new(OutboundInspectValueContext {
seqcounts: local_inspect_result
.seqs()
.iter()
.map(|s| SubkeySeqCount {
seq: *s,
consensus_nodes: vec![],
value_nodes: vec![],
})
.collect(),
opt_descriptor_info,
}));
// Routine to call to generate fanout
let call_routine = {
let context = context.clone();
let registry = self.registry();
let opaque_record_key = opaque_record_key.clone();
let safety_selection = safety_selection.clone();
Arc::new(
move |next_node: NodeRef| -> PinBoxFutureStatic<FanoutCallResult> {
let context = context.clone();
let registry = registry.clone();
let opt_descriptor = local_inspect_result.opt_descriptor();
let subkeys = subkeys.clone();
let opaque_record_key = opaque_record_key.clone();
let safety_selection = safety_selection.clone();
Box::pin(async move {
let rpc_processor = registry.rpc_processor();
let iva = match
rpc_processor
.rpc_call_inspect_value(
Destination::direct(next_node.routing_domain_filtered(routing_domain)).with_safety(safety_selection),
opaque_record_key.clone(),
subkeys.clone(),
opt_descriptor.map(|x| (*x).clone()),
)
.await? {
NetworkResult::Timeout => {
return Ok(FanoutCallOutput{peer_info_list: vec![], disposition: FanoutCallDisposition::Timeout});
}
NetworkResult::ServiceUnavailable(_) |
NetworkResult::NoConnection(_) |
NetworkResult::AlreadyExists(_) |
NetworkResult::InvalidMessage(_) => {
return Ok(FanoutCallOutput{peer_info_list: vec![], disposition: FanoutCallDisposition::Invalid});
}
NetworkResult::Value(v) => v
};
let answer = iva.answer;
// Keep the descriptor if we got one. If we had a last_descriptor it will
// already be validated by rpc_call_inspect_value
if let Some(descriptor) = answer.descriptor {
let mut ctx = context.lock();
if ctx.opt_descriptor_info.is_none() {
// Get the descriptor info. This also truncates the subkeys list to what can be returned from the network.
let descriptor_info =
match DescriptorInfo::new(Arc::new(descriptor.clone()), &subkeys) {
Ok(v) => v,
Err(e) => {
veilid_log!(registry debug target:"network_result", "InspectValue returned an invalid descriptor: {}", e);
return Ok(FanoutCallOutput{peer_info_list: vec![], disposition: FanoutCallDisposition::Invalid});
}
};
ctx.opt_descriptor_info = Some(descriptor_info);
}
}
// Keep the value if we got one and it is newer and it passes schema validation
if answer.seqs.is_empty() {
veilid_log!(registry debug target:"network_result", "InspectValue returned no seq, fanout call returned peers {}", answer.peers.len());
return Ok(FanoutCallOutput{peer_info_list: answer.peers, disposition: FanoutCallDisposition::Rejected});
}
veilid_log!(registry debug target:"network_result", "Got seqs back: len={}", answer.seqs.len());
let mut ctx = context.lock();
// Ensure we have a schema and descriptor etc
let Some(descriptor_info) = &ctx.opt_descriptor_info else {
// Got a value but no descriptor for it
// Move to the next node
veilid_log!(registry debug target:"network_result", "InspectValue returned a value with no descriptor invalid descriptor");
return Ok(FanoutCallOutput{peer_info_list: vec![], disposition: FanoutCallDisposition::Invalid});
};
// Get number of subkeys from schema and ensure we are getting the
// right number of sequence numbers betwen that and what we asked for
#[allow(clippy::unnecessary_cast)]
if answer.seqs.len() as u64 != descriptor_info.subkeys.len() as u64 {
// Not the right number of sequence numbers
// Move to the next node
veilid_log!(registry debug target:"network_result", "wrong number of seqs returned {} (wanted {})",
answer.seqs.len(),
descriptor_info.subkeys.len());
return Ok(FanoutCallOutput{peer_info_list: vec![], disposition: FanoutCallDisposition::Invalid});
}
// If we have a prior seqs list, merge in the new seqs
if ctx.seqcounts.is_empty() {
ctx.seqcounts = answer
.seqs
.iter()
.map(|s| SubkeySeqCount {
seq: *s,
// One node has shown us the newest sequence numbers so far
consensus_nodes: vec![next_node.clone()],
value_nodes: vec![next_node.clone()],
})
.collect();
} else {
if ctx.seqcounts.len() != answer.seqs.len() {
veilid_log!(registry debug target:"network_result", "seqs list length should always be equal by now: {} (wanted {})",
answer.seqs.len(),
ctx.seqcounts.len());
return Ok(FanoutCallOutput{peer_info_list: vec![], disposition: FanoutCallDisposition::Invalid});
}
for pair in ctx.seqcounts.iter_mut().zip(answer.seqs.iter()) {
let ctx_seqcnt = pair.0;
let answer_seq = *pair.1;
// If we already have consensus for this subkey, don't bother updating it any more
// While we may find a better sequence number if we keep looking, this does not mimic the behavior
// of get and set unless we stop here
if ctx_seqcnt.consensus_nodes.len() >= consensus_count {
continue;
}
// If the new seq isn't undefined and is better than the old seq (either greater or old is undefined)
// Then take that sequence number and note that we have gotten newer sequence numbers so we keep
// looking for consensus
// If the sequence number matches the old sequence number, then we keep the value node for reference later
if let Some(answer_seq) = answer_seq {
if ctx_seqcnt.seq.is_none() || answer_seq > ctx_seqcnt.seq.unwrap()
{
// One node has shown us the latest sequence numbers so far
ctx_seqcnt.seq = Some(answer_seq);
ctx_seqcnt.consensus_nodes = vec![next_node.clone()];
} else if answer_seq == ctx_seqcnt.seq.unwrap() {
// Keep the nodes that showed us the latest values
ctx_seqcnt.consensus_nodes.push(next_node.clone());
}
}
ctx_seqcnt.value_nodes.push(next_node.clone());
}
}
// Return peers if we have some
veilid_log!(registry debug target:"network_result", "InspectValue fanout call returned peers {}", answer.peers.len());
// Inspect doesn't actually use the fanout queue consensus tracker
Ok(FanoutCallOutput { peer_info_list: answer.peers, disposition: FanoutCallDisposition::Accepted})
}.instrument(tracing::trace_span!("outbound_inspect_value fanout call"))) as PinBoxFuture<FanoutCallResult>
},
)
};
// Routine to call to check if we're done at each step
// For inspect, we are tracking consensus externally from the FanoutCall,
// for each subkey, rather than a single consensus, so the single fanoutresult
// that is passed in here is ignored in favor of our own per-subkey tracking
let check_done = {
let context = context.clone();
Arc::new(move |_: &FanoutResult| {
// If we have reached sufficient consensus on all subkeys, return done
let ctx = context.lock();
let mut has_consensus = true;
for cs in ctx.seqcounts.iter() {
if cs.consensus_nodes.len() < consensus_count {
has_consensus = false;
break;
}
}
!ctx.seqcounts.is_empty() && ctx.opt_descriptor_info.is_some() && has_consensus
})
};
// Call the fanout
let routing_table = self.routing_table();
let fanout_call = FanoutCall::new(
&routing_table,
opaque_record_key.to_hash_coordinate(),
key_count,
fanout,
consensus_count,
timeout_us,
capability_fanout_peer_info_filter(vec![CAP_DHT]),
call_routine,
check_done,
);
let fanout_result = fanout_call.run(init_fanout_queue).await?;
let ctx = context.lock();
let mut subkey_fanout_results = vec![];
for cs in &ctx.seqcounts {
let has_consensus = cs.consensus_nodes.len() >= consensus_count;
let subkey_fanout_result = FanoutResult {
kind: if has_consensus {
FanoutResultKind::Consensus
} else {
fanout_result.kind
},
consensus_nodes: cs.consensus_nodes.clone(),
value_nodes: cs.value_nodes.clone(),
};
subkey_fanout_results.push(subkey_fanout_result);
}
if subkey_fanout_results.len() == 1 {
veilid_log!(self debug "InspectValue Fanout: {:#}\n{:#}", fanout_result, subkey_fanout_results.first().unwrap());
} else {
veilid_log!(self debug "InspectValue Fanout: {:#}:\n{}", fanout_result, debug_fanout_results(&subkey_fanout_results));
}
let result = OutboundInspectValueResult {
subkey_fanout_results,
inspect_result: InspectResult::new(
self,
requested_subkeys,
"outbound_inspect_value",
ctx.opt_descriptor_info
.as_ref()
.map(|d| d.subkeys.clone())
.unwrap_or_default(),
ctx.seqcounts.iter().map(|cs| cs.seq).collect(),
ctx.opt_descriptor_info
.as_ref()
.map(|d| d.descriptor.clone()),
)?,
};
#[allow(clippy::unnecessary_cast)]
{
if result.inspect_result.subkeys().len() as u64
!= result.subkey_fanout_results.len() as u64
{
veilid_log!(self error "mismatch between subkeys returned and fanout results returned: {}!={}", result.inspect_result.subkeys().len(), result.subkey_fanout_results.len());
apibail_internal!("subkey and fanout list length mismatched");
}
}
Ok(result)
}
/// Perform end transaction queries on the network for a single record
///
/// TODO(review): checkpoint stub. The original body was empty, which does not
/// compile for a non-unit return type; `todo!()` keeps the signature in place
/// until the END fanout is implemented.
#[instrument(level = "trace", target = "dht", skip_all, err)]
pub(super) async fn outbound_end_transact_value(
    &self,
    opaque_record_key: OpaqueRecordKey,
) -> VeilidAPIResult<OutboundTransactValueResult> {
    // Silence the unused-parameter warning until the implementation lands
    let _ = opaque_record_key;
    todo!("outbound_end_transact_value is not implemented")
}
/// Perform commit transaction queries on the network for a single record
///
/// TODO(review): checkpoint stub. The original body was empty, which does not
/// compile for a non-unit return type; `todo!()` keeps the signature in place
/// until the COMMIT fanout is implemented.
#[instrument(level = "trace", target = "dht", skip_all, err)]
pub(super) async fn outbound_commit_transact_value(
    &self,
    opaque_record_key: OpaqueRecordKey,
) -> VeilidAPIResult<OutboundTransactValueResult> {
    // Silence the unused-parameter warning until the implementation lands
    let _ = opaque_record_key;
    todo!("outbound_commit_transact_value is not implemented")
}
/// Perform rollback transaction queries on the network for a single record
///
/// TODO(review): checkpoint stub. The original body was empty, which does not
/// compile for a non-unit return type; `todo!()` keeps the signature in place
/// until the ROLLBACK fanout is implemented.
#[instrument(level = "trace", target = "dht", skip_all, err)]
pub(super) async fn outbound_rollback_transact_value(
    &self,
    opaque_record_key: OpaqueRecordKey,
) -> VeilidAPIResult<OutboundTransactValueResult> {
    // Silence the unused-parameter warning until the implementation lands
    let _ = opaque_record_key;
    todo!("outbound_rollback_transact_value is not implemented")
}
/// Handle a received 'TransactValue' query
#[instrument(level = "trace", target = "dht", skip_all)]
pub async fn inbound_transact_value(
&self,
opaque_record_key: OpaqueRecordKey,
transaction_id: Option<u64>,
command: TransactValueCommand,
descriptor: Option<SignedValueDescriptor>,
writer: PublicKey,
) -> VeilidAPIResult<NetworkResult<InboundTransactValueResult>> {
let mut inner = self.inner.lock().await;
Ok(NetworkResult::value(InboundTransactValueResult::Success(transact_result)))
}
}

View file

@ -36,7 +36,8 @@ impl SignedValueData {
vcrypto.verify(&writer, value_data_bytes, &self.signature)
} else {
// old approach, use make_signature_bytes()
let value_data_bytes = Self::make_signature_bytes(&self.value_data, owner, subkey)?;
let value_data_bytes =
Self::legacy_make_signature_bytes(&self.value_data, owner, subkey)?;
// validate signature
vcrypto.verify(&writer, &value_data_bytes, &self.signature)
}
@ -58,7 +59,7 @@ impl SignedValueData {
vcrypto.sign(&writer, writer_secret, value_data_bytes)?
} else {
// old approach, use make_signature_bytes()
let value_data_bytes = Self::make_signature_bytes(&value_data, owner, subkey)?;
let value_data_bytes = Self::legacy_make_signature_bytes(&value_data, owner, subkey)?;
// create signature
vcrypto.sign(&writer, writer_secret, &value_data_bytes)?
};
@ -86,7 +87,7 @@ impl SignedValueData {
+ self.value_data.total_size()
}
fn make_signature_bytes(
fn legacy_make_signature_bytes(
value_data: &EncryptedValueData,
owner: &PublicKey,
subkey: ValueSubkey,

View file

@ -43,6 +43,216 @@ impl OutboundWatchValueResult {
}
impl StorageManager {
/// Get the set of nodes in our active watches
pub async fn get_outbound_watch_nodes(&self) -> Vec<Destination> {
    let inner = self.inner.lock().await;

    let mut destinations = vec![];
    let mut seen_entries: HashSet<HashAtom<BucketEntry>> = HashSet::new();
    for outbound_watch in inner.outbound_watch_manager.outbound_watches.values() {
        // Only watches with an active state contribute nodes
        let Some(current) = outbound_watch.state() else {
            continue;
        };
        for node_ref in &current.watch_node_refs(&inner.outbound_watch_manager.per_node_states)
        {
            // Deduplicate nodes shared by multiple watches
            if !seen_entries.insert(node_ref.entry().hash_atom()) {
                continue;
            }
            destinations.push(
                Destination::direct(
                    node_ref.routing_domain_filtered(RoutingDomain::PublicInternet),
                )
                .with_safety(current.params().safety_selection.clone()),
            );
        }
    }
    destinations
}
/// Create, update or cancel an outbound watch to a DHT value
#[instrument(level = "trace", target = "stor", skip_all)]
pub async fn watch_values(
&self,
record_key: RecordKey,
subkeys: ValueSubkeyRangeSet,
expiration: Timestamp,
count: u32,
) -> VeilidAPIResult<bool> {
let Ok(_guard) = self.startup_lock.enter() else {
apibail_not_initialized!();
};
// Obtain the watch change lock
// (may need to wait for background operations to complete on the watch)
let watch_lock = self.outbound_watch_lock_table.lock_tag(record_key).await;
self.watch_values_inner(watch_lock, subkeys, expiration, count)
.await
}
/// Cancel an outbound watch on `record_key`, either entirely or for just the
/// given `subkeys`.
///
/// An empty `subkeys` set is treated as a request to cancel the full subkey
/// range. Returns `Ok(true)` if a watch remains active after the change and
/// `Ok(false)` if there was no watch, or no change was needed.
///
/// Errors if the storage manager is not initialized or the record is not open.
#[instrument(level = "trace", target = "stor", skip_all)]
pub async fn cancel_watch_values(
    &self,
    record_key: RecordKey,
    subkeys: ValueSubkeyRangeSet,
) -> VeilidAPIResult<bool> {
    // Guard against use before startup / during shutdown
    let Ok(_guard) = self.startup_lock.enter() else {
        apibail_not_initialized!();
    };
    // Obtain the watch change lock
    // (may need to wait for background operations to complete on the watch)
    let watch_lock = self
        .outbound_watch_lock_table
        .lock_tag(record_key.clone())
        .await;
    // Calculate change to existing watch
    // (inner state lock is held only inside this scope; the result tuple is
    // what gets forwarded to watch_values_inner below)
    let (subkeys, count, expiration_ts) = {
        let inner = self.inner.lock().await;
        let opaque_record_key = record_key.opaque();
        // The record must be open to modify its watch state
        let Some(_opened_record) = inner.opened_records.get(&opaque_record_key) else {
            apibail_generic!("record not open");
        };
        // See what watch we have currently if any
        let Some(outbound_watch) = inner
            .outbound_watch_manager
            .outbound_watches
            .get(&record_key)
        else {
            // If we didn't have an active watch, then we can just return false because there's nothing to do here
            return Ok(false);
        };
        // Ensure we have a 'desired' watch state
        let Some(desired) = outbound_watch.desired() else {
            // If we didn't have a desired watch, then we're already cancelling;
            // report whether any watch state is still winding down
            let still_active = outbound_watch.state().is_some();
            return Ok(still_active);
        };
        // Rewrite subkey range if empty to full
        let subkeys = if subkeys.is_empty() {
            ValueSubkeyRangeSet::full()
        } else {
            subkeys
        };
        // Reduce the subkey range: keep only the desired subkeys NOT being cancelled
        let new_subkeys = desired.subkeys.difference(&subkeys);
        // If no change is happening return false
        if new_subkeys == desired.subkeys {
            return Ok(false);
        }
        // If we have no subkeys left, then set the count to zero to indicate a full cancellation
        let count = if new_subkeys.is_empty() {
            0
        } else if let Some(state) = outbound_watch.state() {
            // Preserve whatever change count remains on the live watch
            state.remaining_count()
        } else {
            desired.count
        };
        (new_subkeys, count, desired.expiration_ts)
    };
    // Update the watch. This just calls through to the above watch_values_inner() function
    // This will update the active_watch so we don't need to do that in this routine.
    self.watch_values_inner(watch_lock, subkeys, expiration_ts, count)
        .await
}
////////////////////////////////////////////////////////////////////////
#[instrument(level = "trace", target = "stor", skip_all)]
async fn watch_values_inner(
&self,
watch_lock: AsyncTagLockGuard<RecordKey>,
subkeys: ValueSubkeyRangeSet,
expiration: Timestamp,
count: u32,
) -> VeilidAPIResult<bool> {
let record_key = watch_lock.tag();
// Obtain the inner state lock
let mut inner = self.inner.lock().await;
let opaque_record_key = record_key.opaque();
// Get the safety selection and the writer we opened this record
let (safety_selection, opt_watcher) = {
let Some(opened_record) = inner.opened_records.get(&opaque_record_key) else {
// Record must be opened already to change watch
apibail_generic!("record not open");
};
(
opened_record.safety_selection(),
opened_record.writer().cloned(),
)
};
// Rewrite subkey range if empty to full
let subkeys = if subkeys.is_empty() {
ValueSubkeyRangeSet::full()
} else {
subkeys
};
// Get the schema so we can truncate the watch to the number of subkeys
let schema = if let Some(lrs) = inner.local_record_store.as_ref() {
let Some(schema) = lrs.peek_record(&opaque_record_key, |r| r.schema()) else {
apibail_generic!("no local record found");
};
schema
} else {
apibail_not_initialized!();
};
let subkeys = schema.truncate_subkeys(&subkeys, None);
// Calculate desired watch parameters
let desired_params = if count == 0 {
// Cancel
None
} else {
// Get the minimum expiration timestamp we will accept
let rpc_timeout =
TimestampDuration::new_ms(self.config().network.rpc.timeout_ms.into());
let min_expiration_ts = Timestamp::now() + rpc_timeout;
let expiration_ts = if expiration.as_u64() == 0 {
expiration
} else if expiration < min_expiration_ts {
apibail_invalid_argument!("expiration is too soon", "expiration", expiration);
} else {
expiration
};
// Create or modify
Some(OutboundWatchParameters {
expiration_ts,
count,
subkeys,
opt_watcher,
safety_selection,
})
};
// Modify the 'desired' state of the watch or add one if it does not exist
let active = desired_params.is_some();
inner
.outbound_watch_manager
.set_desired_watch(record_key, desired_params);
// Drop the lock for network access
drop(inner);
Ok(active)
}
/// Perform a 'watch value cancel' on the network without fanout
#[instrument(target = "watch", level = "debug", skip_all, err)]
pub(super) async fn outbound_watch_value_cancel(
@ -1021,16 +1231,6 @@ impl StorageManager {
));
}
// Try from local and remote record stores
let Some(local_record_store) = inner.local_record_store.as_mut() else {
apibail_not_initialized!();
};
if local_record_store.contains_record(&opaque_record_key) {
return local_record_store
.watch_record(opaque_record_key, params, watch_id)
.await
.map(NetworkResult::value);
}
let Some(remote_record_store) = inner.remote_record_store.as_mut() else {
apibail_not_initialized!();
};
@ -1040,6 +1240,7 @@ impl StorageManager {
.await
.map(NetworkResult::value);
}
// No record found
Ok(NetworkResult::value(InboundWatchResult::Rejected))
}

View file

@ -11,7 +11,7 @@ use super::*;
pub struct DHTRecordReport {
/// The actual subkey range within the schema being reported on
/// This may be a subset of the requested range if it exceeds the schema limits
/// or has more than 512 subkeys
/// or has more than `DHTSchema::MAX_SUBKEY_COUNT` (1024) subkeys
subkeys: ValueSubkeyRangeSet,
/// The subkeys that have been writen offline that still need to be flushed
offline_subkeys: ValueSubkeyRangeSet,

View file

@ -25,14 +25,7 @@ impl EncryptedValueData {
apibail_generic!("invalid size");
}
let estimated_capacity = 128
+ data.len()
+ writer.ref_value().len()
+ nonce.as_ref().map_or(0, |nonce| nonce.len());
let mut memory = vec![0; 32767 + 4096];
let allocator = capnp::message::SingleSegmentAllocator::new(&mut memory);
let mut message_builder = ::capnp::message::Builder::new(allocator);
let mut message_builder = ::capnp::message::Builder::new_default();
let mut builder = message_builder.init_root::<veilid_capnp::value_data::Builder>();
builder.set_seq(seq);
@ -47,11 +40,11 @@ impl EncryptedValueData {
capnp_encode_nonce(&nonce_val, &mut nb);
}
let mut blob = Vec::with_capacity(estimated_capacity);
capnp::serialize::write_message(&mut blob, &message_builder).unwrap();
let blob = canonical_message_builder_to_vec_unpacked(message_builder)
.map_err(VeilidAPIError::internal)?;
// Ensure the blob could be decoded without errors, allowing to do unwrap() in getter methods
validate_value_data_blob(&blob).map_err(VeilidAPIError::generic)?;
validate_value_data_blob(&blob).map_err(VeilidAPIError::internal)?;
Ok(Self { blob })
}