Mirror of https://gitlab.com/veilid/veilid.git (synced 2025-10-15 12:00:51 -04:00)

Commit 285d98a185: Implement DHT record encryption
Parent: 848da0ae4e
84 changed files with 2353 additions and 1077 deletions
@@ -1,9 +1,10 @@
**UNRELEASED**

- _0.5.0 BREAKING CHANGES_
- Rename crypto types:
- Many on-the-wire encoding changes: [#453](https://gitlab.com/veilid/veilid/-/issues/453)
- Rename crypto types: [#463](https://gitlab.com/veilid/veilid/-/issues/463)
- {CryptoBytes} -> Bare{CryptoBytes}
- Typed{CryptoBytes} -> {CryptoBytes}
- Typed{CryptoBytes} -> {CryptoBytes} [#465](https://gitlab.com/veilid/veilid/-/issues/465)
- Handle NotInSchema:
- switch match alternative failures and list decode failures to `RPCError::Ignore` from `RPCError::Protocol` to keep from punishing newer nodes
- add `.ignore_ok()` trait that makes it easy to 'soft fail' `RPCError::Ignore`
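A minimal sketch of the kind of helper the `.ignore_ok()` bullet describes; this uses a simplified stand-in error enum, and the real trait name, signature, and `RPCError` variants in veilid-core may differ:

// Hypothetical stand-in types; not the actual veilid-core definitions.
#[derive(Debug)]
enum DemoRpcError {
    Ignore(String),
    Protocol(String),
}

trait IgnoreOk<T> {
    // Turn an `Ignore` error into a successful `None`, propagate everything else.
    fn ignore_ok(self) -> Result<Option<T>, DemoRpcError>;
}

impl<T> IgnoreOk<T> for Result<T, DemoRpcError> {
    fn ignore_ok(self) -> Result<Option<T>, DemoRpcError> {
        match self {
            Ok(v) => Ok(Some(v)),
            Err(DemoRpcError::Ignore(_)) => Ok(None),
            Err(e) => Err(e),
        }
    }
}

fn main() {
    let res: Result<u32, DemoRpcError> = Err(DemoRpcError::Ignore("newer schema".into()));
    // 'Soft fail': the ignorable error becomes Ok(None) instead of aborting the caller.
    assert_eq!(res.ignore_ok().unwrap(), None);
}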
@@ -22,16 +23,19 @@
- Eliminated DHTW capability, merged into DHTV capability, now there is only one DHT enabling/disabling capability and all operations are part of it
- Crypto / CryptoSystem functions now use typed keys everywhere (#483)
- Eliminated 'best' CryptoKind concept, crypto kinds must now be explicitly stated, otherwise upgrades of veilid-core that change the 'best' CryptoKind could break functionality silently.
- Encryption is enabled by default for all DHT operations, closes ([#300](https://gitlab.com/veilid/veilid/-/issues/300)) (@neequ57)

- veilid-core:
- Add private route example
- Add `require_inbound_relay` option in VeilidConfig. Default is false, but if enabled, forces OutboundOnly/InboundRelay mode. Can be used as an extra layer of IP address obscurity for some threat models. (@neequ57)
- Fix crash when peer info has missing or unsupported node ids
- Add 'auto' mode for detect_address_changes
- Hop counts removed from private routes [#466](https://gitlab.com/veilid/veilid/-/issues/466)
- Improved `TypedXXX` conversion traits, including to and from `Vec<u8>`
- Ensure utf8 replacement characters are never emitted in logs
- Export `CRYPTO_KIND_VLD0` constant
- Added SequenceOrdering enum to represent ordering mode for protocols rather than a bool
- `RecordKey`s are now validated on both server side and client side of DHT RPC operations, closes ([#299](https://gitlab.com/veilid/veilid/-/issues/299))

- veilid-python:
- Correction of type hints
@@ -82,10 +82,10 @@ impl CommandProcessor {
pub fn set_client_api_connection(&self, capi: ClientApiConnection) {
self.inner.lock().capi = Some(capi);
}
fn inner(&self) -> MutexGuard<CommandProcessorInner> {
fn inner(&self) -> MutexGuard<'_, CommandProcessorInner> {
self.inner.lock()
}
fn inner_mut(&self) -> MutexGuard<CommandProcessorInner> {
fn inner_mut(&self) -> MutexGuard<'_, CommandProcessorInner> {
self.inner.lock()
}
fn ui_sender(&self) -> Box<dyn UISender> {
@@ -20,8 +20,10 @@ path = "src/lib.rs"
[features]

default = ["default-tokio"]

# Common features
default = ["enable-crypto-vld0", "rt-tokio"]
default-tokio = ["enable-crypto-vld0", "rt-tokio"]
default-async-std = ["enable-crypto-vld0", "rt-async-std"]
default-wasm = ["enable-crypto-vld0"]
@@ -4,7 +4,7 @@
##############################

# DHT Record Key
struct RecordKey @0x875582886b9407f2 {
struct OpaqueRecordKey @0x875582886b9407f2 {
kind @0 :CryptoKind;
value @1 :Data;
}
@@ -284,17 +284,23 @@ struct SubkeyRange @0xeda3078ac0f1ec6b {
start @0 :Subkey; # the start of a subkey range
end @1 :Subkey; # the end of a subkey range
}

struct SignedValueData @0xbc21055c2442405f {

struct ValueData @0xacbe86e97ace772a {
seq @0 :ValueSeqNum; # sequence number of value
data @1 :Data; # value or subvalue contents
writer @2 :PublicKey; # the public key of the writer
signature @3 :Signature; # signature of data at this subkey, using the writer key (which may be the same as the owner key)
nonce @3 :Nonce; # nonce used for `data` encryption
}

struct SignedValueData @0xbc21055c2442405f {
valueData @0 :Data; # ValueData serialized to bytes
signature @1 :Signature; # signature of data at this subkey, using the writer key (which may be the same as the owner key)
# signature covers:
# * owner public key
# * subkey
# * sequence number
# * data
# * nonce
# signature does not need to cover schema because schema is validated upon every set
# so the data either fits, or it doesn't.
}
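To make the coverage list above concrete, here is an illustrative sketch of assembling the signed fields; the byte layout, integer widths, and field order used here are assumptions for illustration only, not Veilid's actual wire encoding:

// Illustration only: shows WHICH fields feed the subkey signature per the
// schema comment above, not the real byte layout used by veilid-core.
fn illustrative_signature_input(
    owner_public_key: &[u8],
    subkey: u32,
    seq: u32,
    data: &[u8],
    nonce: Option<&[u8; 24]>,
) -> Vec<u8> {
    let mut buf = Vec::new();
    buf.extend_from_slice(owner_public_key);
    buf.extend_from_slice(&subkey.to_le_bytes()); // assumed width/endianness
    buf.extend_from_slice(&seq.to_le_bytes());    // assumed width/endianness
    buf.extend_from_slice(data);
    if let Some(n) = nonce {
        buf.extend_from_slice(n);
    }
    // The schema is deliberately not included: it is re-validated on every set,
    // so the data either fits the schema or the set is rejected.
    buf
}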
@@ -308,7 +314,7 @@ struct SignedValueDescriptor @0xf6ffa63ef36d0f73 {

struct OperationGetValueQ @0x83b34ce1e72afc7f {
key @0 :RecordKey; # DHT Key = Hash(ownerKeyKind) of: [ ownerKeyValue, schema ]
key @0 :OpaqueRecordKey; # DHT Key = Hash(ownerKeyKind) of: [ ownerKeyValue, schema ]
subkey @1 :Subkey; # the index of the subkey
wantDescriptor @2 :Bool; # whether or not to include the descriptor for the key
}

@@ -321,7 +327,7 @@ struct OperationGetValueA @0xf97edb86a914d093 {
}

struct OperationSetValueQ @0xb315a71cd3f555b3 {
key @0 :RecordKey; # DHT Key = Hash(ownerKeyKind) of: [ ownerKeyValue, schema ]
key @0 :OpaqueRecordKey; # DHT Key = Hash(ownerKeyKind) of: [ ownerKeyValue, schema ]
subkey @1 :Subkey; # the index of the subkey
value @2 :SignedValueData; # value or subvalue contents (older or equal seq number gets dropped)
descriptor @3 :SignedValueDescriptor; # optional: the descriptor if needed

@@ -334,7 +340,7 @@ struct OperationSetValueA @0xb5ff5b18c0d7b918 {
}

struct OperationWatchValueQ @0xddae6e08cea11e84 {
key @0 :RecordKey; # key for value to watch
key @0 :OpaqueRecordKey; # key for value to watch
subkeys @1 :List(SubkeyRange); # subkey range to watch (up to 512 subranges). An empty range here should not be specified unless cancelling a watch (count=0).
expiration @2 :UInt64; # requested timestamp when this watch will expire in usec since epoch (watch can return less, 0 for max)
count @3 :UInt32; # requested number of changes to watch for (0 = cancel, 1 = single shot, 2+ = counter, UINT32_MAX = continuous)

@@ -351,7 +357,7 @@ struct OperationWatchValueA @0xaeed4433b1c35108 {
}

struct OperationInspectValueQ @0xe4d014b5a2f6ffaf {
key @0 :RecordKey; # DHT Key = Hash(ownerKeyKind) of: [ ownerKeyValue, schema ]
key @0 :OpaqueRecordKey; # DHT Key = Hash(ownerKeyKind) of: [ ownerKeyValue, schema ]
subkeys @1 :List(SubkeyRange); # subkey range to inspect (up to 512 total subkeys), if empty this implies 0..=511
wantDescriptor @2 :Bool; # whether or not to include the descriptor for the key
}

@@ -363,7 +369,7 @@ struct OperationInspectValueA @0x8540edb633391b2a {
}

struct OperationValueChanged @0xbf9d00e88fd96623 {
key @0 :RecordKey; # key for value that changed
key @0 :OpaqueRecordKey; # key for value that changed
subkeys @1 :List(SubkeyRange); # subkey range that changed (up to 512 ranges at a time, if empty this is a watch expiration notice)
count @2 :UInt32; # remaining changes left (0 means watch has expired)
watchId @3 :UInt64; # watch id this value change came from

(File diff suppressed because it is too large.)
@@ -19,7 +19,7 @@ pub fn fix_keypairs() -> Vec<KeyPair> {
]
}

#[expect(dead_code)]
#[allow(dead_code)]
pub fn fix_keypair() -> KeyPair {
fix_keypairs()[0].clone()
}

@@ -76,7 +76,12 @@ pub fn fix_fake_bare_public_key() -> BarePublicKey {
pub fn fix_fake_bare_record_key() -> BareRecordKey {
let mut fake_key = [0u8; VLD0_HASH_DIGEST_LENGTH];
random_bytes(&mut fake_key);
BareRecordKey::new(&fake_key)
let mut fake_encryption_key = [0u8; VLD0_HASH_DIGEST_LENGTH];
random_bytes(&mut fake_encryption_key);
BareRecordKey::new(
BareOpaqueRecordKey::new(&fake_key),
Some(BareSharedSecret::new(&fake_encryption_key)),
)
}

pub fn fix_fake_bare_route_id() -> BareRouteId {
@@ -262,7 +262,7 @@ byte_array_type!(pub BareHashDigest);
// Untyped shared secret (variable length)
byte_array_type!(pub BareSharedSecret);
// Untyped record key (hashed to 32 bytes)
byte_array_type!(pub BareRecordKey);
byte_array_type!(pub BareOpaqueRecordKey);
// Untyped route id (hashed to 32 bytes)
byte_array_type!(pub BareRouteId);
// Untyped node id (hashed to 32 bytes)
@@ -46,9 +46,11 @@ mod byte_array_types;
mod crypto_typed;
mod crypto_typed_group;
mod keypair;
mod record_key;

pub use byte_array_types::*;
pub use keypair::*;
pub use record_key::*;

macro_rules! impl_crypto_typed_and_group {
($visibility:vis $name:ident) => {

@@ -73,13 +75,14 @@ impl_crypto_typed_and_group_and_vec!(pub SecretKey);
impl_crypto_typed_and_group_and_vec!(pub Signature);
impl_crypto_typed_and_group_and_vec!(pub SharedSecret);
impl_crypto_typed_and_group_and_vec!(pub HashDigest);
impl_crypto_typed_and_group_and_vec!(pub RecordKey);
impl_crypto_typed_and_group_and_vec!(pub OpaqueRecordKey);
impl_crypto_typed_and_group_and_vec!(pub NodeId);
impl_crypto_typed_and_group_and_vec!(pub RouteId);
impl_crypto_typed_and_group_and_vec!(pub MemberId);

// No vector representation
impl_crypto_typed_and_group!(pub KeyPair);
impl_crypto_typed_and_group!(pub RecordKey);

// Internal types
impl_crypto_typed!(pub(crate) HashCoordinate);
veilid-core/src/crypto/types/record_key.rs (new file, 182 lines)
@@ -0,0 +1,182 @@
use super::*;

#[cfg_attr(
    all(target_arch = "wasm32", target_os = "unknown"),
    derive(Tsify),
    tsify(from_wasm_abi, into_wasm_abi)
)]
#[derive(Clone, Default, PartialOrd, Ord, PartialEq, Eq, Hash)]
#[must_use]
pub struct BareRecordKey {
    key: BareOpaqueRecordKey,
    encryption_key: Option<BareSharedSecret>,
}

impl BareRecordKey {
    pub fn new(key: BareOpaqueRecordKey, encryption_key: Option<BareSharedSecret>) -> Self {
        Self {
            key,
            encryption_key,
        }
    }
    pub fn ref_key(&self) -> &BareOpaqueRecordKey {
        &self.key
    }
    pub fn ref_encryption_key(&self) -> Option<&BareSharedSecret> {
        self.encryption_key.as_ref()
    }
    pub fn split(&self) -> (BareOpaqueRecordKey, Option<BareSharedSecret>) {
        (self.key.clone(), self.encryption_key.clone())
    }
    pub fn into_split(self) -> (BareOpaqueRecordKey, Option<BareSharedSecret>) {
        (self.key, self.encryption_key)
    }
}

#[cfg_attr(all(target_arch = "wasm32", target_os = "unknown"), wasm_bindgen)]
#[allow(dead_code)]
impl BareRecordKey {
    pub fn key(&self) -> BareOpaqueRecordKey {
        self.key.clone()
    }
    pub fn encryption_key(&self) -> Option<BareSharedSecret> {
        self.encryption_key.clone()
    }
    pub fn encode(&self) -> String {
        if let Some(encryption_key) = &self.encryption_key {
            format!("{}:{}", self.key.encode(), encryption_key.encode())
        } else {
            self.key.encode()
        }
    }
    pub fn encoded_len(&self) -> usize {
        if let Some(encryption_key) = &self.encryption_key {
            self.key.encoded_len() + 1 + encryption_key.encoded_len()
        } else {
            self.key.encoded_len()
        }
    }
    pub fn try_decode(input: &str) -> VeilidAPIResult<Self> {
        let b = input.as_bytes();
        Self::try_decode_bytes(b)
    }
    pub fn try_decode_bytes(b: &[u8]) -> VeilidAPIResult<Self> {
        let parts: Vec<_> = b.split(|x| *x == b':').collect();
        match parts[..] {
            [key] => {
                let key = BareOpaqueRecordKey::try_decode_bytes(key)?;
                Ok(BareRecordKey {
                    key,
                    encryption_key: None,
                })
            }
            [key, encryption_key] => {
                let key = BareOpaqueRecordKey::try_decode_bytes(key)?;
                let encryption_key = BareSharedSecret::try_decode_bytes(encryption_key)?;
                Ok(BareRecordKey {
                    key,
                    encryption_key: Some(encryption_key),
                })
            }
            _ => {
                apibail_parse_error!(
                    "input has incorrect parts",
                    format!("parts={}", parts.len())
                );
            }
        }
    }
}

impl fmt::Display for BareRecordKey {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "{}", self.encode())
    }
}

impl fmt::Debug for BareRecordKey {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "BareRecordKey({})", self.encode())
    }
}

impl From<&BareRecordKey> for String {
    fn from(value: &BareRecordKey) -> Self {
        value.encode()
    }
}

impl FromStr for BareRecordKey {
    type Err = VeilidAPIError;

    fn from_str(s: &str) -> Result<Self, Self::Err> {
        BareRecordKey::try_from(s)
    }
}

impl TryFrom<String> for BareRecordKey {
    type Error = VeilidAPIError;
    fn try_from(value: String) -> Result<Self, Self::Error> {
        BareRecordKey::try_from(value.as_str())
    }
}

impl TryFrom<&str> for BareRecordKey {
    type Error = VeilidAPIError;
    fn try_from(value: &str) -> Result<Self, Self::Error> {
        Self::try_decode(value)
    }
}

impl serde::Serialize for BareRecordKey {
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: serde::Serializer,
    {
        let s = self.encode();
        serde::Serialize::serialize(&s, serializer)
    }
}

impl<'de> serde::Deserialize<'de> for BareRecordKey {
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: serde::Deserializer<'de>,
    {
        let s = <String as serde::Deserialize>::deserialize(deserializer)?;
        if s.is_empty() {
            return Ok(BareRecordKey::default());
        }
        BareRecordKey::try_decode(s.as_str()).map_err(serde::de::Error::custom)
    }
}

////////////////////////////////////////////////////////////////////////////

impl RecordKey {
    pub fn opaque(&self) -> OpaqueRecordKey {
        OpaqueRecordKey::new(self.kind, self.ref_value().key())
    }
    pub fn into_split(self) -> (OpaqueRecordKey, Option<SharedSecret>) {
        let kind = self.kind;
        let (bork, bss) = self.into_value().into_split();
        (
            OpaqueRecordKey::new(kind, bork),
            bss.map(|x| SharedSecret::new(kind, x)),
        )
    }
}

#[cfg_attr(all(target_arch = "wasm32", target_os = "unknown"), wasm_bindgen)]
#[allow(dead_code)]
impl RecordKey {
    #[cfg_attr(
        all(target_arch = "wasm32", target_os = "unknown"),
        wasm_bindgen(getter)
    )]
    pub fn encryption_key(&self) -> Option<SharedSecret> {
        self.ref_value()
            .encryption_key()
            .map(|v| SharedSecret::new(self.kind, v.clone()))
    }
}
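A brief usage sketch of the new key type, assumed to live alongside the module above; the constructors and method names follow the code shown, while the 32-byte lengths and the calling context are assumptions for illustration:

// Hedged usage sketch: types and methods come from the file above; byte
// lengths and the surrounding setup are illustrative assumptions.
fn record_key_round_trip() -> VeilidAPIResult<()> {
    let opaque = BareOpaqueRecordKey::new(&[0u8; 32]);
    let secret = BareSharedSecret::new(&[1u8; 32]);

    // A full record key is the opaque (routable) key plus an optional
    // encryption key for the record contents.
    let bare = BareRecordKey::new(opaque.clone(), Some(secret));

    // Text form is "<opaque>:<encryption_key>"; without an encryption key it
    // is just the opaque part, so unencrypted keys still parse.
    let encoded = bare.encode();
    let parsed = BareRecordKey::try_decode(&encoded)?;
    assert_eq!(parsed, bare);

    // Only the opaque part appears in the DHT RPC schema shown earlier.
    let (routing_part, decryption_part) = parsed.into_split();
    assert_eq!(routing_part, opaque);
    assert!(decryption_part.is_some());
    Ok(())
}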
@@ -414,9 +414,9 @@ impl BootstrapRecord {
let mut validated = false;
for key in signing_keys {
if let Some(valid_keys) = network_manager.crypto().verify_signatures(
&[key.clone()],
std::slice::from_ref(key),
signed_str.as_bytes(),
&[sig.clone()],
std::slice::from_ref(&sig),
)? {
if valid_keys.contains(key) {
validated = true;
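This hunk, like several below, swaps a one-element slice literal built from a clone for `std::slice::from_ref`, which borrows a length-1 `&[T]` directly from the existing value. A small self-contained illustration of the equivalence (the `Key` type here is a stand-in, not a Veilid type):

// Stand-in type for illustration; the real keys are Veilid crypto types.
#[derive(Clone, PartialEq, Debug)]
struct Key([u8; 4]);

fn takes_slice(keys: &[Key]) -> usize {
    keys.len()
}

fn main() {
    let key = Key([1, 2, 3, 4]);
    // Before: allocate a temporary array and clone just to get a one-element slice.
    assert_eq!(takes_slice(&[key.clone()]), 1);
    // After: borrow the same value as a length-1 slice, no clone needed.
    assert_eq!(takes_slice(std::slice::from_ref(&key)), 1);
}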
@ -336,9 +336,7 @@ impl ConnectionManager {
|
|||
Err(ConnectionTableAddError::AddressFilter(conn, e)) => {
|
||||
// Connection filtered
|
||||
let desc = conn.flow();
|
||||
let _ = inner
|
||||
.sender
|
||||
.send(ConnectionManagerEvent::Dead(Box::new(conn)));
|
||||
let _ = inner.sender.send(ConnectionManagerEvent::Dead(conn));
|
||||
return Ok(NetworkResult::no_connection_other(format!(
|
||||
"connection filtered: {:?} ({})",
|
||||
desc, e
|
||||
|
@ -348,9 +346,7 @@ impl ConnectionManager {
|
|||
// Connection already exists
|
||||
let desc = conn.flow();
|
||||
veilid_log!(self debug "== Connection already exists: {:?}", conn.debug_print(Timestamp::now()));
|
||||
let _ = inner
|
||||
.sender
|
||||
.send(ConnectionManagerEvent::Dead(Box::new(conn)));
|
||||
let _ = inner.sender.send(ConnectionManagerEvent::Dead(conn));
|
||||
return Ok(NetworkResult::no_connection_other(format!(
|
||||
"connection already exists: {:?}",
|
||||
desc
|
||||
|
@ -360,9 +356,7 @@ impl ConnectionManager {
|
|||
// Connection table is full
|
||||
let desc = conn.flow();
|
||||
veilid_log!(self debug "== Connection table full: {:?}", conn.debug_print(Timestamp::now()));
|
||||
let _ = inner
|
||||
.sender
|
||||
.send(ConnectionManagerEvent::Dead(Box::new(conn)));
|
||||
let _ = inner.sender.send(ConnectionManagerEvent::Dead(conn));
|
||||
return Ok(NetworkResult::no_connection_other(format!(
|
||||
"connection table is full: {:?}",
|
||||
desc
|
||||
|
|
|
@ -12,22 +12,22 @@ const PRIORITY_FLOW_PERCENTAGE: usize = 25;
|
|||
#[derive(ThisError, Debug)]
|
||||
pub enum ConnectionTableAddError {
|
||||
#[error("Connection already added to table")]
|
||||
AlreadyExists(NetworkConnection),
|
||||
AlreadyExists(Box<NetworkConnection>),
|
||||
#[error("Connection address was filtered")]
|
||||
AddressFilter(NetworkConnection, AddressFilterError),
|
||||
AddressFilter(Box<NetworkConnection>, AddressFilterError),
|
||||
#[error("Connection table is full")]
|
||||
TableFull(NetworkConnection),
|
||||
TableFull(Box<NetworkConnection>),
|
||||
}
|
||||
|
||||
impl ConnectionTableAddError {
|
||||
pub fn already_exists(conn: NetworkConnection) -> Self {
|
||||
ConnectionTableAddError::AlreadyExists(conn)
|
||||
ConnectionTableAddError::AlreadyExists(Box::new(conn))
|
||||
}
|
||||
pub fn address_filter(conn: NetworkConnection, err: AddressFilterError) -> Self {
|
||||
ConnectionTableAddError::AddressFilter(conn, err)
|
||||
ConnectionTableAddError::AddressFilter(Box::new(conn), err)
|
||||
}
|
||||
pub fn table_full(conn: NetworkConnection) -> Self {
|
||||
ConnectionTableAddError::TableFull(conn)
|
||||
ConnectionTableAddError::TableFull(Box::new(conn))
|
||||
}
|
||||
}
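The `Box<NetworkConnection>` change above follows the common Rust pattern of boxing a large variant payload, presumably so the error enum (and every `Result` carrying it) stays small; a self-contained illustration of the effect with stand-in types:

use std::mem::size_of;

// Stand-in for a large connection object; the real NetworkConnection is
// much larger than the error enum needs to be.
struct BigConn([u8; 256]);

enum UnboxedError {
    AlreadyExists(BigConn),
    TableFull(BigConn),
}

enum BoxedError {
    AlreadyExists(Box<BigConn>),
    TableFull(Box<BigConn>),
}

fn main() {
    // The unboxed enum is as large as its largest variant; boxing shrinks it
    // to roughly pointer size.
    assert!(size_of::<UnboxedError>() > size_of::<BoxedError>());
    println!(
        "unboxed: {} bytes, boxed: {} bytes",
        size_of::<UnboxedError>(),
        size_of::<BoxedError>()
    );
}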
|
||||
|
||||
|
|
|
@ -1123,7 +1123,7 @@ impl NetworkManager {
|
|||
// Peek at header and see if we need to relay this
|
||||
// If the recipient id is not our node id, then it needs relaying
|
||||
let recipient_id = envelope.get_recipient_id();
|
||||
if !routing_table.matches_own_node_id(&[recipient_id.clone()]) {
|
||||
if !routing_table.matches_own_node_id(std::slice::from_ref(&recipient_id)) {
|
||||
// See if the source node is allowed to resolve nodes
|
||||
// This is a costly operation, so only outbound-relay permitted
|
||||
// nodes are allowed to do this, for example PWA users
|
||||
|
|
|
@ -807,7 +807,10 @@ impl RoutingTableInner {
|
|||
/// Resolve an existing routing table entry and return a reference to it
|
||||
#[instrument(level = "trace", skip_all, err)]
|
||||
pub fn lookup_node_ref(&self, node_id: NodeId) -> EyreResult<Option<NodeRef>> {
|
||||
if self.routing_table().matches_own_node_id(&[node_id.clone()]) {
|
||||
if self
|
||||
.routing_table()
|
||||
.matches_own_node_id(std::slice::from_ref(&node_id))
|
||||
{
|
||||
bail!("can't look up own node id in routing table");
|
||||
}
|
||||
if !VALID_CRYPTO_KINDS.contains(&node_id.kind()) {
|
||||
|
@ -845,7 +848,10 @@ impl RoutingTableInner {
|
|||
where
|
||||
F: FnOnce(Arc<BucketEntry>) -> R,
|
||||
{
|
||||
if self.routing_table().matches_own_node_id(&[node_id.clone()]) {
|
||||
if self
|
||||
.routing_table()
|
||||
.matches_own_node_id(std::slice::from_ref(&node_id))
|
||||
{
|
||||
veilid_log!(self error "can't look up own node id in routing table");
|
||||
return None;
|
||||
}
|
||||
|
|
|
@ -16,12 +16,6 @@ pub struct StateReasonSpan {
|
|||
enter_ts: Timestamp,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
|
||||
pub struct StateSpan {
|
||||
state: BucketEntryState,
|
||||
enter_ts: Timestamp,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Default)]
|
||||
pub struct StateStatsAccounting {
|
||||
rolling_state_reason_spans: VecDeque<StateReasonSpan>,
|
||||
|
|
|
@ -22,6 +22,17 @@ impl BareNodeId {
|
|||
}
|
||||
}
|
||||
|
||||
impl OpaqueRecordKey {
|
||||
pub(crate) fn to_hash_coordinate(&self) -> HashCoordinate {
|
||||
HashCoordinate::new(self.kind(), self.ref_value().to_bare_hash_coordinate())
|
||||
}
|
||||
}
|
||||
impl BareOpaqueRecordKey {
|
||||
pub(crate) fn to_bare_hash_coordinate(&self) -> BareHashCoordinate {
|
||||
BareHashCoordinate::new(self)
|
||||
}
|
||||
}
|
||||
|
||||
impl RecordKey {
|
||||
pub(crate) fn to_hash_coordinate(&self) -> HashCoordinate {
|
||||
HashCoordinate::new(self.kind(), self.ref_value().to_bare_hash_coordinate())
|
||||
|
@ -29,7 +40,7 @@ impl RecordKey {
|
|||
}
|
||||
impl BareRecordKey {
|
||||
pub(crate) fn to_bare_hash_coordinate(&self) -> BareHashCoordinate {
|
||||
BareHashCoordinate::new(self)
|
||||
BareHashCoordinate::new(self.ref_key())
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -53,8 +53,8 @@ macro_rules! define_untyped_byte_data_coder {
|
|||
};
|
||||
}
|
||||
|
||||
// RecordKey
|
||||
define_typed_byte_data_coder!(record_key, RecordKey);
|
||||
// OpaqueRecordKey
|
||||
define_typed_byte_data_coder!(opaque_record_key, OpaqueRecordKey);
|
||||
// BlockId
|
||||
#[cfg(feature = "unstable-blockstore")]
|
||||
define_typed_byte_data_coder!(block_id, BlockId);
|
||||
|
|
|
@ -3,32 +3,23 @@ use crate::storage_manager::{SignedValueData, SignedValueDescriptor};
|
|||
|
||||
const MAX_GET_VALUE_A_PEERS_LEN: usize = 20;
|
||||
|
||||
#[derive(Clone)]
|
||||
#[derive(Debug, Clone)]
|
||||
pub(in crate::rpc_processor) struct ValidateGetValueContext {
|
||||
pub opaque_record_key: OpaqueRecordKey,
|
||||
pub last_descriptor: Option<SignedValueDescriptor>,
|
||||
pub subkey: ValueSubkey,
|
||||
pub crypto_kind: CryptoKind,
|
||||
}
|
||||
|
||||
impl fmt::Debug for ValidateGetValueContext {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
f.debug_struct("ValidateGetValueContext")
|
||||
.field("last_descriptor", &self.last_descriptor)
|
||||
.field("subkey", &self.subkey)
|
||||
.field("crypto_kind", &self.crypto_kind)
|
||||
.finish()
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
pub(in crate::rpc_processor) struct RPCOperationGetValueQ {
|
||||
key: RecordKey,
|
||||
key: OpaqueRecordKey,
|
||||
subkey: ValueSubkey,
|
||||
want_descriptor: bool,
|
||||
}
|
||||
|
||||
impl RPCOperationGetValueQ {
|
||||
pub fn new(key: RecordKey, subkey: ValueSubkey, want_descriptor: bool) -> Self {
|
||||
pub fn new(key: OpaqueRecordKey, subkey: ValueSubkey, want_descriptor: bool) -> Self {
|
||||
Self {
|
||||
key,
|
||||
subkey,
|
||||
|
@ -48,7 +39,7 @@ impl RPCOperationGetValueQ {
|
|||
// pub fn want_descriptor(&self) -> bool {
|
||||
// self.want_descriptor
|
||||
// }
|
||||
pub fn destructure(self) -> (RecordKey, ValueSubkey, bool) {
|
||||
pub fn destructure(self) -> (OpaqueRecordKey, ValueSubkey, bool) {
|
||||
(self.key, self.subkey, self.want_descriptor)
|
||||
}
|
||||
|
||||
|
@ -58,7 +49,7 @@ impl RPCOperationGetValueQ {
|
|||
) -> Result<Self, RPCError> {
|
||||
rpc_ignore_missing_property!(reader, key);
|
||||
let k_reader = reader.get_key()?;
|
||||
let key = decode_record_key(&k_reader)?;
|
||||
let key = decode_opaque_record_key(&k_reader)?;
|
||||
let subkey = reader.get_subkey();
|
||||
let want_descriptor = reader.get_want_descriptor();
|
||||
Ok(Self {
|
||||
|
@ -72,7 +63,7 @@ impl RPCOperationGetValueQ {
|
|||
builder: &mut veilid_capnp::operation_get_value_q::Builder,
|
||||
) -> Result<(), RPCError> {
|
||||
let mut k_builder = builder.reborrow().init_key();
|
||||
encode_record_key(&self.key, &mut k_builder);
|
||||
encode_opaque_record_key(&self.key, &mut k_builder);
|
||||
builder.set_subkey(self.subkey);
|
||||
builder.set_want_descriptor(self.want_descriptor);
|
||||
Ok(())
|
||||
|
@ -123,7 +114,9 @@ impl RPCOperationGetValueA {
|
|||
// Validate descriptor
|
||||
if let Some(descriptor) = &self.descriptor {
|
||||
// Ensure the descriptor itself validates
|
||||
descriptor.validate(&vcrypto).map_err(RPCError::protocol)?;
|
||||
descriptor
|
||||
.validate(&vcrypto, &get_value_context.opaque_record_key)
|
||||
.map_err(RPCError::protocol)?;
|
||||
|
||||
// Ensure descriptor matches last one
|
||||
if let Some(last_descriptor) = &get_value_context.last_descriptor {
|
||||
|
|
|
@ -7,6 +7,7 @@ const MAX_INSPECT_VALUE_A_PEERS_LEN: usize = 20;
|
|||
|
||||
#[derive(Debug, Clone)]
|
||||
pub(in crate::rpc_processor) struct ValidateInspectValueContext {
|
||||
pub opaque_record_key: OpaqueRecordKey,
|
||||
pub last_descriptor: Option<SignedValueDescriptor>,
|
||||
pub subkeys: ValueSubkeyRangeSet,
|
||||
pub crypto_kind: CryptoKind,
|
||||
|
@ -14,14 +15,14 @@ pub(in crate::rpc_processor) struct ValidateInspectValueContext {
|
|||
|
||||
#[derive(Debug, Clone)]
|
||||
pub(in crate::rpc_processor) struct RPCOperationInspectValueQ {
|
||||
key: RecordKey,
|
||||
key: OpaqueRecordKey,
|
||||
subkeys: ValueSubkeyRangeSet,
|
||||
want_descriptor: bool,
|
||||
}
|
||||
|
||||
impl RPCOperationInspectValueQ {
|
||||
pub fn new(
|
||||
key: RecordKey,
|
||||
key: OpaqueRecordKey,
|
||||
subkeys: ValueSubkeyRangeSet,
|
||||
want_descriptor: bool,
|
||||
) -> Result<Self, RPCError> {
|
||||
|
@ -44,7 +45,7 @@ impl RPCOperationInspectValueQ {
|
|||
// pub fn want_descriptor(&self) -> bool {
|
||||
// self.want_descriptor
|
||||
// }
|
||||
pub fn destructure(self) -> (RecordKey, ValueSubkeyRangeSet, bool) {
|
||||
pub fn destructure(self) -> (OpaqueRecordKey, ValueSubkeyRangeSet, bool) {
|
||||
(self.key, self.subkeys, self.want_descriptor)
|
||||
}
|
||||
|
||||
|
@ -54,7 +55,7 @@ impl RPCOperationInspectValueQ {
|
|||
) -> Result<Self, RPCError> {
|
||||
rpc_ignore_missing_property!(reader, key);
|
||||
let k_reader = reader.get_key()?;
|
||||
let key = decode_record_key(&k_reader)?;
|
||||
let key = decode_opaque_record_key(&k_reader)?;
|
||||
|
||||
rpc_ignore_missing_property!(reader, subkeys);
|
||||
let sk_reader = reader.get_subkeys()?;
|
||||
|
@ -89,7 +90,7 @@ impl RPCOperationInspectValueQ {
|
|||
builder: &mut veilid_capnp::operation_inspect_value_q::Builder,
|
||||
) -> Result<(), RPCError> {
|
||||
let mut k_builder = builder.reborrow().init_key();
|
||||
encode_record_key(&self.key, &mut k_builder);
|
||||
encode_opaque_record_key(&self.key, &mut k_builder);
|
||||
|
||||
let mut sk_builder = builder.reborrow().init_subkeys(
|
||||
self.subkeys
|
||||
|
@ -174,7 +175,9 @@ impl RPCOperationInspectValueA {
|
|||
// Validate descriptor
|
||||
if let Some(descriptor) = &self.descriptor {
|
||||
// Ensure the descriptor itself validates
|
||||
descriptor.validate(&vcrypto).map_err(RPCError::protocol)?;
|
||||
descriptor
|
||||
.validate(&vcrypto, &inspect_value_context.opaque_record_key)
|
||||
.map_err(RPCError::protocol)?;
|
||||
|
||||
// Ensure descriptor matches last one
|
||||
if let Some(last_descriptor) = &inspect_value_context.last_descriptor {
|
||||
|
|
|
@ -3,26 +3,17 @@ use crate::storage_manager::{SignedValueData, SignedValueDescriptor};
|
|||
|
||||
const MAX_SET_VALUE_A_PEERS_LEN: usize = 20;
|
||||
|
||||
#[derive(Clone)]
|
||||
#[derive(Debug, Clone)]
|
||||
pub(in crate::rpc_processor) struct ValidateSetValueContext {
|
||||
pub opaque_record_key: OpaqueRecordKey,
|
||||
pub descriptor: SignedValueDescriptor,
|
||||
pub subkey: ValueSubkey,
|
||||
pub crypto_kind: CryptoKind,
|
||||
}
|
||||
|
||||
impl fmt::Debug for ValidateSetValueContext {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
f.debug_struct("ValidateSetValueContext")
|
||||
.field("descriptor", &self.descriptor)
|
||||
.field("subkey", &self.subkey)
|
||||
.field("crypto_kind", &self.crypto_kind)
|
||||
.finish()
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
pub(in crate::rpc_processor) struct RPCOperationSetValueQ {
|
||||
key: RecordKey,
|
||||
key: OpaqueRecordKey,
|
||||
subkey: ValueSubkey,
|
||||
value: SignedValueData,
|
||||
descriptor: Option<SignedValueDescriptor>,
|
||||
|
@ -30,7 +21,7 @@ pub(in crate::rpc_processor) struct RPCOperationSetValueQ {
|
|||
|
||||
impl RPCOperationSetValueQ {
|
||||
pub fn new(
|
||||
key: RecordKey,
|
||||
key: OpaqueRecordKey,
|
||||
subkey: ValueSubkey,
|
||||
value: SignedValueData,
|
||||
descriptor: Option<SignedValueDescriptor>,
|
||||
|
@ -64,7 +55,7 @@ impl RPCOperationSetValueQ {
|
|||
pub fn destructure(
|
||||
self,
|
||||
) -> (
|
||||
RecordKey,
|
||||
OpaqueRecordKey,
|
||||
ValueSubkey,
|
||||
SignedValueData,
|
||||
Option<SignedValueDescriptor>,
|
||||
|
@ -78,7 +69,7 @@ impl RPCOperationSetValueQ {
|
|||
) -> Result<Self, RPCError> {
|
||||
rpc_ignore_missing_property!(reader, key);
|
||||
let k_reader = reader.get_key()?;
|
||||
let key = decode_record_key(&k_reader)?;
|
||||
let key = decode_opaque_record_key(&k_reader)?;
|
||||
|
||||
let subkey = reader.get_subkey();
|
||||
|
||||
|
@ -105,7 +96,7 @@ impl RPCOperationSetValueQ {
|
|||
builder: &mut veilid_capnp::operation_set_value_q::Builder,
|
||||
) -> Result<(), RPCError> {
|
||||
let mut k_builder = builder.reborrow().init_key();
|
||||
encode_record_key(&self.key, &mut k_builder);
|
||||
encode_opaque_record_key(&self.key, &mut k_builder);
|
||||
builder.set_subkey(self.subkey);
|
||||
let mut v_builder = builder.reborrow().init_value();
|
||||
encode_signed_value_data(&self.value, &mut v_builder)?;
|
||||
|
@ -157,7 +148,7 @@ impl RPCOperationSetValueA {
|
|||
// Ensure the descriptor itself validates
|
||||
set_value_context
|
||||
.descriptor
|
||||
.validate(&vcrypto)
|
||||
.validate(&vcrypto, &set_value_context.opaque_record_key)
|
||||
.map_err(RPCError::protocol)?;
|
||||
|
||||
if let Some(value) = &self.value {
|
||||
|
|
|
@ -5,7 +5,7 @@ const MAX_VALUE_CHANGED_SUBKEY_RANGES_LEN: usize = 512;
|
|||
|
||||
#[derive(Debug, Clone)]
|
||||
pub(in crate::rpc_processor) struct RPCOperationValueChanged {
|
||||
key: RecordKey,
|
||||
key: OpaqueRecordKey,
|
||||
subkeys: ValueSubkeyRangeSet,
|
||||
count: u32,
|
||||
watch_id: u64,
|
||||
|
@ -14,7 +14,7 @@ pub(in crate::rpc_processor) struct RPCOperationValueChanged {
|
|||
|
||||
impl RPCOperationValueChanged {
|
||||
pub fn new(
|
||||
key: RecordKey,
|
||||
key: OpaqueRecordKey,
|
||||
subkeys: ValueSubkeyRangeSet,
|
||||
count: u32,
|
||||
watch_id: u64,
|
||||
|
@ -58,7 +58,7 @@ impl RPCOperationValueChanged {
|
|||
}
|
||||
|
||||
#[expect(dead_code)]
|
||||
pub fn key(&self) -> &RecordKey {
|
||||
pub fn key(&self) -> &OpaqueRecordKey {
|
||||
&self.key
|
||||
}
|
||||
|
||||
|
@ -85,7 +85,7 @@ impl RPCOperationValueChanged {
|
|||
pub fn destructure(
|
||||
self,
|
||||
) -> (
|
||||
RecordKey,
|
||||
OpaqueRecordKey,
|
||||
ValueSubkeyRangeSet,
|
||||
u32,
|
||||
u64,
|
||||
|
@ -106,7 +106,7 @@ impl RPCOperationValueChanged {
|
|||
) -> Result<Self, RPCError> {
|
||||
rpc_ignore_missing_property!(reader, key);
|
||||
let k_reader = reader.get_key()?;
|
||||
let key = decode_record_key(&k_reader)?;
|
||||
let key = decode_opaque_record_key(&k_reader)?;
|
||||
|
||||
rpc_ignore_missing_property!(reader, subkeys);
|
||||
let sk_reader = reader.get_subkeys()?;
|
||||
|
@ -149,7 +149,7 @@ impl RPCOperationValueChanged {
|
|||
builder: &mut veilid_capnp::operation_value_changed::Builder,
|
||||
) -> Result<(), RPCError> {
|
||||
let mut k_builder = builder.reborrow().init_key();
|
||||
encode_record_key(&self.key, &mut k_builder);
|
||||
encode_opaque_record_key(&self.key, &mut k_builder);
|
||||
|
||||
let mut sk_builder = builder.reborrow().init_subkeys(
|
||||
self.subkeys
|
||||
|
|
|
@ -5,7 +5,7 @@ const MAX_WATCH_VALUE_A_PEERS_LEN: usize = 20;
|
|||
|
||||
#[derive(Debug, Clone)]
|
||||
pub(in crate::rpc_processor) struct RPCOperationWatchValueQ {
|
||||
key: RecordKey,
|
||||
key: OpaqueRecordKey,
|
||||
subkeys: ValueSubkeyRangeSet,
|
||||
expiration: u64,
|
||||
count: u32,
|
||||
|
@ -16,7 +16,7 @@ pub(in crate::rpc_processor) struct RPCOperationWatchValueQ {
|
|||
|
||||
impl RPCOperationWatchValueQ {
|
||||
pub fn new(
|
||||
key: RecordKey,
|
||||
key: OpaqueRecordKey,
|
||||
subkeys: ValueSubkeyRangeSet,
|
||||
expiration: u64,
|
||||
count: u32,
|
||||
|
@ -51,7 +51,7 @@ impl RPCOperationWatchValueQ {
|
|||
|
||||
// signature covers: key, subkeys, expiration, count, using watcher key
|
||||
fn make_signature_data(
|
||||
key: &RecordKey,
|
||||
key: &OpaqueRecordKey,
|
||||
subkeys: &ValueSubkeyRangeSet,
|
||||
expiration: u64,
|
||||
count: u32,
|
||||
|
@ -104,7 +104,7 @@ impl RPCOperationWatchValueQ {
|
|||
}
|
||||
|
||||
#[expect(dead_code)]
|
||||
pub fn key(&self) -> &RecordKey {
|
||||
pub fn key(&self) -> &OpaqueRecordKey {
|
||||
&self.key
|
||||
}
|
||||
|
||||
|
@ -139,7 +139,7 @@ impl RPCOperationWatchValueQ {
|
|||
pub fn destructure(
|
||||
self,
|
||||
) -> (
|
||||
RecordKey,
|
||||
OpaqueRecordKey,
|
||||
ValueSubkeyRangeSet,
|
||||
u64,
|
||||
u32,
|
||||
|
@ -164,7 +164,7 @@ impl RPCOperationWatchValueQ {
|
|||
) -> Result<Self, RPCError> {
|
||||
rpc_ignore_missing_property!(reader, key);
|
||||
let k_reader = reader.get_key()?;
|
||||
let key = decode_record_key(&k_reader)?;
|
||||
let key = decode_opaque_record_key(&k_reader)?;
|
||||
|
||||
rpc_ignore_missing_property!(reader, subkeys);
|
||||
let sk_reader = reader.get_subkeys()?;
|
||||
|
@ -217,7 +217,7 @@ impl RPCOperationWatchValueQ {
|
|||
builder: &mut veilid_capnp::operation_watch_value_q::Builder,
|
||||
) -> Result<(), RPCError> {
|
||||
let mut k_builder = builder.reborrow().init_key();
|
||||
encode_record_key(&self.key, &mut k_builder);
|
||||
encode_opaque_record_key(&self.key, &mut k_builder);
|
||||
|
||||
let mut sk_builder = builder.reborrow().init_subkeys(
|
||||
self.subkeys
|
||||
|
|
|
@ -1,34 +1,122 @@
|
|||
use super::*;
|
||||
use crate::storage_manager::*;
|
||||
|
||||
fn decode_value_data(
|
||||
reader: &veilid_capnp::value_data::Reader,
|
||||
) -> Result<EncryptedValueData, RPCError> {
|
||||
let seq = reader.get_seq();
|
||||
|
||||
rpc_ignore_missing_property!(reader, data);
|
||||
let data = reader.get_data()?.to_vec();
|
||||
|
||||
rpc_ignore_missing_property!(reader, writer);
|
||||
let wr = reader.get_writer()?;
|
||||
let writer = decode_public_key(&wr)?;
|
||||
|
||||
let n = reader.get_nonce()?;
|
||||
let nonce = if n.has_value() {
|
||||
let nonce = decode_nonce(&n)?;
|
||||
if nonce.len() != 24 {
|
||||
return Err(RPCError::protocol("value data nonce has invalid size"));
|
||||
}
|
||||
Some(nonce)
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
EncryptedValueData::new_with_seq(seq, data, writer, nonce).map_err(RPCError::protocol)
|
||||
}
|
||||
|
||||
pub fn decode_signed_value_data(
|
||||
reader: &veilid_capnp::signed_value_data::Reader,
|
||||
) -> Result<SignedValueData, RPCError> {
|
||||
let seq = reader.get_seq();
|
||||
rpc_ignore_missing_property!(reader, data);
|
||||
let data = reader.get_data()?.to_vec();
|
||||
rpc_ignore_missing_property!(reader, writer);
|
||||
let wr = reader.get_writer()?;
|
||||
let writer = decode_public_key(&wr)?;
|
||||
rpc_ignore_missing_property!(reader, value_data);
|
||||
let value_data_buf = reader.get_value_data()?;
|
||||
let mut value_data_cursor = &mut &value_data_buf[..];
|
||||
let tmp_reader = capnp::serialize::read_message(
|
||||
&mut value_data_cursor,
|
||||
capnp::message::ReaderOptions::new(),
|
||||
)?;
|
||||
let value_data_reader = tmp_reader.get_root::<veilid_capnp::value_data::Reader>()?;
|
||||
|
||||
let encrypted_value_data = decode_value_data(&value_data_reader)?;
|
||||
|
||||
rpc_ignore_missing_property!(reader, signature);
|
||||
let sr = reader.get_signature()?;
|
||||
let signature = decode_signature(&sr)?;
|
||||
|
||||
Ok(SignedValueData::new(
|
||||
ValueData::new_with_seq(seq, data, writer).map_err(RPCError::protocol)?,
|
||||
signature,
|
||||
))
|
||||
Ok(SignedValueData::new(encrypted_value_data, signature))
|
||||
}
|
||||
|
||||
pub fn encode_signed_value_data(
|
||||
signed_value_data: &SignedValueData,
|
||||
builder: &mut veilid_capnp::signed_value_data::Builder,
|
||||
) -> Result<(), RPCError> {
|
||||
builder.set_seq(signed_value_data.value_data().seq());
|
||||
builder.set_data(signed_value_data.value_data().data());
|
||||
let mut wb = builder.reborrow().init_writer();
|
||||
encode_public_key(signed_value_data.value_data().ref_writer(), &mut wb);
|
||||
let encoded_value_data = signed_value_data.value_data().raw_blob();
|
||||
builder.set_value_data(encoded_value_data);
|
||||
|
||||
let mut sb = builder.reborrow().init_signature();
|
||||
encode_signature(signed_value_data.signature(), &mut sb);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::{decode_signed_value_data, encode_signed_value_data};
|
||||
use crate::crypto::tests::fixtures::*;
|
||||
use crate::storage_manager::SignedValueData;
|
||||
use crate::{veilid_capnp, BareSignature, EncryptedValueData, Nonce, Signature};
|
||||
|
||||
#[test]
|
||||
fn test_encode_and_decode_signed_value_data() {
|
||||
let keypair = fix_keypair();
|
||||
let fake_nonce = [0x22; 24];
|
||||
let fake_signature = [0x55; 64];
|
||||
|
||||
let mut message_builder = ::capnp::message::Builder::new_default();
|
||||
let mut builder = message_builder.init_root::<veilid_capnp::signed_value_data::Builder>();
|
||||
|
||||
let signed_value_data = SignedValueData::new(
|
||||
EncryptedValueData::new_with_seq(
|
||||
10,
|
||||
vec![1, 2, 3, 4, 5, 6],
|
||||
keypair.key(),
|
||||
Some(Nonce::new(&fake_nonce)),
|
||||
)
|
||||
.unwrap(),
|
||||
Signature::new(keypair.kind(), BareSignature::new(&fake_signature)),
|
||||
);
|
||||
encode_signed_value_data(&signed_value_data, &mut builder).unwrap();
|
||||
let mut buffer = Vec::with_capacity(32768 + 4096);
|
||||
capnp::serialize_packed::write_message(&mut buffer, &message_builder).unwrap();
|
||||
|
||||
println!("buffer[{}] = {:02x?}", buffer.len(), &buffer);
|
||||
|
||||
let mut value_data_cursor = &mut &buffer[..];
|
||||
let tmp_reader = capnp::serialize_packed::read_message(
|
||||
&mut value_data_cursor,
|
||||
capnp::message::ReaderOptions::new(),
|
||||
)
|
||||
.unwrap();
|
||||
let reader = tmp_reader
|
||||
.get_root::<veilid_capnp::signed_value_data::Reader>()
|
||||
.unwrap();
|
||||
|
||||
let decoded = decode_signed_value_data(&reader).unwrap();
|
||||
|
||||
assert_eq!(
|
||||
signed_value_data.value_data().seq(),
|
||||
decoded.value_data().seq()
|
||||
);
|
||||
assert_eq!(
|
||||
signed_value_data.value_data().data(),
|
||||
decoded.value_data().data()
|
||||
);
|
||||
assert_eq!(
|
||||
signed_value_data.value_data().writer(),
|
||||
decoded.value_data().writer()
|
||||
);
|
||||
assert_eq!(signed_value_data.signature(), decoded.signature());
|
||||
}
|
||||
}
|
||||
|
|
|
@ -27,6 +27,9 @@ mod rpc_worker;
|
|||
mod sender_info;
|
||||
mod sender_peer_info;
|
||||
|
||||
pub use coders::encode_nonce as capnp_encode_nonce;
|
||||
pub use coders::encode_public_key as capnp_encode_public_key;
|
||||
|
||||
#[cfg(feature = "unstable-blockstore")]
|
||||
mod rpc_find_block;
|
||||
#[cfg(feature = "unstable-blockstore")]
|
||||
|
@ -368,7 +371,7 @@ impl RPCProcessor {
|
|||
let routing_domain = RoutingDomain::PublicInternet;
|
||||
|
||||
// Ignore own node
|
||||
if routing_table.matches_own_node_id(&[node_id.clone()]) {
|
||||
if routing_table.matches_own_node_id(std::slice::from_ref(&node_id)) {
|
||||
return TimeoutOr::Value(Err(RPCError::network("can't search for own node id")));
|
||||
}
|
||||
|
||||
|
|
|
@ -28,7 +28,7 @@ impl RPCProcessor {
|
|||
pub async fn rpc_call_get_value(
|
||||
&self,
|
||||
dest: Destination,
|
||||
record_key: RecordKey,
|
||||
opaque_record_key: OpaqueRecordKey,
|
||||
subkey: ValueSubkey,
|
||||
last_descriptor: Option<SignedValueDescriptor>,
|
||||
) -> RPCNetworkResult<Answer<GetValueAnswer>> {
|
||||
|
@ -48,16 +48,16 @@ impl RPCProcessor {
|
|||
|
||||
// Get the target node id
|
||||
let crypto = self.crypto();
|
||||
let Some(vcrypto) = crypto.get(record_key.kind()) else {
|
||||
let Some(vcrypto) = crypto.get(opaque_record_key.kind()) else {
|
||||
return Err(RPCError::internal("unsupported cryptosystem"));
|
||||
};
|
||||
let Some(target_node_id) = target_node_ids.get(record_key.kind()) else {
|
||||
let Some(target_node_id) = target_node_ids.get(opaque_record_key.kind()) else {
|
||||
return Err(RPCError::internal("No node id for crypto kind"));
|
||||
};
|
||||
|
||||
let debug_string = format!(
|
||||
"OUT ==> GetValueQ({} #{}{}) => {}",
|
||||
record_key,
|
||||
opaque_record_key,
|
||||
subkey,
|
||||
if last_descriptor.is_some() {
|
||||
" +lastdesc"
|
||||
|
@ -68,14 +68,18 @@ impl RPCProcessor {
|
|||
);
|
||||
|
||||
// Send the getvalue question
|
||||
let get_value_q =
|
||||
RPCOperationGetValueQ::new(record_key.clone(), subkey, last_descriptor.is_none());
|
||||
let get_value_q = RPCOperationGetValueQ::new(
|
||||
opaque_record_key.clone(),
|
||||
subkey,
|
||||
last_descriptor.is_none(),
|
||||
);
|
||||
let question = RPCQuestion::new(
|
||||
network_result_try!(self.get_destination_respond_to(&dest)?),
|
||||
RPCQuestionDetail::GetValueQ(Box::new(get_value_q)),
|
||||
);
|
||||
|
||||
let question_context = QuestionContext::GetValue(ValidateGetValueContext {
|
||||
opaque_record_key: opaque_record_key.clone(),
|
||||
last_descriptor,
|
||||
subkey,
|
||||
crypto_kind: vcrypto.kind(),
|
||||
|
@ -123,7 +127,7 @@ impl RPCProcessor {
|
|||
|
||||
let debug_string_answer = format!(
|
||||
"OUT <== GetValueA({} #{}{}{} peers={}) <= {}",
|
||||
record_key,
|
||||
opaque_record_key,
|
||||
subkey,
|
||||
debug_string_value,
|
||||
if descriptor.is_some() { " +desc" } else { "" },
|
||||
|
@ -135,7 +139,11 @@ impl RPCProcessor {
|
|||
|
||||
let peer_ids: Vec<String> = peers
|
||||
.iter()
|
||||
.filter_map(|p| p.node_ids().get(record_key.kind()).map(|k| k.to_string()))
|
||||
.filter_map(|p| {
|
||||
p.node_ids()
|
||||
.get(opaque_record_key.kind())
|
||||
.map(|k| k.to_string())
|
||||
})
|
||||
.collect();
|
||||
veilid_log!(self debug target: "dht", "Peers: {:#?}", peer_ids);
|
||||
}
|
||||
|
@ -143,7 +151,7 @@ impl RPCProcessor {
|
|||
// Validate peers returned are, in fact, closer to the key than the node we sent this to
|
||||
let valid = match self.routing_table().verify_peers_closer(
|
||||
target_node_id.to_hash_coordinate(),
|
||||
record_key.to_hash_coordinate(),
|
||||
opaque_record_key.to_hash_coordinate(),
|
||||
&peers,
|
||||
) {
|
||||
Ok(v) => v,
|
||||
|
@ -219,20 +227,20 @@ impl RPCProcessor {
|
|||
};
|
||||
|
||||
// Destructure
|
||||
let (record_key, subkey, want_descriptor) = get_value_q.destructure();
|
||||
let (opaque_record_key, subkey, want_descriptor) = get_value_q.destructure();
|
||||
|
||||
// Get the nodes that we know about that are closer to the the key than our own node
|
||||
let closer_to_key_peers = network_result_try!(routing_table
|
||||
.find_preferred_peers_closer_to_key(
|
||||
routing_domain,
|
||||
record_key.to_hash_coordinate(),
|
||||
opaque_record_key.to_hash_coordinate(),
|
||||
vec![CAP_DHT]
|
||||
));
|
||||
|
||||
if debug_target_enabled!("dht") {
|
||||
let debug_string = format!(
|
||||
"IN <=== GetValueQ({} #{}{}) <== {}",
|
||||
record_key,
|
||||
opaque_record_key,
|
||||
subkey,
|
||||
if want_descriptor { " +wantdesc" } else { "" },
|
||||
msg.header.direct_sender_node_id()
|
||||
|
@ -255,7 +263,7 @@ impl RPCProcessor {
|
|||
// See if we have this record ourselves
|
||||
let storage_manager = self.storage_manager();
|
||||
let get_result = network_result_try!(storage_manager
|
||||
.inbound_get_value(record_key.clone(), subkey, want_descriptor)
|
||||
.inbound_get_value(opaque_record_key.clone(), subkey, want_descriptor)
|
||||
.await
|
||||
.map_err(RPCError::internal)?);
|
||||
(get_result.opt_value, get_result.opt_descriptor)
|
||||
|
@ -276,7 +284,7 @@ impl RPCProcessor {
|
|||
|
||||
let debug_string_answer = format!(
|
||||
"IN ===> GetValueA({} #{}{}{} peers={}) ==> {}",
|
||||
record_key,
|
||||
opaque_record_key,
|
||||
subkey,
|
||||
debug_string_value,
|
||||
if get_result_descriptor.is_some() {
|
||||
|
|
|
@ -30,7 +30,7 @@ impl RPCProcessor {
|
|||
pub async fn rpc_call_inspect_value(
|
||||
&self,
|
||||
dest: Destination,
|
||||
record_key: RecordKey,
|
||||
opaque_record_key: OpaqueRecordKey,
|
||||
subkeys: ValueSubkeyRangeSet,
|
||||
last_descriptor: Option<SignedValueDescriptor>,
|
||||
) -> RPCNetworkResult<Answer<InspectValueAnswer>> {
|
||||
|
@ -50,16 +50,16 @@ impl RPCProcessor {
|
|||
|
||||
// Get the target node id
|
||||
let crypto = self.crypto();
|
||||
let Some(vcrypto) = crypto.get(record_key.kind()) else {
|
||||
let Some(vcrypto) = crypto.get(opaque_record_key.kind()) else {
|
||||
return Err(RPCError::internal("unsupported cryptosystem"));
|
||||
};
|
||||
let Some(target_node_id) = target_node_ids.get(record_key.kind()) else {
|
||||
let Some(target_node_id) = target_node_ids.get(opaque_record_key.kind()) else {
|
||||
return Err(RPCError::internal("No node id for crypto kind"));
|
||||
};
|
||||
|
||||
let debug_string = format!(
|
||||
"OUT ==> InspectValueQ({} #{}{}) => {}",
|
||||
record_key,
|
||||
opaque_record_key,
|
||||
&subkeys,
|
||||
if last_descriptor.is_some() {
|
||||
" +lastdesc"
|
||||
|
@ -71,7 +71,7 @@ impl RPCProcessor {
|
|||
|
||||
// Send the inspectvalue question
|
||||
let inspect_value_q = RPCOperationInspectValueQ::new(
|
||||
record_key.clone(),
|
||||
opaque_record_key.clone(),
|
||||
subkeys.clone(),
|
||||
last_descriptor.is_none(),
|
||||
)?;
|
||||
|
@ -81,6 +81,7 @@ impl RPCProcessor {
|
|||
);
|
||||
|
||||
let question_context = QuestionContext::InspectValue(ValidateInspectValueContext {
|
||||
opaque_record_key: opaque_record_key.clone(),
|
||||
last_descriptor,
|
||||
subkeys,
|
||||
crypto_kind: vcrypto.kind(),
|
||||
|
@ -121,7 +122,7 @@ impl RPCProcessor {
|
|||
if debug_target_enabled!("dht") {
|
||||
let debug_string_answer = format!(
|
||||
"OUT <== InspectValueA({} {} peers={}) <= {} seqs:\n{}",
|
||||
record_key,
|
||||
opaque_record_key,
|
||||
if descriptor.is_some() { " +desc" } else { "" },
|
||||
peers.len(),
|
||||
dest,
|
||||
|
@ -132,7 +133,11 @@ impl RPCProcessor {
|
|||
|
||||
let peer_ids: Vec<String> = peers
|
||||
.iter()
|
||||
.filter_map(|p| p.node_ids().get(record_key.kind()).map(|k| k.to_string()))
|
||||
.filter_map(|p| {
|
||||
p.node_ids()
|
||||
.get(opaque_record_key.kind())
|
||||
.map(|k| k.to_string())
|
||||
})
|
||||
.collect();
|
||||
veilid_log!(self debug target: "dht", "Peers: {:#?}", peer_ids);
|
||||
}
|
||||
|
@ -140,7 +145,7 @@ impl RPCProcessor {
|
|||
// Validate peers returned are, in fact, closer to the key than the node we sent this to
|
||||
let valid = match self.routing_table().verify_peers_closer(
|
||||
target_node_id.to_hash_coordinate(),
|
||||
record_key.to_hash_coordinate(),
|
||||
opaque_record_key.to_hash_coordinate(),
|
||||
&peers,
|
||||
) {
|
||||
Ok(v) => v,
|
||||
|
@ -207,20 +212,20 @@ impl RPCProcessor {
|
|||
};
|
||||
|
||||
// Destructure
|
||||
let (record_key, subkeys, want_descriptor) = inspect_value_q.destructure();
|
||||
let (opaque_record_key, subkeys, want_descriptor) = inspect_value_q.destructure();
|
||||
|
||||
// Get the nodes that we know about that are closer to the the key than our own node
|
||||
let closer_to_key_peers = network_result_try!(routing_table
|
||||
.find_preferred_peers_closer_to_key(
|
||||
routing_domain,
|
||||
record_key.to_hash_coordinate(),
|
||||
opaque_record_key.to_hash_coordinate(),
|
||||
vec![CAP_DHT]
|
||||
));
|
||||
|
||||
if debug_target_enabled!("dht") {
|
||||
let debug_string = format!(
|
||||
"IN <=== InspectValueQ({} {}{}) <== {}",
|
||||
record_key,
|
||||
opaque_record_key,
|
||||
subkeys,
|
||||
if want_descriptor { " +wantdesc" } else { "" },
|
||||
msg.header.direct_sender_node_id()
|
||||
|
@ -244,7 +249,7 @@ impl RPCProcessor {
|
|||
// See if we have this record ourselves
|
||||
let storage_manager = self.storage_manager();
|
||||
let inspect_result = network_result_try!(storage_manager
|
||||
.inbound_inspect_value(record_key.clone(), subkeys, want_descriptor)
|
||||
.inbound_inspect_value(opaque_record_key.clone(), subkeys, want_descriptor)
|
||||
.await
|
||||
.map_err(RPCError::internal)?);
|
||||
(
|
||||
|
@ -260,7 +265,7 @@ impl RPCProcessor {
|
|||
if debug_target_enabled!("dht") {
|
||||
let debug_string_answer = format!(
|
||||
"IN ===> InspectValueA({} {:?}{} peers={}) ==> {}",
|
||||
record_key,
|
||||
opaque_record_key,
|
||||
inspect_result_seqs,
|
||||
if inspect_result_descriptor.is_some() {
|
||||
" +desc"
|
||||
|
|
|
@ -30,7 +30,7 @@ impl RPCProcessor {
|
|||
pub async fn rpc_call_set_value(
|
||||
&self,
|
||||
dest: Destination,
|
||||
record_key: RecordKey,
|
||||
opaque_record_key: OpaqueRecordKey,
|
||||
subkey: ValueSubkey,
|
||||
value: SignedValueData,
|
||||
descriptor: SignedValueDescriptor,
|
||||
|
@ -52,16 +52,16 @@ impl RPCProcessor {
|
|||
|
||||
// Get the target node id
|
||||
let crypto = self.crypto();
|
||||
let Some(vcrypto) = crypto.get(record_key.kind()) else {
|
||||
let Some(vcrypto) = crypto.get(opaque_record_key.kind()) else {
|
||||
return Err(RPCError::internal("unsupported cryptosystem"));
|
||||
};
|
||||
let Some(target_node_id) = target_node_ids.get(record_key.kind()) else {
|
||||
let Some(target_node_id) = target_node_ids.get(opaque_record_key.kind()) else {
|
||||
return Err(RPCError::internal("No node id for crypto kind"));
|
||||
};
|
||||
|
||||
let debug_string = format!(
|
||||
"OUT ==> SetValueQ({} #{} len={} seq={} writer={}{}) => {}",
|
||||
record_key,
|
||||
opaque_record_key,
|
||||
subkey,
|
||||
value.value_data().data().len(),
|
||||
value.value_data().seq(),
|
||||
|
@ -72,7 +72,7 @@ impl RPCProcessor {
|
|||
|
||||
// Send the setvalue question
|
||||
let set_value_q = RPCOperationSetValueQ::new(
|
||||
record_key.clone(),
|
||||
opaque_record_key.clone(),
|
||||
subkey,
|
||||
value,
|
||||
if send_descriptor {
|
||||
|
@ -86,6 +86,7 @@ impl RPCProcessor {
|
|||
RPCQuestionDetail::SetValueQ(Box::new(set_value_q)),
|
||||
);
|
||||
let question_context = QuestionContext::SetValue(ValidateSetValueContext {
|
||||
opaque_record_key: opaque_record_key.clone(),
|
||||
descriptor,
|
||||
subkey,
|
||||
crypto_kind: vcrypto.kind(),
|
||||
|
@ -136,7 +137,7 @@ impl RPCProcessor {
|
|||
|
||||
let debug_string_answer = format!(
|
||||
"OUT <== SetValueA({} #{}{}{} peers={}) <= {}",
|
||||
record_key,
|
||||
opaque_record_key,
|
||||
subkey,
|
||||
if set { " +set" } else { "" },
|
||||
debug_string_value,
|
||||
|
@ -148,7 +149,11 @@ impl RPCProcessor {
|
|||
|
||||
let peer_ids: Vec<String> = peers
|
||||
.iter()
|
||||
.filter_map(|p| p.node_ids().get(record_key.kind()).map(|k| k.to_string()))
|
||||
.filter_map(|p| {
|
||||
p.node_ids()
|
||||
.get(opaque_record_key.kind())
|
||||
.map(|k| k.to_string())
|
||||
})
|
||||
.collect();
|
||||
veilid_log!(self debug target: "dht", "Peers: {:#?}", peer_ids);
|
||||
}
|
||||
|
@ -156,7 +161,7 @@ impl RPCProcessor {
|
|||
// Validate peers returned are, in fact, closer to the key than the node we sent this to
|
||||
let valid = match self.routing_table().verify_peers_closer(
|
||||
target_node_id.to_hash_coordinate(),
|
||||
record_key.to_hash_coordinate(),
|
||||
opaque_record_key.to_hash_coordinate(),
|
||||
&peers,
|
||||
) {
|
||||
Ok(v) => v,
|
||||
|
@ -230,7 +235,7 @@ impl RPCProcessor {
|
|||
};
|
||||
|
||||
// Destructure
|
||||
let (record_key, subkey, value, descriptor) = set_value_q.destructure();
|
||||
let (opaque_record_key, subkey, value, descriptor) = set_value_q.destructure();
|
||||
|
||||
// Get target for ValueChanged notifications
|
||||
let dest = network_result_try!(self.get_respond_to_destination(&msg));
|
||||
|
@ -240,13 +245,13 @@ impl RPCProcessor {
|
|||
let closer_to_key_peers = network_result_try!(routing_table
|
||||
.find_preferred_peers_closer_to_key(
|
||||
routing_domain,
|
||||
record_key.to_hash_coordinate(),
|
||||
opaque_record_key.to_hash_coordinate(),
|
||||
vec![CAP_DHT]
|
||||
));
|
||||
|
||||
let debug_string = format!(
|
||||
"IN <=== SetValueQ({} #{} len={} seq={} writer={}{}) <== {}",
|
||||
record_key,
|
||||
opaque_record_key,
|
||||
subkey,
|
||||
value.value_data().data().len(),
|
||||
value.value_data().seq(),
|
||||
|
@ -272,7 +277,7 @@ impl RPCProcessor {
|
|||
let storage_manager = self.storage_manager();
|
||||
let new_value = network_result_try!(storage_manager
|
||||
.inbound_set_value(
|
||||
record_key.clone(),
|
||||
opaque_record_key.clone(),
|
||||
subkey,
|
||||
Arc::new(value),
|
||||
descriptor.map(Arc::new),
|
||||
|
@ -299,7 +304,7 @@ impl RPCProcessor {
|
|||
|
||||
let debug_string_answer = format!(
|
||||
"IN ===> SetValueA({} #{}{}{} peers={}) ==> {}",
|
||||
record_key,
|
||||
opaque_record_key,
|
||||
subkey,
|
||||
if set { " +set" } else { "" },
|
||||
debug_string_value,
|
||||
|
|
|
@ -9,7 +9,7 @@ impl RPCProcessor {
|
|||
pub async fn rpc_call_value_changed(
|
||||
&self,
|
||||
dest: Destination,
|
||||
key: RecordKey,
|
||||
record_key: OpaqueRecordKey,
|
||||
subkeys: ValueSubkeyRangeSet,
|
||||
count: u32,
|
||||
watch_id: u64,
|
||||
|
@ -27,7 +27,8 @@ impl RPCProcessor {
|
|||
"Never send value changes over safety routes",
|
||||
));
|
||||
}
|
||||
let value_changed = RPCOperationValueChanged::new(key, subkeys, count, watch_id, value)?;
|
||||
let value_changed =
|
||||
RPCOperationValueChanged::new(record_key, subkeys, count, watch_id, value)?;
|
||||
let statement =
|
||||
RPCStatement::new(RPCStatementDetail::ValueChanged(Box::new(value_changed)));
|
||||
|
||||
|
|
|
@ -27,7 +27,7 @@ impl RPCProcessor {
pub async fn rpc_call_watch_value(
&self,
dest: Destination,
record_key: RecordKey,
opaque_record_key: OpaqueRecordKey,
subkeys: ValueSubkeyRangeSet,
expiration: Timestamp,
count: u32,

@ -50,10 +50,10 @@ impl RPCProcessor {
// Get the target node id
let crypto = self.crypto();
let Some(vcrypto) = crypto.get(record_key.kind()) else {
let Some(vcrypto) = crypto.get(opaque_record_key.kind()) else {
return Err(RPCError::internal("unsupported cryptosystem"));
};
let Some(target_node_id) = target_node_ids.get(record_key.kind()) else {
let Some(target_node_id) = target_node_ids.get(opaque_record_key.kind()) else {
return Err(RPCError::internal("No node id for crypto kind"));
};

@ -64,7 +64,7 @@ impl RPCProcessor {
} else {
"".to_owned()
},
record_key,
opaque_record_key,
subkeys,
expiration,
count,

@ -74,7 +74,7 @@ impl RPCProcessor {
// Send the watchvalue question
let watch_value_q = RPCOperationWatchValueQ::new(
record_key.clone(),
opaque_record_key.clone(),
subkeys.clone(),
expiration.as_u64(),
count,

@ -117,7 +117,7 @@ impl RPCProcessor {
"OUT <== WatchValueA({}id={} {} #{:?}@{} peers={}) <= {}",
if accepted { "+accept " } else { "" },
watch_id,
record_key,
opaque_record_key,
subkeys,
expiration,
peers.len(),

@ -128,7 +128,11 @@ impl RPCProcessor {
let peer_ids: Vec<String> = peers
.iter()
.filter_map(|p| p.node_ids().get(record_key.kind()).map(|k| k.to_string()))
.filter_map(|p| {
p.node_ids()
.get(opaque_record_key.kind())
.map(|k| k.to_string())
})
.collect();
veilid_log!(self debug target: "dht", "Peers: {:#?}", peer_ids);
}

@ -155,7 +159,7 @@ impl RPCProcessor {
// Validate peers returned are, in fact, closer to the key than the node we sent this to
let valid = match self.routing_table().verify_peers_closer(
target_node_id.to_hash_coordinate(),
record_key.to_hash_coordinate(),
opaque_record_key.to_hash_coordinate(),
&peers,
) {
Ok(v) => v,

@ -228,7 +232,7 @@ impl RPCProcessor {
};

// Destructure
let (record_key, subkeys, expiration, count, watch_id, watcher, _signature) =
let (opaque_record_key, subkeys, expiration, count, watch_id, watcher, _signature) =
watch_value_q.destructure();

// Extract member id for watcher

@ -250,7 +254,7 @@ impl RPCProcessor {
} else {
"".to_owned()
},
record_key,
opaque_record_key,
subkeys,
expiration,
count,

@ -265,7 +269,7 @@ impl RPCProcessor {
let closer_to_key_peers = network_result_try!(routing_table
.find_preferred_peers_closer_to_key(
routing_domain,
record_key.to_hash_coordinate(),
opaque_record_key.to_hash_coordinate(),
vec![CAP_DHT]
));

@ -292,7 +296,7 @@ impl RPCProcessor {
// See if we have this record ourselves, if so, accept the watch
let storage_manager = self.storage_manager();
let watch_result = network_result_try!(storage_manager
.inbound_watch_value(record_key.clone(), params, watch_id)
.inbound_watch_value(opaque_record_key.clone(), params, watch_id)
.await
.map_err(RPCError::internal)?);

@ -314,7 +318,7 @@ impl RPCProcessor {
"IN ===> WatchValueA({}id={} {} #{} expiration={} peers={}) ==> {}",
if ret_accepted { "+accept " } else { "" },
ret_watch_id,
record_key,
opaque_record_key,
subkeys,
ret_expiration,
closer_to_key_peers.len(),

@ -5,7 +5,7 @@ impl_veilid_log_facility!("stor");
pub(super) struct ActiveSubkeyWriteGuard {
registry: VeilidComponentRegistry,
done: bool,
record_key: RecordKey,
opaque_record_key: OpaqueRecordKey,
subkey: ValueSubkey,
}

@ -19,7 +19,7 @@ impl Drop for ActiveSubkeyWriteGuard {
fn drop(&mut self) {
if !self.done {
let registry = &self.registry;
veilid_log!(registry error "active subkey write finished without being marked done: {}:{}", self.record_key, self.subkey);
veilid_log!(registry error "active subkey write finished without being marked done: {}:{}", self.opaque_record_key, self.subkey);
}
}
}

@ -31,15 +31,15 @@ impl StorageManager {
pub(super) fn mark_active_subkey_write_inner(
&self,
inner: &mut StorageManagerInner,
record_key: RecordKey,
opaque_record_key: OpaqueRecordKey,
subkey: ValueSubkey,
) -> Option<ActiveSubkeyWriteGuard> {
let asw = inner
.active_subkey_writes
.entry(record_key.clone())
.entry(opaque_record_key.clone())
.or_default();
if asw.contains(subkey) {
veilid_log!(self debug "already writing to this subkey: {}:{}", record_key, subkey);
veilid_log!(self debug "already writing to this subkey: {}:{}", opaque_record_key, subkey);
None
} else {
// Add to our list of active subkey writes

@ -47,7 +47,7 @@ impl StorageManager {
Some(ActiveSubkeyWriteGuard {
registry: self.registry(),
done: false,
record_key,
opaque_record_key,
subkey,
})
}

@ -62,13 +62,13 @@ impl StorageManager {
// Remove from active subkey writes
let asw = inner
.active_subkey_writes
.get_mut(&guard.record_key)
.get_mut(&guard.opaque_record_key)
.unwrap();
if !asw.remove(guard.subkey) {
veilid_log!(self error "missing active subkey write: {}:{}", guard.record_key, guard.subkey);
veilid_log!(self error "missing active subkey write: {}:{}", guard.opaque_record_key, guard.subkey);
}
if asw.is_empty() {
inner.active_subkey_writes.remove(&guard.record_key);
inner.active_subkey_writes.remove(&guard.opaque_record_key);
}
guard.set_done();
}

@ -24,7 +24,12 @@ impl StorageManager {
} else {
"".to_owned()
};
out += &format!(" {} {}\n", k, writer);
let encryption_key = if let Some(e) = v.encryption_key() {
format!(":{}", e)
} else {
"".to_owned()
};
out += &format!(" {}{} {}\n", k, encryption_key, writer);
}
format!("{}]\n", out)
}

@ -86,8 +91,9 @@ impl StorageManager {
let Some(local_record_store) = &inner.local_record_store else {
return "not initialized".to_owned();
};
let opaque_record_key = record_key.opaque();
local_record_store
.debug_record_subkey_info(record_key, subkey)
.debug_record_subkey_info(opaque_record_key, subkey)
.await
}
pub async fn debug_remote_record_subkey_info(

@ -99,8 +105,9 @@ impl StorageManager {
let Some(remote_record_store) = &inner.remote_record_store else {
return "not initialized".to_owned();
};
let opaque_record_key = record_key.opaque();
remote_record_store
.debug_record_subkey_info(record_key, subkey)
.debug_record_subkey_info(opaque_record_key, subkey)
.await
}
pub async fn debug_local_record_info(&self, record_key: RecordKey) -> String {

@ -108,9 +115,11 @@ impl StorageManager {
let Some(local_record_store) = &inner.local_record_store else {
return "not initialized".to_owned();
};
let local_debug = local_record_store.debug_record_info(record_key.clone());
let opaque_record_key = record_key.opaque();
let local_debug = local_record_store.debug_record_info(opaque_record_key);

let opened_debug = if let Some(o) = inner.opened_records.get(&record_key) {
let opaque_record_key = record_key.opaque();
let opened_debug = if let Some(o) = inner.opened_records.get(&opaque_record_key) {
format!("Opened Record: {:#?}\n", o)
} else {
"".to_owned()

@ -124,6 +133,7 @@ impl StorageManager {
let Some(remote_record_store) = &inner.remote_record_store else {
return "not initialized".to_owned();
};
remote_record_store.debug_record_info(record_key)
let opaque_record_key = record_key.opaque();
remote_record_store.debug_record_info(opaque_record_key)
}
}

@ -28,7 +28,7 @@ impl StorageManager {
#[instrument(level = "trace", target = "dht", skip_all, err)]
pub(super) async fn outbound_get_value(
&self,
record_key: RecordKey,
opaque_record_key: OpaqueRecordKey,
subkey: ValueSubkey,
safety_selection: SafetySelection,
last_get_result: GetResult,

@ -47,7 +47,7 @@ impl StorageManager {
// Get the nodes we know are caching this value to seed the fanout
let init_fanout_queue = {
self.get_value_nodes(record_key.clone())
self.get_value_nodes(opaque_record_key.clone())
.await?
.unwrap_or_default()
.into_iter()

@ -81,13 +81,13 @@ impl StorageManager {
let call_routine = {
let context = context.clone();
let registry = self.registry();
let record_key = record_key.clone();
let opaque_record_key = opaque_record_key.clone();
let safety_selection = safety_selection.clone();
Arc::new(
move |next_node: NodeRef| -> PinBoxFutureStatic<FanoutCallResult> {
let context = context.clone();
let registry = registry.clone();
let record_key = record_key.clone();
let opaque_record_key = opaque_record_key.clone();
let last_descriptor = last_get_result.opt_descriptor.clone();
let safety_selection = safety_selection.clone();
Box::pin(async move {

@ -97,7 +97,7 @@ impl StorageManager {
.rpc_call_get_value(
Destination::direct(next_node.routing_domain_filtered(routing_domain))
.with_safety(safety_selection),
record_key.clone(),
opaque_record_key.clone(),
subkey,
last_descriptor.map(|x| (*x).clone()),
)

@ -259,7 +259,7 @@ impl StorageManager {
let routing_table = registry.routing_table();
let fanout_call = FanoutCall::new(
&routing_table,
record_key.to_hash_coordinate(),
opaque_record_key.to_hash_coordinate(),
key_count,
fanout,
consensus_count,

@ -331,7 +331,7 @@ impl StorageManager {
}
};
let is_incomplete = result.fanout_result.kind.is_incomplete();
let value_data = match this.process_outbound_get_value_result(key.clone(), subkey, Some(last_seq), result).await {
let value_data = match this.process_outbound_get_value_result(key.opaque(), subkey, Some(last_seq), result).await {
Ok(Some(v)) => v,
Ok(None) => {
return is_incomplete;

@ -349,7 +349,20 @@ impl StorageManager {
// if the sequence number changed since our first partial update
// Send with a max count as this is not attached to any watch
if last_seq != value_data.seq() {
this.update_callback_value_change(key.clone(),ValueSubkeyRangeSet::single(subkey), u32::MAX, Some(value_data));
let value_data = match this.maybe_decrypt_value_data(&key, &value_data) {
Ok(v) => v,
Err(e) => {
veilid_log!(this debug "Deferred fanout error: {}", e);
return false;
}
};

this.update_callback_value_change(
key.clone(),
ValueSubkeyRangeSet::single(subkey),
u32::MAX,
Some(value_data),
);
}

// Return done

@ -363,11 +376,11 @@ impl StorageManager {
#[instrument(level = "trace", target = "dht", skip_all)]
pub(super) async fn process_outbound_get_value_result(
&self,
record_key: RecordKey,
opaque_record_key: OpaqueRecordKey,
subkey: ValueSubkey,
opt_last_seq: Option<u32>,
result: get_value::OutboundGetValueResult,
) -> Result<Option<ValueData>, VeilidAPIError> {
) -> Result<Option<EncryptedValueData>, VeilidAPIError> {
// See if we got a value back
let Some(get_result_value) = result.get_result.opt_value else {
// If we got nothing back then we also had nothing beforehand, return nothing

@ -379,7 +392,7 @@ impl StorageManager {
Self::process_fanout_results_inner(
&mut inner,
record_key.clone(),
opaque_record_key.clone(),
core::iter::once((ValueSubkeyRangeSet::single(subkey), result.fanout_result)),
false,
self.config()

@ -390,7 +403,7 @@ impl StorageManager {
if Some(get_result_value.value_data().seq()) != opt_last_seq {
self.handle_set_local_value_inner(
&mut inner,
record_key,
opaque_record_key,
subkey,
get_result_value.clone(),
InboundWatchUpdateMode::UpdateAll,

@ -404,7 +417,7 @@ impl StorageManager {
#[instrument(level = "trace", target = "dht", skip_all)]
pub async fn inbound_get_value(
&self,
key: RecordKey,
opaque_record_key: OpaqueRecordKey,
subkey: ValueSubkey,
want_descriptor: bool,
) -> VeilidAPIResult<NetworkResult<GetResult>> {

@ -414,7 +427,7 @@ impl StorageManager {
let (_is_local, last_get_result) = {
// See if the subkey we are getting has a last known local value
let mut last_get_result = self
.handle_get_local_value_inner(&mut inner, key.clone(), subkey, true)
.handle_get_local_value_inner(&mut inner, opaque_record_key.clone(), subkey, true)
.await?;
// If this is local, it must have a descriptor already
if last_get_result.opt_descriptor.is_some() {

@ -424,9 +437,13 @@ impl StorageManager {
(true, last_get_result)
} else {
// See if the subkey we are getting has a last known remote value
let last_get_result =
Self::handle_get_remote_value_inner(&mut inner, key, subkey, want_descriptor)
.await?;
let last_get_result = Self::handle_get_remote_value_inner(
&mut inner,
opaque_record_key,
subkey,
want_descriptor,
)
.await?;
(false, last_get_result)
}
};

@ -57,7 +57,7 @@ impl StorageManager {
#[instrument(level = "trace", target = "dht", skip_all, err)]
pub(super) async fn outbound_inspect_value(
&self,
record_key: RecordKey,
opaque_record_key: OpaqueRecordKey,
subkeys: ValueSubkeyRangeSet,
safety_selection: SafetySelection,
local_inspect_result: InspectResult,

@ -88,7 +88,7 @@ impl StorageManager {
// Get the nodes we know are caching this value to seed the fanout
let init_fanout_queue = {
self.get_value_nodes(record_key.clone())
self.get_value_nodes(opaque_record_key.clone())
.await?
.unwrap_or_default()
.into_iter()

@ -125,7 +125,7 @@ impl StorageManager {
let call_routine = {
let context = context.clone();
let registry = self.registry();
let record_key = record_key.clone();
let opaque_record_key = opaque_record_key.clone();
let safety_selection = safety_selection.clone();
Arc::new(
move |next_node: NodeRef| -> PinBoxFutureStatic<FanoutCallResult> {

@ -133,7 +133,7 @@ impl StorageManager {
let registry = registry.clone();
let opt_descriptor = local_inspect_result.opt_descriptor();
let subkeys = subkeys.clone();
let record_key = record_key.clone();
let opaque_record_key = opaque_record_key.clone();
let safety_selection = safety_selection.clone();
Box::pin(async move {
let rpc_processor = registry.rpc_processor();

@ -142,7 +142,7 @@ impl StorageManager {
rpc_processor
.rpc_call_inspect_value(
Destination::direct(next_node.routing_domain_filtered(routing_domain)).with_safety(safety_selection),
record_key.clone(),
opaque_record_key.clone(),
subkeys.clone(),
opt_descriptor.map(|x| (*x).clone()),
)

@ -294,7 +294,7 @@ impl StorageManager {
let routing_table = self.routing_table();
let fanout_call = FanoutCall::new(
&routing_table,
record_key.to_hash_coordinate(),
opaque_record_key.to_hash_coordinate(),
key_count,
fanout,
consensus_count,

@ -362,7 +362,7 @@ impl StorageManager {
#[instrument(level = "trace", target = "dht", skip_all)]
pub async fn inbound_inspect_value(
&self,
record_key: RecordKey,
opaque_record_key: OpaqueRecordKey,
subkeys: ValueSubkeyRangeSet,
want_descriptor: bool,
) -> VeilidAPIResult<NetworkResult<InspectResult>> {

@ -374,7 +374,7 @@ impl StorageManager {
let mut local_inspect_result = self
.handle_inspect_local_value_inner(
&mut inner,
record_key.clone(),
opaque_record_key.clone(),
subkeys.clone(),
true,
)

@ -390,7 +390,7 @@ impl StorageManager {
let remote_inspect_result = self
.handle_inspect_remote_value_inner(
&mut inner,
record_key,
opaque_record_key,
subkeys,
want_descriptor,
)

@ -32,7 +32,7 @@ impl_veilid_log_facility!("stor");
/// Fixed length of MemberId (DHT Schema member id) in bytes
pub const MEMBER_ID_LENGTH: usize = 32;
/// The maximum size of a single subkey
pub(crate) const MAX_SUBKEY_SIZE: usize = ValueData::MAX_LEN;
pub(crate) const MAX_SUBKEY_SIZE: usize = EncryptedValueData::MAX_LEN;
/// The maximum total size of all subkeys of a record
pub(crate) const MAX_RECORD_DATA_SIZE: usize = 1_048_576;
/// Frequency to flush record stores to disk

@ -72,7 +72,7 @@ const REHYDRATION_REQUESTS: &[u8] = b"rehydration_requests";
/// A single 'value changed' message to send
struct ValueChangedInfo {
target: Target,
record_key: RecordKey,
record_key: OpaqueRecordKey,
subkeys: ValueSubkeyRangeSet,
count: u32,
watch_id: u64,

@ -83,18 +83,18 @@ struct ValueChangedInfo {
#[derive(Default)]
struct StorageManagerInner {
/// Records that have been 'opened' and are not yet closed
pub opened_records: HashMap<RecordKey, OpenedRecord>,
pub opened_records: HashMap<OpaqueRecordKey, OpenedRecord>,
/// Records that have ever been 'created' or 'opened' by this node, things we care about that we must republish to keep alive
pub local_record_store: Option<RecordStore<LocalRecordDetail>>,
/// Records that have been pushed to this node for distribution by other nodes, that we make an effort to republish
pub remote_record_store: Option<RecordStore<RemoteRecordDetail>>,
/// Record subkeys to commit to the network in the background,
/// either because they were written to offline, or due to a rehydration action
pub offline_subkey_writes: LinkedHashMap<RecordKey, OfflineSubkeyWrite>,
pub offline_subkey_writes: LinkedHashMap<OpaqueRecordKey, OfflineSubkeyWrite>,
/// Record subkeys that are currently being written to in the foreground
pub active_subkey_writes: HashMap<RecordKey, ValueSubkeyRangeSet>,
pub active_subkey_writes: HashMap<OpaqueRecordKey, ValueSubkeyRangeSet>,
/// Records that have pending rehydration requests
pub rehydration_requests: HashMap<RecordKey, RehydrationRequest>,
pub rehydration_requests: HashMap<OpaqueRecordKey, RehydrationRequest>,
/// State management for outbound watches
pub outbound_watch_manager: OutboundWatchManager,
/// Storage manager metadata that is persistent, including copy of offline subkey writes

@ -506,6 +506,7 @@ impl StorageManager {
&self,
schema: DHTSchema,
owner_key: &PublicKey,
encryption_key: Option<SharedSecret>,
) -> VeilidAPIResult<RecordKey> {
// Get cryptosystem
let crypto = self.crypto();

@ -513,6 +514,11 @@ impl StorageManager {
apibail_generic!("unsupported cryptosystem");
};

// Encryption key must match owner key
if let Some(ek) = &encryption_key {
vcrypto.check_shared_secret(ek)?;
}

// Validate schema
schema.validate()?;
let schema_data = schema.compile();

@ -521,20 +527,36 @@ impl StorageManager {
&vcrypto,
owner_key.ref_value(),
&schema_data,
encryption_key.map(|x| x.into_value()),
))
}

fn make_opaque_record_key(
vcrypto: &CryptoSystemGuard<'_>,
owner_key: &BarePublicKey,
schema_data: &[u8],
) -> OpaqueRecordKey {
let mut hash_data = Vec::<u8>::with_capacity(owner_key.len() + 4 + schema_data.len());
hash_data.extend_from_slice(vcrypto.kind().bytes());
hash_data.extend_from_slice(owner_key);
hash_data.extend_from_slice(schema_data);
let hash = vcrypto.generate_hash(&hash_data);

OpaqueRecordKey::new(vcrypto.kind(), BareOpaqueRecordKey::new(hash.ref_value()))
}

fn make_record_key(
vcrypto: &CryptoSystemGuard<'_>,
owner_key: &BarePublicKey,
schema_data: &[u8],
encryption_key: Option<BareSharedSecret>,
) -> RecordKey {
let mut hash_data = Vec::<u8>::with_capacity(owner_key.len() + 4 + schema_data.len());
hash_data.extend_from_slice(vcrypto.kind().bytes());
hash_data.extend_from_slice(owner_key);
hash_data.extend_from_slice(schema_data);
let hash = vcrypto.generate_hash(&hash_data);
RecordKey::new(vcrypto.kind(), BareRecordKey::new(hash.ref_value()))
let opaque = Self::make_opaque_record_key(vcrypto, owner_key, schema_data);

RecordKey::new(
vcrypto.kind(),
BareRecordKey::new(opaque.into_value(), encryption_key),
)
}

/// Create a local record from scratch with a new owner key, open it, and return the opened descriptor

@ -605,8 +627,12 @@ impl StorageManager {
.config()
.with(|c| c.network.dht.get_value_count as usize);

self.add_rehydration_request(record_key, ValueSubkeyRangeSet::full(), get_consensus)
.await;
self.add_rehydration_request(
record_key.opaque(),
ValueSubkeyRangeSet::full(),
get_consensus,
)
.await;

return Ok(res);
}

@ -623,7 +649,7 @@ impl StorageManager {
// Use the safety selection we opened the record with
let result = self
.outbound_inspect_value(
record_key.clone(),
record_key.opaque(),
ValueSubkeyRangeSet::single(0),
safety_selection.clone(),
InspectResult::default(),

@ -634,7 +660,8 @@ impl StorageManager {
// If we got nothing back, the key wasn't found
if result.inspect_result.opt_descriptor().is_none() {
// No result
apibail_key_not_found!(record_key);
let opaque_record_key = record_key.opaque();
apibail_key_not_found!(opaque_record_key);
};

// Check again to see if we have a local record already or not

@ -690,7 +717,16 @@ impl StorageManager {
// Attempt to close the record, returning the opened record if it wasn't already closed
let mut inner = self.inner.lock().await;
let keys = inner.opened_records.keys().cloned().collect::<Vec<_>>();
let keys = inner
.opened_records
.iter()
.map(|(k, v)| {
RecordKey::new(
k.kind(),
BareRecordKey::new(k.value(), v.encryption_key().cloned()),
)
})
.collect::<Vec<_>>();
for key in keys {
Self::close_record_inner(&mut inner, key)?;
}

@ -714,8 +750,9 @@ impl StorageManager {
apibail_not_initialized!();
};

let opaque_record_key = record_key.opaque();
// Remove the record from the local store
local_record_store.delete_record(record_key).await
local_record_store.delete_record(opaque_record_key).await
}

/// Get the value of a subkey from an opened local record

@ -729,10 +766,11 @@ impl StorageManager {
let Ok(_guard) = self.startup_lock.enter() else {
apibail_not_initialized!();
};
let opaque_record_key = record_key.opaque();

let mut inner = self.inner.lock().await;
let safety_selection = {
let Some(opened_record) = inner.opened_records.get(&record_key) else {
let Some(opened_record) = inner.opened_records.get(&opaque_record_key) else {
apibail_generic!("record not open");
};
opened_record.safety_selection()

@ -740,13 +778,16 @@ impl StorageManager {
// See if the requested subkey is our local record store
let last_get_result = self
.handle_get_local_value_inner(&mut inner, record_key.clone(), subkey, true)
.handle_get_local_value_inner(&mut inner, opaque_record_key.clone(), subkey, true)
.await?;

// Return the existing value if we have one unless we are forcing a refresh
if !force_refresh {
if let Some(last_get_result_value) = last_get_result.opt_value {
return Ok(Some(last_get_result_value.value_data().clone()));
return Ok(Some(self.maybe_decrypt_value_data(
&record_key,
last_get_result_value.value_data(),
)?));
}
}

@ -754,7 +795,10 @@ impl StorageManager {
if !self.dht_is_online() {
// Return the existing value if we have one if we aren't online
if let Some(last_get_result_value) = last_get_result.opt_value {
return Ok(Some(last_get_result_value.value_data().clone()));
return Ok(Some(self.maybe_decrypt_value_data(
&record_key,
last_get_result_value.value_data(),
)?));
}
apibail_try_again!("offline, try again later");
};

@ -770,7 +814,7 @@ impl StorageManager {
.map(|v| v.value_data().seq());
let res_rx = self
.outbound_get_value(
record_key.clone(),
opaque_record_key.clone(),
subkey,
safety_selection,
last_get_result,

@ -785,9 +829,19 @@ impl StorageManager {
let partial = result.fanout_result.kind.is_incomplete();

// Process the returned result
let out = self
.process_outbound_get_value_result(record_key.clone(), subkey, opt_last_seq, result)
let out_encrypted = self
.process_outbound_get_value_result(
opaque_record_key.clone(),
subkey,
opt_last_seq,
result,
)
.await?;
let out = if let Some(vd) = out_encrypted {
Some(self.maybe_decrypt_value_data(&record_key, &vd)?)
} else {
None
};

if let Some(out) = &out {
// If there's more to process, do it in the background

@ -816,17 +870,18 @@ impl StorageManager {
let Ok(_guard) = self.startup_lock.enter() else {
apibail_not_initialized!();
};
let opaque_record_key = record_key.opaque();

let mut inner = self.inner.lock().await;

// Get cryptosystem
let crypto = self.crypto();
let Some(vcrypto) = crypto.get(record_key.kind()) else {
apibail_generic!("unsupported cryptosystem");
apibail_generic!("unsupported cryptosystem for record key");
};

let (safety_selection, opt_writer) = {
let Some(opened_record) = inner.opened_records.get(&record_key) else {
let Some(opened_record) = inner.opened_records.get(&opaque_record_key) else {
apibail_generic!("record not open");
};
(

@ -852,7 +907,7 @@ impl StorageManager {
// See if the subkey we are modifying has a last known local value
let last_get_result = self
.handle_get_local_value_inner(&mut inner, record_key.clone(), subkey, true)
.handle_get_local_value_inner(&mut inner, opaque_record_key.clone(), subkey, true)
.await?;

// Get the descriptor and schema for the key

@ -863,23 +918,31 @@ impl StorageManager {
// Make new subkey data
let value_data = if let Some(last_signed_value_data) = last_get_result.opt_value {
if last_signed_value_data.value_data().data() == data
&& last_signed_value_data.value_data().ref_writer() == &writer.key()
let decrypted =
self.maybe_decrypt_value_data(&record_key, last_signed_value_data.value_data())?;
if decrypted.data() == data
&& last_signed_value_data.value_data().writer() == writer.key()
{
// Data and writer is the same, nothing is changing,
// just return that we set it, but no network activity needs to happen
return Ok(None);
}
let seq = last_signed_value_data.value_data().seq();

ValueData::new_with_seq(seq + 1, data, writer.key())?
} else {
ValueData::new(data, writer.key())?
};

let encrypted_value_data = self.maybe_encrypt_value_data(&record_key, &value_data)?;

// Validate with schema
if let Err(e) =
self.check_subkey_value_data(&schema, descriptor.ref_owner(), subkey, &value_data)
{
if let Err(e) = self.check_subkey_value_data(
&schema,
descriptor.ref_owner(),
subkey,
&encrypted_value_data,
) {
veilid_log!(self debug "schema validation error: {}", e);
// Validation failed, ignore this value
apibail_generic!(format!(

@ -890,7 +953,7 @@ impl StorageManager {
// Sign the new value data with the writer
let signed_value_data = Arc::new(SignedValueData::make_signature(
value_data,
encrypted_value_data,
&descriptor.owner(),
subkey,
&vcrypto,

@ -903,11 +966,11 @@ impl StorageManager {
if allow_offline == AllowOffline(false) {
apibail_try_again!("offline, try again later");
}
veilid_log!(self debug "Writing subkey offline because we are offline: {}:{} len={}", record_key, subkey, signed_value_data.value_data().data().len() );
veilid_log!(self debug "Writing subkey offline because we are offline: {}:{} len={}", opaque_record_key, subkey, signed_value_data.value_data().data().len() );
// Add to offline writes to flush
self.add_offline_subkey_write_inner(
&mut inner,
record_key,
opaque_record_key,
subkey,
safety_selection,
signed_value_data,

@ -917,16 +980,17 @@ impl StorageManager {
// Note that we are writing this subkey in the foreground
// If it appears we are already doing this, then put it to the background/offline queue
let opt_guard = self.mark_active_subkey_write_inner(&mut inner, record_key.clone(), subkey);
let opt_guard =
self.mark_active_subkey_write_inner(&mut inner, opaque_record_key.clone(), subkey);
if opt_guard.is_none() {
if allow_offline == AllowOffline(false) {
apibail_try_again!("offline, try again later");
}
veilid_log!(self debug "Writing subkey offline due to concurrent foreground write: {}:{} len={}", record_key, subkey, signed_value_data.value_data().data().len() );
veilid_log!(self debug "Writing subkey offline due to concurrent foreground write: {}:{} len={}", opaque_record_key, subkey, signed_value_data.value_data().data().len() );
// Add to offline writes to flush
self.add_offline_subkey_write_inner(
&mut inner,
record_key,
opaque_record_key,
subkey,
safety_selection,
signed_value_data,

@ -938,12 +1002,12 @@ impl StorageManager {
// Drop the lock for network access
drop(inner);

veilid_log!(self debug "Writing subkey to the network: {}:{} len={}", record_key, subkey, signed_value_data.value_data().data().len() );
veilid_log!(self debug "Writing subkey to the network: {}:{} len={}", opaque_record_key, subkey, signed_value_data.value_data().data().len() );

// Use the safety selection we opened the record with
let res_rx = match self
.outbound_set_value(
record_key.clone(),
opaque_record_key.clone(),
subkey,
safety_selection.clone(),
signed_value_data.clone(),

@ -962,7 +1026,7 @@ impl StorageManager {
if allow_offline == AllowOffline(true) {
self.add_offline_subkey_write_inner(
&mut inner,
record_key.clone(),
opaque_record_key.clone(),
subkey,
safety_selection,
signed_value_data.clone(),

@ -987,7 +1051,7 @@ impl StorageManager {
res_rx,
record_key,
subkey,
signed_value_data,
value_data,
safety_selection,
)
.await

@ -998,7 +1062,7 @@ impl StorageManager {
res_rx,
record_key,
subkey,
signed_value_data,
value_data,
safety_selection,
)
.await

@ -1022,7 +1086,7 @@ impl StorageManager {
res_rx: flume::Receiver<VeilidAPIResult<set_value::OutboundSetValueResult>>,
record_key: RecordKey,
subkey: ValueSubkey,
signed_value_data: Arc<SignedValueData>,
value_data: ValueData,
safety_selection: SafetySelection,
) -> VeilidAPIResult<Option<ValueData>> {
// Wait for the first result

@ -1037,7 +1101,7 @@ impl StorageManager {
.process_outbound_set_value_result(
record_key.clone(),
subkey,
signed_value_data.value_data().clone(),
value_data.clone(),
safety_selection.clone(),
result,
)

@ -1049,8 +1113,7 @@ impl StorageManager {
res_rx,
record_key,
subkey,
out.clone()
.unwrap_or_else(|| signed_value_data.value_data().clone()),
value_data,
safety_selection,
);
}

@ -1063,7 +1126,7 @@ impl StorageManager {
res_rx: flume::Receiver<VeilidAPIResult<set_value::OutboundSetValueResult>>,
record_key: RecordKey,
subkey: ValueSubkey,
signed_value_data: Arc<SignedValueData>,
value_data: ValueData,
safety_selection: SafetySelection,
) -> VeilidAPIResult<Option<ValueData>> {
let Some(stop_token) = self.startup_lock.stop_token() else {

@ -1085,7 +1148,7 @@ impl StorageManager {
.process_outbound_set_value_result(
record_key.clone(),
subkey,
signed_value_data.value_data().clone(),
value_data.clone(),
safety_selection.clone(),
result,
)

@ -1132,7 +1195,8 @@ impl StorageManager {
// Get the safety selection and the writer we opened this record
let (safety_selection, opt_watcher) = {
let Some(opened_record) = inner.opened_records.get(&record_key) else {
let opaque_record_key = record_key.opaque();
let Some(opened_record) = inner.opened_records.get(&opaque_record_key) else {
// Record must be opened already to change watch
apibail_generic!("record not open");
};

@ -1151,7 +1215,8 @@ impl StorageManager {
// Get the schema so we can truncate the watch to the number of subkeys
let schema = if let Some(lrs) = inner.local_record_store.as_ref() {
let Some(schema) = lrs.peek_record(&record_key, |r| r.schema()) else {
let opaque_record_key = record_key.opaque();
let Some(schema) = lrs.peek_record(&opaque_record_key, |r| r.schema()) else {
apibail_generic!("no local record found");
};
schema

@ -1221,7 +1286,8 @@ impl StorageManager {
// Calculate change to existing watch
let (subkeys, count, expiration_ts) = {
let inner = self.inner.lock().await;
let Some(_opened_record) = inner.opened_records.get(&record_key) else {
let opaque_record_key = record_key.opaque();
let Some(_opened_record) = inner.opened_records.get(&opaque_record_key) else {
apibail_generic!("record not open");
};

@ -1287,6 +1353,8 @@ impl StorageManager {
apibail_not_initialized!();
};

let opaque_record_key = record_key.opaque();

let subkeys = if subkeys.is_empty() {
ValueSubkeyRangeSet::full()
} else {

@ -1295,7 +1363,7 @@ impl StorageManager {
let mut inner = self.inner.lock().await;
let safety_selection = {
let Some(opened_record) = inner.opened_records.get(&record_key) else {
let Some(opened_record) = inner.opened_records.get(&opaque_record_key) else {
apibail_generic!("record not open");
};
opened_record.safety_selection()

@ -1303,19 +1371,24 @@ impl StorageManager {
// See if the requested record is our local record store
let mut local_inspect_result = self
.handle_inspect_local_value_inner(&mut inner, record_key.clone(), subkeys.clone(), true)
.handle_inspect_local_value_inner(
&mut inner,
opaque_record_key.clone(),
subkeys.clone(),
true,
)
.await?;

// Get the offline subkeys for this record still only returning the ones we're inspecting
// Merge in the currently offline in-flight records and the actively written records as well
let active_subkey_writes = inner
.active_subkey_writes
.get(&record_key)
.get(&opaque_record_key)
.cloned()
.unwrap_or_default();
let offline_subkey_writes = inner
.offline_subkey_writes
.get(&record_key)
.get(&opaque_record_key)
.map(|o| o.subkeys.union(&o.subkeys_in_flight))
.unwrap_or_default()
.union(&active_subkey_writes)

@ -1361,7 +1434,7 @@ impl StorageManager {
// Get the inspect record report from the network
let result = self
.outbound_inspect_value(
record_key.clone(),
opaque_record_key.clone(),
subkeys,
safety_selection,
if matches!(scope, DHTReportScope::SyncGet | DHTReportScope::SyncSet) {

@ -1384,7 +1457,7 @@ impl StorageManager {
Self::process_fanout_results_inner(
&mut inner,
record_key.clone(),
opaque_record_key.clone(),
results_iter,
false,
self.config()

@ -1454,7 +1527,7 @@ impl StorageManager {
#[instrument(level = "trace", target = "stor", skip_all)]
fn check_fanout_set_offline(
&self,
record_key: RecordKey,
opaque_record_key: OpaqueRecordKey,
subkey: ValueSubkey,
fanout_result: &FanoutResult,
) -> bool {

@ -1468,12 +1541,12 @@ impl StorageManager {
if value_node_count < get_consensus {
veilid_log!(self debug "timeout with insufficient consensus ({}<{}), adding offline subkey: {}:{}",
value_node_count, get_consensus,
record_key, subkey);
opaque_record_key, subkey);
true
} else {
veilid_log!(self debug "timeout with sufficient consensus ({}>={}): set_value {}:{}",
value_node_count, get_consensus,
record_key, subkey);
opaque_record_key, subkey);
false
}
}

@ -1485,12 +1558,12 @@ impl StorageManager {
if value_node_count < get_consensus {
veilid_log!(self debug "exhausted with insufficient consensus ({}<{}), adding offline subkey: {}:{}",
value_node_count, get_consensus,
record_key, subkey);
opaque_record_key, subkey);
true
} else {
veilid_log!(self debug "exhausted with sufficient consensus ({}>={}): set_value {}:{}",
value_node_count, get_consensus,
record_key, subkey);
opaque_record_key, subkey);
false
}
}

@ -1548,8 +1621,16 @@ impl StorageManager {
vcrypto.generate_keypair()
};

// Always create a new encryption key
let encryption_key = Some(vcrypto.random_shared_secret().into_value());

// Calculate dht key
let record_key = Self::make_record_key(&vcrypto, owner.ref_value().ref_key(), &schema_data);
let record_key = Self::make_record_key(
&vcrypto,
owner.ref_value().ref_key(),
&schema_data,
encryption_key,
);

// Make a signed value descriptor for this dht value
let signed_value_descriptor = Arc::new(SignedValueDescriptor::make_signature(

@ -1565,8 +1646,9 @@ impl StorageManager {
let record =
Record::<LocalRecordDetail>::new(cur_ts, signed_value_descriptor, local_record_detail)?;

let opaque_record_key = record_key.opaque();
local_record_store
.new_record(record_key.clone(), record)
.new_record(opaque_record_key, record)
.await?;

Ok((record_key, owner))

@ -1593,7 +1675,8 @@ impl StorageManager {
// Return record details
r.clone()
};
let Some(remote_record) = remote_record_store.with_record(&record_key, rcb) else {
let opaque_record_key = record_key.opaque();
let Some(remote_record) = remote_record_store.with_record(&opaque_record_key, rcb) else {
// No local or remote record found, return None
return Ok(None);
};

@ -1606,13 +1689,13 @@ impl StorageManager {
LocalRecordDetail::new(safety_selection),
)?;
local_record_store
.new_record(record_key.clone(), local_record)
.new_record(opaque_record_key.clone(), local_record)
.await?;

// Move copy subkey data from remote to local store
for subkey in remote_record.stored_subkeys().iter() {
let Some(get_result) = remote_record_store
.get_subkey(record_key.clone(), subkey, false)
.get_subkey(opaque_record_key.clone(), subkey, false)
.await?
else {
// Subkey was missing

@ -1626,7 +1709,7 @@ impl StorageManager {
};
local_record_store
.set_subkey(
record_key.clone(),
opaque_record_key.clone(),
subkey,
subkey_data,
InboundWatchUpdateMode::NoUpdate,

@ -1636,13 +1719,13 @@ impl StorageManager {
// Move watches
local_record_store.move_watches(
record_key.clone(),
remote_record_store.move_watches(record_key.clone(), None),
opaque_record_key.clone(),
remote_record_store.move_watches(opaque_record_key.clone(), None),
);

// Delete remote record from store
remote_record_store
.delete_record(record_key.clone())
.delete_record(opaque_record_key.clone())
.await?;

// Return record information as transferred to local record

@ -1672,7 +1755,8 @@ impl StorageManager {
// Return record details
(r.owner(), r.schema())
};
let (owner, schema) = match local_record_store.with_record_mut(&record_key, cb) {
let opaque_record_key = record_key.opaque();
let (owner, schema) = match local_record_store.with_record_mut(&opaque_record_key, cb) {
Some(v) => v,
None => {
// If we don't have a local record yet, check to see if we have a remote record

@ -1705,15 +1789,34 @@ impl StorageManager {
None
};

let crypto = self.crypto();

let mut crypto_with_key: Option<(CryptoSystemGuard, BareSharedSecret)> = None;

if let Some(k) = record_key.ref_value().encryption_key() {
let Some(value_crypto) = crypto.get(record_key.kind()) else {
apibail_generic!("unsupported cryptosystem for record encryption key");
};
crypto_with_key = Some((value_crypto, k));
}

// Write open record
let opaque_record_key = record_key.opaque();
inner
.opened_records
.entry(record_key.clone())
.entry(opaque_record_key)
.and_modify(|e| {
e.set_writer(writer.clone());
e.set_safety_selection(safety_selection.clone());
e.set_encryption_key(crypto_with_key.as_ref().map(|(_, k)| k.clone()));
})
.or_insert_with(|| OpenedRecord::new(writer.clone(), safety_selection.clone()));
.or_insert_with(|| {
OpenedRecord::new(
writer.clone(),
safety_selection.clone(),
crypto_with_key.map(|(_, k)| k),
)
});

// Make DHT Record Descriptor to return
let descriptor = DHTRecordDescriptor::new(record_key, owner, owner_secret, schema);

@ -1730,7 +1833,8 @@ impl StorageManager {
safety_selection: SafetySelection,
) -> VeilidAPIResult<DHTRecordDescriptor> {
// Ensure the record is closed
if inner.opened_records.contains_key(&record_key) {
let opaque_record_key = record_key.opaque();
if inner.opened_records.contains_key(&opaque_record_key) {
panic!("new record should never be opened at this point");
}

@ -1766,14 +1870,17 @@ impl StorageManager {
signed_value_descriptor,
LocalRecordDetail::new(safety_selection.clone()),
)?;

local_record_store
.new_record(record_key.clone(), record)
.new_record(opaque_record_key.clone(), record)
.await?;

let encryption_key = record_key.ref_value().encryption_key();

// Write open record
inner.opened_records.insert(
record_key.clone(),
OpenedRecord::new(writer, safety_selection),
opaque_record_key,
OpenedRecord::new(writer, safety_selection, encryption_key),
);

// Make DHT Record Descriptor to return

@ -1784,7 +1891,7 @@ impl StorageManager {
#[instrument(level = "trace", target = "stor", skip_all, err)]
async fn get_value_nodes(
&self,
record_key: RecordKey,
opaque_record_key: OpaqueRecordKey,
) -> VeilidAPIResult<Option<Vec<NodeRef>>> {
let inner = self.inner.lock().await;
// Get local record store

@ -1795,7 +1902,7 @@ impl StorageManager {
// Get routing table to see if we still know about these nodes
let routing_table = self.routing_table();

let opt_value_nodes = local_record_store.peek_record(&record_key, |r| {
let opt_value_nodes = local_record_store.peek_record(&opaque_record_key, |r| {
let d = r.detail();
d.nodes
.keys()

@ -1810,7 +1917,7 @@ impl StorageManager {
#[instrument(level = "trace", target = "stor", skip_all)]
fn process_fanout_results_inner<I: IntoIterator<Item = (ValueSubkeyRangeSet, FanoutResult)>>(
inner: &mut StorageManagerInner,
record_key: RecordKey,
opaque_record_key: OpaqueRecordKey,
subkey_results_iter: I,
is_set: bool,
consensus_count: usize,

@ -1819,14 +1926,14 @@ impl StorageManager {
let local_record_store = inner.local_record_store.as_mut().unwrap();

let cur_ts = Timestamp::now();
local_record_store.with_record_mut(&record_key, |r| {
local_record_store.with_record_mut(&opaque_record_key, |r| {
let d = r.detail_mut();

for (subkeys, fanout_result) in subkey_results_iter {
for node_id in fanout_result
.value_nodes
.iter()
.filter_map(|x| x.node_ids().get(record_key.kind()))
.filter_map(|x| x.node_ids().get(opaque_record_key.kind()))
{
let pnd = d.nodes.entry(node_id).or_default();
if is_set || pnd.last_set == Timestamp::default() {

@ -1852,11 +1959,11 @@ impl StorageManager {
// Distance is the next metric, closer nodes first
let da =
a.0.to_hash_coordinate()
.distance(&record_key.to_hash_coordinate());
.distance(&opaque_record_key.to_hash_coordinate());

let db =
b.0.to_hash_coordinate()
.distance(&record_key.to_hash_coordinate());
.distance(&opaque_record_key.to_hash_coordinate());
da.cmp(&db)
});

|
@ -1873,14 +1980,16 @@ impl StorageManager {
|
|||
let Some(local_record_store) = inner.local_record_store.as_mut() else {
|
||||
apibail_not_initialized!();
|
||||
};
|
||||
let opaque_record_key = record_key.opaque();
|
||||
|
||||
if local_record_store
|
||||
.peek_record(&record_key, |_| {})
|
||||
.peek_record(&opaque_record_key, |_| {})
|
||||
.is_none()
|
||||
{
|
||||
apibail_key_not_found!(record_key);
|
||||
apibail_key_not_found!(opaque_record_key);
|
||||
}
|
||||
|
||||
if inner.opened_records.remove(&record_key).is_some() {
|
||||
if inner.opened_records.remove(&opaque_record_key).is_some() {
|
||||
// Set the watch to cancelled if we have one
|
||||
// Will process cancellation in the background
|
||||
inner
|
||||
|
@ -1895,15 +2004,18 @@ impl StorageManager {
|
|||
async fn handle_get_local_value_inner(
|
||||
&self,
|
||||
inner: &mut StorageManagerInner,
|
||||
record_key: RecordKey,
|
||||
opaque_record_key: OpaqueRecordKey,
|
||||
subkey: ValueSubkey,
|
||||
want_descriptor: bool,
|
||||
) -> VeilidAPIResult<GetResult> {
|
||||
// See if the value is in the offline subkey writes first,
|
||||
// since it may not have been committed yet to the local record store
|
||||
if let Some(get_result) =
|
||||
self.get_offline_subkey_writes_subkey(inner, &record_key, subkey, want_descriptor)?
|
||||
{
|
||||
if let Some(get_result) = self.get_offline_subkey_writes_subkey(
|
||||
inner,
|
||||
&opaque_record_key,
|
||||
subkey,
|
||||
want_descriptor,
|
||||
)? {
|
||||
return Ok(get_result);
|
||||
}
|
||||
|
||||
|
@ -1912,7 +2024,7 @@ impl StorageManager {
|
|||
apibail_not_initialized!();
|
||||
};
|
||||
if let Some(get_result) = local_record_store
|
||||
.get_subkey(record_key, subkey, want_descriptor)
|
||||
.get_subkey(opaque_record_key, subkey, want_descriptor)
|
||||
.await?
|
||||
{
|
||||
return Ok(get_result);
|
||||
|
@ -1928,7 +2040,7 @@ impl StorageManager {
|
|||
async fn handle_set_local_value_inner(
|
||||
&self,
|
||||
inner: &mut StorageManagerInner,
|
||||
record_key: RecordKey,
|
||||
opaque_record_key: OpaqueRecordKey,
|
||||
subkey: ValueSubkey,
|
||||
signed_value_data: Arc<SignedValueData>,
|
||||
watch_update_mode: InboundWatchUpdateMode,
|
||||
|
@ -1936,7 +2048,7 @@ impl StorageManager {
|
|||
// See if this new data supercedes any offline subkey writes
|
||||
self.remove_old_offline_subkey_writes_inner(
|
||||
inner,
|
||||
record_key.clone(),
|
||||
opaque_record_key.clone(),
|
||||
subkey,
|
||||
signed_value_data.clone(),
|
||||
);
|
||||
|
@ -1948,7 +2060,12 @@ impl StorageManager {
|
|||
|
||||
// Write subkey to local store
|
||||
local_record_store
|
||||
.set_subkey(record_key, subkey, signed_value_data, watch_update_mode)
|
||||
.set_subkey(
|
||||
opaque_record_key,
|
||||
subkey,
|
||||
signed_value_data,
|
||||
watch_update_mode,
|
||||
)
|
||||
.await?;
|
||||
|
||||
Ok(())
|
||||
|
@ -1958,7 +2075,7 @@ impl StorageManager {
|
|||
async fn handle_inspect_local_value_inner(
|
||||
&self,
|
||||
inner: &mut StorageManagerInner,
|
||||
record_key: RecordKey,
|
||||
opaque_record_key: OpaqueRecordKey,
|
||||
subkeys: ValueSubkeyRangeSet,
|
||||
want_descriptor: bool,
|
||||
) -> VeilidAPIResult<InspectResult> {
|
||||
|
@ -1967,7 +2084,7 @@ impl StorageManager {
|
|||
apibail_not_initialized!();
|
||||
};
|
||||
if let Some(inspect_result) = local_record_store
|
||||
.inspect_record(record_key, &subkeys, want_descriptor)
|
||||
.inspect_record(opaque_record_key, &subkeys, want_descriptor)
|
||||
.await?
|
||||
{
|
||||
return Ok(inspect_result);
|
||||
|
@ -1986,7 +2103,7 @@ impl StorageManager {
|
|||
#[instrument(level = "trace", target = "stor", skip_all, err)]
|
||||
async fn handle_get_remote_value_inner(
|
||||
inner: &mut StorageManagerInner,
|
||||
record_key: RecordKey,
|
||||
opaque_record_key: OpaqueRecordKey,
|
||||
subkey: ValueSubkey,
|
||||
want_descriptor: bool,
|
||||
) -> VeilidAPIResult<GetResult> {
|
||||
|
@ -1995,7 +2112,7 @@ impl StorageManager {
|
|||
apibail_not_initialized!();
|
||||
};
|
||||
if let Some(get_result) = remote_record_store
|
||||
.get_subkey(record_key, subkey, want_descriptor)
|
||||
.get_subkey(opaque_record_key, subkey, want_descriptor)
|
||||
.await?
|
||||
{
|
||||
return Ok(get_result);
|
||||
|
@ -2010,7 +2127,7 @@ impl StorageManager {
|
|||
#[instrument(level = "trace", target = "stor", skip_all, err)]
|
||||
async fn handle_set_remote_value_inner(
|
||||
inner: &mut StorageManagerInner,
|
||||
record_key: RecordKey,
|
||||
opaque_record_key: OpaqueRecordKey,
|
||||
subkey: ValueSubkey,
|
||||
signed_value_data: Arc<SignedValueData>,
|
||||
signed_value_descriptor: Arc<SignedValueDescriptor>,
|
||||
|
@ -2023,7 +2140,7 @@ impl StorageManager {
|
|||
|
||||
// See if we have a remote record already or not
|
||||
if remote_record_store
|
||||
.with_record(&record_key, |_| {})
|
||||
.with_record(&opaque_record_key, |_| {})
|
||||
.is_none()
|
||||
{
|
||||
// record didn't exist, make it
|
||||
|
@ -2035,13 +2152,18 @@ impl StorageManager {
|
|||
remote_record_detail,
|
||||
)?;
|
||||
remote_record_store
|
||||
.new_record(record_key.clone(), record)
|
||||
.new_record(opaque_record_key.clone(), record)
|
||||
.await?
|
||||
};
|
||||
|
||||
// Write subkey to remote store
|
||||
remote_record_store
|
||||
.set_subkey(record_key, subkey, signed_value_data, watch_update_mode)
|
||||
.set_subkey(
|
||||
opaque_record_key,
|
||||
subkey,
|
||||
signed_value_data,
|
||||
watch_update_mode,
|
||||
)
|
||||
.await?;
|
||||
|
||||
Ok(())
|
||||
|
@ -2051,7 +2173,7 @@ impl StorageManager {
|
|||
async fn handle_inspect_remote_value_inner(
|
||||
&self,
|
||||
inner: &mut StorageManagerInner,
|
||||
record_key: RecordKey,
|
||||
opaque_record_key: OpaqueRecordKey,
|
||||
subkeys: ValueSubkeyRangeSet,
|
||||
want_descriptor: bool,
|
||||
) -> VeilidAPIResult<InspectResult> {
|
||||
|
@ -2060,7 +2182,7 @@ impl StorageManager {
|
|||
apibail_not_initialized!();
|
||||
};
|
||||
if let Some(inspect_result) = remote_record_store
|
||||
.inspect_record(record_key, &subkeys, want_descriptor)
|
||||
.inspect_record(opaque_record_key, &subkeys, want_descriptor)
|
||||
.await?
|
||||
{
|
||||
return Ok(inspect_result);
|
||||
|
@ -2099,4 +2221,96 @@ impl StorageManager {
|
|||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn get_encryption_key_for_opaque_record_key(
|
||||
&self,
|
||||
opaque_record_key: &OpaqueRecordKey,
|
||||
) -> VeilidAPIResult<Option<BareSharedSecret>> {
|
||||
let inner = self.inner.lock().await;
|
||||
|
||||
let Some(opened_record) = inner.opened_records.get(opaque_record_key) else {
|
||||
apibail_generic!("decrypt_value_data: opened_records does not contain an expected key");
|
||||
};
|
||||
|
||||
Ok(opened_record.encryption_key().cloned())
|
||||
}
|
||||
|
||||
/// Encrypt value data if the record key contains an encryption key.
|
||||
/// Leave it unchanged otherwise.
|
||||
fn maybe_encrypt_value_data(
|
||||
&self,
|
||||
record_key: &RecordKey,
|
||||
value_data: &ValueData,
|
||||
) -> VeilidAPIResult<EncryptedValueData> {
|
||||
if let Some(encryption_key) = record_key.ref_value().ref_encryption_key() {
|
||||
let crypto = self.registry.crypto();
|
||||
|
||||
let Some(vcrypto) = crypto.get(record_key.kind()) else {
|
||||
apibail_generic!("decrypt_value_data: unsupported crypto kind")
|
||||
};
|
||||
|
||||
let mut data = value_data.data().to_vec();
|
||||
let nonce = vcrypto.random_nonce();
|
||||
let encryption_key = SharedSecret::new(record_key.kind(), encryption_key.clone());
|
||||
vcrypto.crypt_in_place_no_auth(&mut data, &nonce, &encryption_key)?;
|
||||
|
||||
Ok(EncryptedValueData::new_with_seq(
|
||||
value_data.seq(),
|
||||
data,
|
||||
value_data.writer(),
|
||||
Some(nonce),
|
||||
)?)
|
||||
} else {
|
||||
Ok(EncryptedValueData::new_with_seq(
|
||||
value_data.seq(),
|
||||
value_data.data().to_vec(),
|
||||
value_data.writer(),
|
||||
None,
|
||||
)?)
|
||||
}
|
||||
}
|
||||
|
||||
/// Decrypt value data if the record key contains an encryption key and value data contains nonce.
|
||||
/// Leave data unchanged if both are none.
|
||||
/// Returns error if either encryption key or nonce is None.
|
||||
fn maybe_decrypt_value_data(
|
||||
&self,
|
||||
record_key: &RecordKey,
|
||||
encrypted_value_data: &EncryptedValueData,
|
||||
) -> VeilidAPIResult<ValueData> {
|
||||
match (
|
||||
record_key.ref_value().ref_encryption_key(),
|
||||
encrypted_value_data.nonce(),
|
||||
) {
|
||||
(Some(encryption_key), Some(nonce)) => {
|
||||
let crypto = self.registry.crypto();
|
||||
|
||||
let Some(vcrypto) = crypto.get(record_key.kind()) else {
|
||||
apibail_generic!("cannot decrypt value data: unsupported crypto kind")
|
||||
};
|
||||
|
||||
let mut data = encrypted_value_data.data().to_vec();
|
||||
let encryption_key = SharedSecret::new(record_key.kind(), encryption_key.clone());
|
||||
vcrypto.crypt_in_place_no_auth(&mut data, &nonce, &encryption_key)?;
|
||||
Ok(ValueData::new_with_seq(
|
||||
encrypted_value_data.seq(),
|
||||
data,
|
||||
encrypted_value_data.writer(),
|
||||
)?)
|
||||
}
|
||||
(None, None) => Ok(ValueData::new_with_seq(
|
||||
encrypted_value_data.seq(),
|
||||
encrypted_value_data.data().to_vec(),
|
||||
encrypted_value_data.writer(),
|
||||
)?),
|
||||
(Some(_), None) => {
|
||||
// Should not happen in normal circumstances
|
||||
apibail_generic!("cannot decrypt value data: missing nonce")
|
||||
}
|
||||
(None, Some(_)) => {
|
||||
// Should not happen in normal circumstances
|
||||
apibail_generic!("cannot decrypt value data: missing encryption key")
|
||||
}
|
||||
}
|
||||
}
|
||||
}
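
Aside from the diff itself: the encryption added above is a symmetric seal/open of each subkey value under the record's shared secret, with a fresh random nonce stored alongside the ciphertext. The following is a minimal illustrative sketch of that round trip, not part of the commit; it assumes a `CryptoSystemGuard` and the record's `BareSharedSecret` are already available, and the helper name is hypothetical.

    // Sketch: seal and open a subkey value the way maybe_encrypt_value_data /
    // maybe_decrypt_value_data above do, using only calls that appear in the diff.
    fn roundtrip_subkey_value(
        vcrypto: &CryptoSystemGuard<'_>,
        bare_key: BareSharedSecret,
        plaintext: &[u8],
    ) -> VeilidAPIResult<Vec<u8>> {
        let shared_secret = SharedSecret::new(vcrypto.kind(), bare_key);

        // Seal: fresh random nonce, in-place unauthenticated symmetric encryption
        let nonce = vcrypto.random_nonce();
        let mut sealed = plaintext.to_vec();
        vcrypto.crypt_in_place_no_auth(&mut sealed, &nonce, &shared_secret)?;

        // Open: the same call with the same nonce and key reverses the cipher
        let mut opened = sealed.clone();
        vcrypto.crypt_in_place_no_auth(&mut opened, &nonce, &shared_secret)?;
        debug_assert_eq!(opened, plaintext);

        Ok(sealed)
    }

Because the nonce travels with the EncryptedValueData and the key is derived from the RecordKey, any holder of the full record key can open values, while nodes that only see the opaque record key store and route ciphertext.
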
@ -2,7 +2,7 @@ use super::*;
impl_veilid_log_facility!("stor");

#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct OfflineSubkeyWrite {
/// Safety selection to use when writing this record to the network
pub safety_selection: SafetySelection,

@ -21,14 +21,14 @@ impl StorageManager {
pub(super) fn add_offline_subkey_write_inner(
&self,
inner: &mut StorageManagerInner,
record_key: RecordKey,
opaque_record_key: OpaqueRecordKey,
subkey: ValueSubkey,
safety_selection: SafetySelection,
signed_value_data: Arc<SignedValueData>,
) {
inner
.offline_subkey_writes
.entry(record_key)
.entry(opaque_record_key)
.and_modify(|x| {
x.subkeys.insert(subkey);
x.subkey_value_data

@ -49,22 +49,22 @@ impl StorageManager {
pub(super) fn get_offline_subkey_writes_subkey(
&self,
inner: &mut StorageManagerInner,
record_key: &RecordKey,
opaque_record_key: &OpaqueRecordKey,
subkey: ValueSubkey,
want_descriptor: bool,
) -> VeilidAPIResult<Option<GetResult>> {
let Some(local_record_store) = inner.local_record_store.as_mut() else {
apibail_not_initialized!();
};
let Some(osw) = inner.offline_subkey_writes.get(record_key) else {
let Some(osw) = inner.offline_subkey_writes.get(opaque_record_key) else {
return Ok(None);
};
let Some(signed_value_data) = osw.subkey_value_data.get(&subkey).cloned() else {
return Ok(None);
};
let opt_descriptor = if want_descriptor {
if let Some(descriptor) =
local_record_store.with_record(record_key, |record| record.descriptor().clone())
if let Some(descriptor) = local_record_store
.with_record(opaque_record_key, |record| record.descriptor().clone())
{
Some(descriptor)
} else {

@ -88,12 +88,12 @@ impl StorageManager {
pub(super) fn remove_old_offline_subkey_writes_inner(
&self,
inner: &mut StorageManagerInner,
record_key: RecordKey,
opaque_record_key: OpaqueRecordKey,
subkey: ValueSubkey,
signed_value_data: Arc<SignedValueData>,
) {
// Get the offline subkey write record
match inner.offline_subkey_writes.entry(record_key.clone()) {
match inner.offline_subkey_writes.entry(opaque_record_key.clone()) {
hashlink::linked_hash_map::Entry::Occupied(mut o) => {
let finished = {
let osw = o.get_mut();

@ -110,7 +110,7 @@ impl StorageManager {
// handled by finish_offline_subkey_writes_inner
osw.subkeys.remove(subkey);

veilid_log!(self debug "offline write overwritten by newer or different data from network: record_key={} subkey={} seq={}", record_key, subkey, signed_value_data.value_data().seq());
veilid_log!(self debug "offline write overwritten by newer or different data from network: record_key={} subkey={} seq={}", opaque_record_key, subkey, signed_value_data.value_data().seq());
}
}
std::collections::hash_map::Entry::Vacant(_) => {}

@ -128,7 +128,7 @@ impl StorageManager {
finished
};
if finished {
veilid_log!(self debug "Offline write finished key {}", record_key);
veilid_log!(self debug "Offline write finished key {}", opaque_record_key);
o.remove();
}
}

@ -144,7 +144,7 @@ impl StorageManager {
pub(super) fn finish_offline_subkey_writes_inner(
&self,
inner: &mut StorageManagerInner,
record_key: RecordKey,
opaque_record_key: OpaqueRecordKey,
subkeys_written: ValueSubkeyRangeSet,
subkeys_still_offline: ValueSubkeyRangeSet,
) {

@ -154,7 +154,7 @@ impl StorageManager {
);
|
||||
// Get the offline subkey write record
|
||||
match inner.offline_subkey_writes.entry(record_key.clone()) {
|
||||
match inner.offline_subkey_writes.entry(opaque_record_key.clone()) {
|
||||
hashlink::linked_hash_map::Entry::Occupied(mut o) => {
|
||||
let finished = {
|
||||
let osw = o.get_mut();
|
||||
|
@ -177,12 +177,12 @@ impl StorageManager {
|
|||
finished
|
||||
};
|
||||
if finished {
|
||||
veilid_log!(self debug "offline subkey write finished key {}", record_key);
|
||||
veilid_log!(self debug "offline subkey write finished key {}", opaque_record_key);
|
||||
o.remove();
|
||||
}
|
||||
}
|
||||
hashlink::linked_hash_map::Entry::Vacant(_) => {
|
||||
veilid_log!(self warn "can't finish missing offline subkey write: ignoring key {}", record_key);
|
||||
veilid_log!(self warn "can't finish missing offline subkey write: ignoring key {}", opaque_record_key);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
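From this point on, the storage layer is keyed by OpaqueRecordKey rather than RecordKey: the opaque key is what gets hashed, stored, and used in DHT RPC operations, while a RecordKey additionally carries the record's optional encryption secret, which is kept out of record-store keys and on-the-wire requests. A small sketch of that relationship, built only from accessors that appear elsewhere in this diff (the free function itself is illustrative):

    // Illustrative: split a RecordKey into the network-visible part and the local
    // secret, then rebuild it, mirroring inbound_value_changed() later in this diff.
    fn split_and_rejoin(record_key: &RecordKey) -> RecordKey {
        let opaque: OpaqueRecordKey = record_key.opaque();
        let secret: Option<BareSharedSecret> =
            record_key.ref_value().ref_encryption_key().cloned();
        RecordKey::new(opaque.kind(), BareRecordKey::new(opaque.value(), secret))
    }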
@ -22,7 +22,7 @@ impl InspectCacheL2 {
|
|||
|
||||
#[derive(Debug)]
|
||||
pub struct InspectCache {
|
||||
cache: LruCache<RecordKey, InspectCacheL2>,
|
||||
cache: LruCache<OpaqueRecordKey, InspectCacheL2>,
|
||||
}
|
||||
|
||||
impl InspectCache {
|
||||
|
@ -34,7 +34,7 @@ impl InspectCache {
|
|||
|
||||
pub fn get(
|
||||
&mut self,
|
||||
key: &RecordKey,
|
||||
key: &OpaqueRecordKey,
|
||||
subkeys: &ValueSubkeyRangeSet,
|
||||
) -> Option<InspectCacheL2Value> {
|
||||
if let Some(l2c) = self.cache.get_mut(key) {
|
||||
|
@ -47,7 +47,7 @@ impl InspectCache {
|
|||
|
||||
pub fn put(
|
||||
&mut self,
|
||||
key: RecordKey,
|
||||
key: OpaqueRecordKey,
|
||||
subkeys: ValueSubkeyRangeSet,
|
||||
value: InspectCacheL2Value,
|
||||
) {
|
||||
|
@ -58,11 +58,16 @@ impl InspectCache {
|
|||
.insert(subkeys, value);
|
||||
}
|
||||
|
||||
pub fn invalidate(&mut self, key: &RecordKey) {
|
||||
pub fn invalidate(&mut self, key: &OpaqueRecordKey) {
|
||||
self.cache.remove(key);
|
||||
}
|
||||
|
||||
pub fn replace_subkey_seq(&mut self, key: &RecordKey, subkey: ValueSubkey, seq: ValueSeqNum) {
|
||||
pub fn replace_subkey_seq(
|
||||
&mut self,
|
||||
key: &OpaqueRecordKey,
|
||||
subkey: ValueSubkey,
|
||||
seq: ValueSeqNum,
|
||||
) {
|
||||
let Some(l2) = self.cache.get_mut(key) else {
|
||||
return;
|
||||
};
|
||||
|
|
|
@ -2,7 +2,7 @@ use super::*;
|
|||
|
||||
#[derive(Debug, Clone, Eq, PartialEq, Ord, PartialOrd, Hash)]
|
||||
pub struct RecordTableKey {
|
||||
pub record_key: RecordKey,
|
||||
pub record_key: OpaqueRecordKey,
|
||||
}
|
||||
impl RecordTableKey {
|
||||
pub fn bytes(&self) -> Vec<u8> {
|
||||
|
@ -13,14 +13,14 @@ impl RecordTableKey {
|
|||
impl TryFrom<&[u8]> for RecordTableKey {
|
||||
type Error = EyreReport;
|
||||
fn try_from(bytes: &[u8]) -> Result<Self, Self::Error> {
|
||||
let key = RecordKey::try_from(bytes)?;
|
||||
let key = OpaqueRecordKey::try_from(bytes)?;
|
||||
Ok(RecordTableKey { record_key: key })
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Eq, PartialEq, Ord, PartialOrd, Hash)]
|
||||
pub struct SubkeyTableKey {
|
||||
pub record_key: RecordKey,
|
||||
pub record_key: OpaqueRecordKey,
|
||||
pub subkey: ValueSubkey,
|
||||
}
|
||||
impl SubkeyTableKey {
|
||||
|
@ -33,7 +33,7 @@ impl SubkeyTableKey {
|
|||
impl TryFrom<&[u8]> for SubkeyTableKey {
|
||||
type Error = EyreReport;
|
||||
fn try_from(bytes: &[u8]) -> Result<Self, Self::Error> {
|
||||
let key = RecordKey::try_from(&bytes[0..bytes.len() - 4])?;
|
||||
let key = OpaqueRecordKey::try_from(&bytes[0..bytes.len() - 4])?;
|
||||
let subkey = ValueSubkey::from_le_bytes(
|
||||
bytes[(bytes.len() - 4)..]
|
||||
.try_into()
|
||||
|
|
|
@ -474,7 +474,7 @@ where
|
|||
#[instrument(level = "trace", target = "stor", skip_all, err)]
|
||||
pub async fn new_record(
|
||||
&mut self,
|
||||
record_key: RecordKey,
|
||||
record_key: OpaqueRecordKey,
|
||||
record: Record<D>,
|
||||
) -> VeilidAPIResult<()> {
|
||||
let rtk = RecordTableKey { record_key };
|
||||
|
@ -520,7 +520,7 @@ where
|
|||
}
|
||||
|
||||
#[instrument(level = "trace", target = "stor", skip_all, err)]
|
||||
pub async fn delete_record(&mut self, record_key: RecordKey) -> VeilidAPIResult<()> {
|
||||
pub async fn delete_record(&mut self, record_key: OpaqueRecordKey) -> VeilidAPIResult<()> {
|
||||
// Get the record table key
|
||||
let rtk = RecordTableKey { record_key };
|
||||
|
||||
|
@ -546,7 +546,7 @@ where
|
|||
}
|
||||
|
||||
#[instrument(level = "trace", target = "stor", skip_all)]
|
||||
pub(super) fn contains_record(&mut self, record_key: &RecordKey) -> bool {
|
||||
pub(super) fn contains_record(&mut self, record_key: &OpaqueRecordKey) -> bool {
|
||||
let rtk = RecordTableKey {
|
||||
record_key: record_key.clone(),
|
||||
};
|
||||
|
@ -554,7 +554,7 @@ where
|
|||
}
|
||||
|
||||
#[instrument(level = "trace", target = "stor", skip_all)]
|
||||
pub(super) fn with_record<R, F>(&mut self, record_key: &RecordKey, f: F) -> Option<R>
|
||||
pub(super) fn with_record<R, F>(&mut self, record_key: &OpaqueRecordKey, f: F) -> Option<R>
|
||||
where
|
||||
F: FnOnce(&Record<D>) -> R,
|
||||
{
|
||||
|
@ -580,7 +580,7 @@ where
|
|||
}
|
||||
|
||||
#[instrument(level = "trace", target = "stor", skip_all)]
|
||||
pub(super) fn peek_record<R, F>(&self, record_key: &RecordKey, f: F) -> Option<R>
|
||||
pub(super) fn peek_record<R, F>(&self, record_key: &OpaqueRecordKey, f: F) -> Option<R>
|
||||
where
|
||||
F: FnOnce(&Record<D>) -> R,
|
||||
{
|
||||
|
@ -597,7 +597,7 @@ where
|
|||
}
|
||||
|
||||
#[instrument(level = "trace", target = "stor", skip_all)]
|
||||
pub(super) fn with_record_mut<R, F>(&mut self, record_key: &RecordKey, f: F) -> Option<R>
|
||||
pub(super) fn with_record_mut<R, F>(&mut self, record_key: &OpaqueRecordKey, f: F) -> Option<R>
|
||||
where
|
||||
F: FnOnce(&mut Record<D>) -> R,
|
||||
{
|
||||
|
@ -625,7 +625,7 @@ where
|
|||
#[instrument(level = "trace", target = "stor", skip_all, err)]
|
||||
pub async fn get_subkey(
|
||||
&mut self,
|
||||
record_key: RecordKey,
|
||||
record_key: OpaqueRecordKey,
|
||||
subkey: ValueSubkey,
|
||||
want_descriptor: bool,
|
||||
) -> VeilidAPIResult<Option<GetResult>> {
|
||||
|
@ -695,7 +695,7 @@ where
|
|||
#[instrument(level = "trace", target = "stor", skip_all, err)]
|
||||
pub async fn peek_subkey(
|
||||
&self,
|
||||
record_key: RecordKey,
|
||||
record_key: OpaqueRecordKey,
|
||||
subkey: ValueSubkey,
|
||||
want_descriptor: bool,
|
||||
) -> VeilidAPIResult<Option<GetResult>> {
|
||||
|
@ -762,7 +762,7 @@ where
|
|||
#[instrument(level = "trace", target = "stor", skip_all)]
|
||||
async fn update_watched_value(
|
||||
&mut self,
|
||||
record_key: RecordKey,
|
||||
record_key: OpaqueRecordKey,
|
||||
subkey: ValueSubkey,
|
||||
watch_update_mode: InboundWatchUpdateMode,
|
||||
) {
|
||||
|
@ -801,7 +801,7 @@ where
|
|||
#[instrument(level = "trace", target = "stor", skip_all, err)]
|
||||
pub async fn set_subkey(
|
||||
&mut self,
|
||||
record_key: RecordKey,
|
||||
record_key: OpaqueRecordKey,
|
||||
subkey: ValueSubkey,
|
||||
signed_value_data: Arc<SignedValueData>,
|
||||
watch_update_mode: InboundWatchUpdateMode,
|
||||
|
@ -910,7 +910,7 @@ where
|
|||
#[instrument(level = "trace", target = "stor", skip_all, err)]
|
||||
pub async fn inspect_record(
|
||||
&mut self,
|
||||
record_key: RecordKey,
|
||||
record_key: OpaqueRecordKey,
|
||||
subkeys: &ValueSubkeyRangeSet,
|
||||
want_descriptor: bool,
|
||||
) -> VeilidAPIResult<Option<InspectResult>> {
|
||||
|
@ -997,7 +997,7 @@ where
|
|||
#[instrument(level = "trace", target = "stor", skip_all, err)]
|
||||
pub async fn _change_existing_watch(
|
||||
&mut self,
|
||||
record_key: RecordKey,
|
||||
record_key: OpaqueRecordKey,
|
||||
params: InboundWatchParameters,
|
||||
watch_id: u64,
|
||||
) -> VeilidAPIResult<InboundWatchResult> {
|
||||
|
@ -1034,7 +1034,7 @@ where
|
|||
#[instrument(level = "trace", target = "stor", skip_all, err)]
|
||||
pub async fn _create_new_watch(
|
||||
&mut self,
|
||||
record_key: RecordKey,
|
||||
record_key: OpaqueRecordKey,
|
||||
params: InboundWatchParameters,
|
||||
member_check: Box<dyn Fn(&MemberId) -> bool + Send>,
|
||||
) -> VeilidAPIResult<InboundWatchResult> {
|
||||
|
@ -1126,7 +1126,7 @@ where
|
|||
#[instrument(level = "trace", target = "stor", skip_all, err)]
|
||||
pub async fn watch_record(
|
||||
&mut self,
|
||||
record_key: RecordKey,
|
||||
record_key: OpaqueRecordKey,
|
||||
mut params: InboundWatchParameters,
|
||||
opt_watch_id: Option<u64>,
|
||||
) -> VeilidAPIResult<InboundWatchResult> {
|
||||
|
@ -1194,7 +1194,7 @@ where
|
|||
#[instrument(level = "trace", target = "stor", skip_all, err)]
|
||||
async fn cancel_watch(
|
||||
&mut self,
|
||||
record_key: RecordKey,
|
||||
record_key: OpaqueRecordKey,
|
||||
watch_id: u64,
|
||||
watcher_member_id: MemberId,
|
||||
) -> VeilidAPIResult<bool> {
|
||||
|
@ -1234,7 +1234,7 @@ where
|
|||
#[instrument(level = "trace", target = "stor", skip_all)]
|
||||
pub fn move_watches(
|
||||
&mut self,
|
||||
record_key: RecordKey,
|
||||
record_key: OpaqueRecordKey,
|
||||
in_watch: Option<(InboundWatchList, bool)>,
|
||||
) -> Option<(InboundWatchList, bool)> {
|
||||
let rtk = RecordTableKey { record_key };
|
||||
|
@ -1272,7 +1272,7 @@ where
|
|||
// ValueChangedInfo but without the subkey data that requires a double mutable borrow to get
|
||||
struct EarlyValueChangedInfo {
|
||||
target: Target,
|
||||
key: RecordKey,
|
||||
key: OpaqueRecordKey,
|
||||
subkeys: ValueSubkeyRangeSet,
|
||||
count: u32,
|
||||
watch_id: u64,
|
||||
|
@ -1411,7 +1411,7 @@ where
|
|||
out
|
||||
}
|
||||
|
||||
pub fn debug_record_info(&self, record_key: RecordKey) -> String {
|
||||
pub fn debug_record_info(&self, record_key: OpaqueRecordKey) -> String {
|
||||
let record_info = self
|
||||
.peek_record(&record_key, |r| format!("{:#?}", r))
|
||||
.unwrap_or("Not found".to_owned());
|
||||
|
@ -1426,7 +1426,7 @@ where
|
|||
|
||||
pub async fn debug_record_subkey_info(
|
||||
&self,
|
||||
record_key: RecordKey,
|
||||
record_key: OpaqueRecordKey,
|
||||
subkey: ValueSubkey,
|
||||
) -> String {
|
||||
match self.peek_subkey(record_key, subkey, true).await {
|
||||
|
|
|
@ -11,13 +11,21 @@ pub(in crate::storage_manager) struct OpenedRecord {
|
|||
|
||||
/// The safety selection in current use
|
||||
safety_selection: SafetySelection,
|
||||
|
||||
/// Encryption key, for newer records
|
||||
encryption_key: Option<BareSharedSecret>,
|
||||
}
|
||||
|
||||
impl OpenedRecord {
|
||||
pub fn new(writer: Option<KeyPair>, safety_selection: SafetySelection) -> Self {
|
||||
pub fn new(
|
||||
writer: Option<KeyPair>,
|
||||
safety_selection: SafetySelection,
|
||||
encryption_key: Option<BareSharedSecret>,
|
||||
) -> Self {
|
||||
Self {
|
||||
writer,
|
||||
safety_selection,
|
||||
encryption_key,
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -34,4 +42,11 @@ impl OpenedRecord {
|
|||
pub fn set_safety_selection(&mut self, safety_selection: SafetySelection) {
|
||||
self.safety_selection = safety_selection;
|
||||
}
|
||||
|
||||
pub fn encryption_key(&self) -> Option<&BareSharedSecret> {
|
||||
self.encryption_key.as_ref()
|
||||
}
|
||||
pub fn set_encryption_key(&mut self, encryption_key: Option<BareSharedSecret>) {
|
||||
self.encryption_key = encryption_key;
|
||||
}
|
||||
}
|
||||
|
|
|
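OpenedRecord now remembers the record's encryption key alongside the writer and safety selection, which is what lets later get and set operations encrypt and decrypt subkey data transparently (see maybe_encrypt_value_data earlier in this diff). A hypothetical construction, valid only inside the storage_manager module since OpenedRecord is module-private:

    // Hypothetical wrapper around the changed OpenedRecord::new() signature above;
    // `encryption_key` is Some(...) only for records created with DHT encryption.
    fn open_record_sketch(
        writer: Option<KeyPair>,
        safety_selection: SafetySelection,
        encryption_key: Option<BareSharedSecret>,
    ) -> OpenedRecord {
        OpenedRecord::new(writer, safety_selection, encryption_key)
    }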
@ -1,6 +1,6 @@
use super::*;

#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize)]
#[derive(Clone, Debug, Serialize, Deserialize)]
pub(super) struct RecordData {
    signed_value_data: Arc<SignedValueData>,
}
@ -4,7 +4,7 @@ use super::{inspect_value::OutboundInspectValueResult, *};
|
|||
#[derive(Debug, Clone)]
|
||||
pub struct RehydrateReport {
|
||||
/// The record key rehydrated
|
||||
record_key: RecordKey,
|
||||
opaque_record_key: OpaqueRecordKey,
|
||||
/// The requested range of subkeys to rehydrate if necessary
|
||||
subkeys: ValueSubkeyRangeSet,
|
||||
/// The requested consensus count,
|
||||
|
@ -24,7 +24,7 @@ impl StorageManager {
|
|||
#[instrument(level = "trace", target = "stor", skip_all)]
|
||||
pub async fn add_rehydration_request(
|
||||
&self,
|
||||
record_key: RecordKey,
|
||||
opaque_record_key: OpaqueRecordKey,
|
||||
subkeys: ValueSubkeyRangeSet,
|
||||
consensus_count: usize,
|
||||
) {
|
||||
|
@ -32,11 +32,11 @@ impl StorageManager {
|
|||
subkeys,
|
||||
consensus_count,
|
||||
};
|
||||
veilid_log!(self debug "Adding rehydration request: {} {:?}", record_key, req);
|
||||
veilid_log!(self debug "Adding rehydration request: {} {:?}", opaque_record_key, req);
|
||||
let mut inner = self.inner.lock().await;
|
||||
inner
|
||||
.rehydration_requests
|
||||
.entry(record_key)
|
||||
.entry(opaque_record_key)
|
||||
.and_modify(|r| {
|
||||
r.subkeys = r.subkeys.union(&req.subkeys);
|
||||
r.consensus_count.max_assign(req.consensus_count);
|
||||
|
@ -55,11 +55,11 @@ impl StorageManager {
|
|||
#[instrument(level = "trace", target = "stor", skip(self), ret, err)]
|
||||
pub(super) async fn rehydrate_record(
|
||||
&self,
|
||||
record_key: RecordKey,
|
||||
opaque_record_key: OpaqueRecordKey,
|
||||
subkeys: ValueSubkeyRangeSet,
|
||||
consensus_count: usize,
|
||||
) -> VeilidAPIResult<RehydrateReport> {
|
||||
veilid_log!(self debug "Checking for record rehydration: {} {} @ consensus {}", record_key, subkeys, consensus_count);
|
||||
veilid_log!(self debug "Checking for record rehydration: {} {} @ consensus {}", opaque_record_key, subkeys, consensus_count);
|
||||
// Get subkey range for consideration
|
||||
let subkeys = if subkeys.is_empty() {
|
||||
ValueSubkeyRangeSet::full()
|
||||
|
@ -70,7 +70,7 @@ impl StorageManager {
|
|||
// Get safety selection
|
||||
let mut inner = self.inner.lock().await;
|
||||
let safety_selection = {
|
||||
if let Some(opened_record) = inner.opened_records.get(&record_key) {
|
||||
if let Some(opened_record) = inner.opened_records.get(&opaque_record_key) {
|
||||
opened_record.safety_selection()
|
||||
} else {
|
||||
// See if it's in the local record store
|
||||
|
@ -78,9 +78,11 @@ impl StorageManager {
|
|||
apibail_not_initialized!();
|
||||
};
|
||||
let Some(safety_selection) = local_record_store
|
||||
.with_record(&record_key, |rec| rec.detail().safety_selection.clone())
|
||||
.with_record(&opaque_record_key, |rec| {
|
||||
rec.detail().safety_selection.clone()
|
||||
})
|
||||
else {
|
||||
apibail_key_not_found!(record_key);
|
||||
apibail_key_not_found!(opaque_record_key);
|
||||
};
|
||||
safety_selection
|
||||
}
|
||||
|
@ -88,7 +90,12 @@ impl StorageManager {
|
|||
|
||||
// See if the requested record is our local record store
|
||||
let local_inspect_result = self
|
||||
.handle_inspect_local_value_inner(&mut inner, record_key.clone(), subkeys.clone(), true)
|
||||
.handle_inspect_local_value_inner(
|
||||
&mut inner,
|
||||
opaque_record_key.clone(),
|
||||
subkeys.clone(),
|
||||
true,
|
||||
)
|
||||
.await?;
|
||||
|
||||
// Get rpc processor and drop mutex so we don't block while getting the value from the network
|
||||
|
@ -105,7 +112,7 @@ impl StorageManager {
|
|||
// Get the inspect record report from the network
|
||||
let result = self
|
||||
.outbound_inspect_value(
|
||||
record_key.clone(),
|
||||
opaque_record_key.clone(),
|
||||
local_inspect_result.subkeys().clone(),
|
||||
safety_selection.clone(),
|
||||
InspectResult::default(),
|
||||
|
@ -119,7 +126,7 @@ impl StorageManager {
|
|||
{
|
||||
return self
|
||||
.rehydrate_all_subkeys(
|
||||
record_key.clone(),
|
||||
opaque_record_key.clone(),
|
||||
subkeys,
|
||||
consensus_count,
|
||||
safety_selection,
|
||||
|
@ -130,7 +137,7 @@ impl StorageManager {
|
|||
|
||||
return self
|
||||
.rehydrate_required_subkeys(
|
||||
record_key.clone(),
|
||||
opaque_record_key.clone(),
|
||||
subkeys,
|
||||
consensus_count,
|
||||
safety_selection,
|
||||
|
@ -143,18 +150,18 @@ impl StorageManager {
|
|||
async fn rehydrate_single_subkey_inner(
|
||||
&self,
|
||||
inner: &mut StorageManagerInner,
|
||||
record_key: RecordKey,
|
||||
opaque_record_key: OpaqueRecordKey,
|
||||
subkey: ValueSubkey,
|
||||
safety_selection: SafetySelection,
|
||||
) -> bool {
|
||||
// Get value to rehydrate with
|
||||
let get_result = match self
|
||||
.handle_get_local_value_inner(inner, record_key.clone(), subkey, false)
|
||||
.handle_get_local_value_inner(inner, opaque_record_key.clone(), subkey, false)
|
||||
.await
|
||||
{
|
||||
Ok(v) => v,
|
||||
Err(e) => {
|
||||
veilid_log!(self debug "Missing local record for rehydrating subkey: record={} subkey={}: {}", record_key, subkey, e);
|
||||
veilid_log!(self debug "Missing local record for rehydrating subkey: record={} subkey={}: {}", opaque_record_key, subkey, e);
|
||||
return false;
|
||||
}
|
||||
};
|
||||
|
@ -162,14 +169,20 @@ impl StorageManager {
|
|||
let data = match get_result.opt_value {
|
||||
Some(v) => v,
|
||||
None => {
|
||||
veilid_log!(self debug "Missing local subkey data for rehydrating subkey: record={} subkey={}", record_key, subkey);
|
||||
veilid_log!(self debug "Missing local subkey data for rehydrating subkey: record={} subkey={}", opaque_record_key, subkey);
|
||||
return false;
|
||||
}
|
||||
};
|
||||
|
||||
// Add to offline writes to flush
|
||||
veilid_log!(self debug "Rehydrating: record={} subkey={}", record_key, subkey);
|
||||
self.add_offline_subkey_write_inner(inner, record_key, subkey, safety_selection, data);
|
||||
veilid_log!(self debug "Rehydrating: record={} subkey={}", opaque_record_key, subkey);
|
||||
self.add_offline_subkey_write_inner(
|
||||
inner,
|
||||
opaque_record_key,
|
||||
subkey,
|
||||
safety_selection,
|
||||
data,
|
||||
);
|
||||
|
||||
true
|
||||
}
|
||||
|
@ -177,7 +190,7 @@ impl StorageManager {
|
|||
#[instrument(level = "trace", target = "stor", skip(self), ret, err)]
|
||||
pub(super) async fn rehydrate_all_subkeys(
|
||||
&self,
|
||||
record_key: RecordKey,
|
||||
opaque_record_key: OpaqueRecordKey,
|
||||
subkeys: ValueSubkeyRangeSet,
|
||||
consensus_count: usize,
|
||||
safety_selection: SafetySelection,
|
||||
|
@ -185,7 +198,7 @@ impl StorageManager {
|
|||
) -> VeilidAPIResult<RehydrateReport> {
|
||||
let mut inner = self.inner.lock().await;
|
||||
|
||||
veilid_log!(self debug "Rehydrating all subkeys: record={} subkeys={}", record_key, subkeys);
|
||||
veilid_log!(self debug "Rehydrating all subkeys: record={} subkeys={}", opaque_record_key, subkeys);
|
||||
|
||||
let mut rehydrated = ValueSubkeyRangeSet::new();
|
||||
for (n, subkey) in local_inspect_result.subkeys().iter().enumerate() {
|
||||
|
@ -194,7 +207,7 @@ impl StorageManager {
|
|||
if self
|
||||
.rehydrate_single_subkey_inner(
|
||||
&mut inner,
|
||||
record_key.clone(),
|
||||
opaque_record_key.clone(),
|
||||
subkey,
|
||||
safety_selection.clone(),
|
||||
)
|
||||
|
@ -206,13 +219,13 @@ impl StorageManager {
|
|||
}
|
||||
|
||||
if rehydrated.is_empty() {
|
||||
veilid_log!(self debug "Record wanted full rehydrating, but no subkey data available: record={} subkeys={}", record_key, subkeys);
|
||||
veilid_log!(self debug "Record wanted full rehydrating, but no subkey data available: record={} subkeys={}", opaque_record_key, subkeys);
|
||||
} else {
|
||||
veilid_log!(self debug "Record full rehydrating: record={} subkeys={} rehydrated={}", record_key, subkeys, rehydrated);
|
||||
veilid_log!(self debug "Record full rehydrating: record={} subkeys={} rehydrated={}", opaque_record_key, subkeys, rehydrated);
|
||||
}
|
||||
|
||||
return Ok(RehydrateReport {
|
||||
record_key,
|
||||
opaque_record_key,
|
||||
subkeys,
|
||||
consensus_count,
|
||||
rehydrated,
|
||||
|
@ -222,7 +235,7 @@ impl StorageManager {
|
|||
#[instrument(level = "trace", target = "stor", skip(self), ret, err)]
|
||||
pub(super) async fn rehydrate_required_subkeys(
|
||||
&self,
|
||||
record_key: RecordKey,
|
||||
opaque_record_key: OpaqueRecordKey,
|
||||
subkeys: ValueSubkeyRangeSet,
|
||||
consensus_count: usize,
|
||||
safety_selection: SafetySelection,
|
||||
|
@ -249,7 +262,7 @@ impl StorageManager {
|
|||
if self
|
||||
.rehydrate_single_subkey_inner(
|
||||
&mut inner,
|
||||
record_key.clone(),
|
||||
opaque_record_key.clone(),
|
||||
subkey,
|
||||
safety_selection.clone(),
|
||||
)
|
||||
|
@ -261,9 +274,9 @@ impl StorageManager {
|
|||
}
|
||||
|
||||
if rehydrated.is_empty() {
|
||||
veilid_log!(self debug "Record did not need rehydrating: record={} local_subkeys={}", record_key, local_inspect_result.subkeys());
|
||||
veilid_log!(self debug "Record did not need rehydrating: record={} local_subkeys={}", opaque_record_key, local_inspect_result.subkeys());
|
||||
} else {
|
||||
veilid_log!(self debug "Record rehydrating: record={} local_subkeys={} rehydrated={}", record_key, local_inspect_result.subkeys(), rehydrated);
|
||||
veilid_log!(self debug "Record rehydrating: record={} local_subkeys={} rehydrated={}", opaque_record_key, local_inspect_result.subkeys(), rehydrated);
|
||||
}
|
||||
|
||||
// Keep the list of nodes that returned a value for later reference
|
||||
|
@ -276,7 +289,7 @@ impl StorageManager {
|
|||
|
||||
Self::process_fanout_results_inner(
|
||||
&mut inner,
|
||||
record_key.clone(),
|
||||
opaque_record_key.clone(),
|
||||
results_iter,
|
||||
false,
|
||||
self.config()
|
||||
|
@ -284,7 +297,7 @@ impl StorageManager {
|
|||
);
|
||||
|
||||
Ok(RehydrateReport {
|
||||
record_key,
|
||||
opaque_record_key,
|
||||
subkeys,
|
||||
consensus_count,
|
||||
rehydrated,
|
||||
|
|
|
@ -32,8 +32,10 @@ impl StorageManager {
|
|||
schema: &DHTSchema,
|
||||
owner: &PublicKey,
|
||||
subkey: ValueSubkey,
|
||||
value_data: &ValueData,
|
||||
value_data: &EncryptedValueData,
|
||||
) -> VeilidAPIResult<()> {
|
||||
// First verify the record key
|
||||
|
||||
match schema {
|
||||
DHTSchema::DFLT(d) => self.check_subkey_value_data_dflt(d, owner, subkey, value_data),
|
||||
DHTSchema::SMPL(s) => self.check_subkey_value_data_smpl(s, owner, subkey, value_data),
|
||||
|
@ -46,14 +48,14 @@ impl StorageManager {
|
|||
schema_dflt: &DHTSchemaDFLT,
|
||||
owner: &PublicKey,
|
||||
subkey: ValueSubkey,
|
||||
value_data: &ValueData,
|
||||
value_data: &EncryptedValueData,
|
||||
) -> VeilidAPIResult<()> {
|
||||
let subkey = subkey as usize;
|
||||
|
||||
// Check if subkey is in owner range
|
||||
if subkey < (schema_dflt.o_cnt() as usize) {
|
||||
// Check value data has valid writer
|
||||
if value_data.ref_writer() == owner {
|
||||
if &value_data.writer() == owner {
|
||||
let max_value_len = usize::min(
|
||||
MAX_SUBKEY_SIZE,
|
||||
MAX_RECORD_DATA_SIZE / schema_dflt.o_cnt() as usize,
|
||||
|
@ -90,7 +92,7 @@ impl StorageManager {
|
|||
schema_smpl: &DHTSchemaSMPL,
|
||||
owner: &PublicKey,
|
||||
subkey: ValueSubkey,
|
||||
value_data: &ValueData,
|
||||
value_data: &EncryptedValueData,
|
||||
) -> VeilidAPIResult<()> {
|
||||
let mut cur_subkey = subkey as usize;
|
||||
|
||||
|
@ -102,7 +104,7 @@ impl StorageManager {
|
|||
// Check if subkey is in owner range
|
||||
if cur_subkey < (schema_smpl.o_cnt() as usize) {
|
||||
// Check value data has valid writer
|
||||
if value_data.ref_writer() == owner {
|
||||
if &value_data.writer() == owner {
|
||||
// Ensure value size is within additional limit
|
||||
if value_data.data_size() <= max_value_len {
|
||||
return Ok(());
|
||||
|
@ -124,7 +126,7 @@ impl StorageManager {
|
|||
}
|
||||
cur_subkey -= schema_smpl.o_cnt() as usize;
|
||||
|
||||
let writer_hash = self.generate_member_id(value_data.ref_writer())?;
|
||||
let writer_hash = self.generate_member_id(&value_data.writer())?;
|
||||
|
||||
// Check all member ranges
|
||||
for m in schema_smpl.members() {
|
||||
|
|
|
@ -28,7 +28,7 @@ impl StorageManager {
|
|||
#[instrument(level = "trace", target = "dht", skip_all, err)]
|
||||
pub(super) async fn outbound_set_value(
|
||||
&self,
|
||||
record_key: RecordKey,
|
||||
opaque_record_key: OpaqueRecordKey,
|
||||
subkey: ValueSubkey,
|
||||
safety_selection: SafetySelection,
|
||||
value: Arc<SignedValueData>,
|
||||
|
@ -48,7 +48,7 @@ impl StorageManager {
|
|||
|
||||
// Get the nodes we know are caching this value to seed the fanout
|
||||
let init_fanout_queue = {
|
||||
self.get_value_nodes(record_key.clone())
|
||||
self.get_value_nodes(opaque_record_key.clone())
|
||||
.await?
|
||||
.unwrap_or_default()
|
||||
.into_iter()
|
||||
|
@ -76,7 +76,7 @@ impl StorageManager {
|
|||
let call_routine = {
|
||||
let context = context.clone();
|
||||
let registry = self.registry();
|
||||
let record_key = record_key.clone();
|
||||
let opaque_record_key = opaque_record_key.clone();
|
||||
let safety_selection = safety_selection.clone();
|
||||
|
||||
Arc::new(
|
||||
|
@ -84,7 +84,7 @@ impl StorageManager {
|
|||
let registry = registry.clone();
|
||||
let context = context.clone();
|
||||
let descriptor = descriptor.clone();
|
||||
let record_key = record_key.clone();
|
||||
let opaque_record_key = opaque_record_key.clone();
|
||||
let safety_selection = safety_selection.clone();
|
||||
Box::pin(async move {
|
||||
let rpc_processor = registry.rpc_processor();
|
||||
|
@ -103,7 +103,7 @@ impl StorageManager {
|
|||
.rpc_call_set_value(
|
||||
Destination::direct(next_node.routing_domain_filtered(routing_domain))
|
||||
.with_safety(safety_selection),
|
||||
record_key.clone(),
|
||||
opaque_record_key.clone(),
|
||||
subkey,
|
||||
(*value).clone(),
|
||||
(*descriptor).clone(),
|
||||
|
@ -232,7 +232,7 @@ impl StorageManager {
|
|||
let routing_table = registry.routing_table();
|
||||
let fanout_call = FanoutCall::new(
|
||||
&routing_table,
|
||||
record_key.to_hash_coordinate(),
|
||||
opaque_record_key.to_hash_coordinate(),
|
||||
key_count,
|
||||
fanout,
|
||||
consensus_count,
|
||||
|
@ -361,12 +361,12 @@ impl StorageManager {
|
|||
|
||||
// Report on fanout result offline
|
||||
let was_offline =
|
||||
self.check_fanout_set_offline(record_key.clone(), subkey, &result.fanout_result);
|
||||
self.check_fanout_set_offline(record_key.opaque(), subkey, &result.fanout_result);
|
||||
if was_offline {
|
||||
// Failed to write, try again later
|
||||
self.add_offline_subkey_write_inner(
|
||||
&mut inner,
|
||||
record_key.clone(),
|
||||
record_key.opaque(),
|
||||
subkey,
|
||||
safety_selection,
|
||||
result.signed_value_data.clone(),
|
||||
|
@ -376,7 +376,7 @@ impl StorageManager {
|
|||
// Keep the list of nodes that returned a value for later reference
|
||||
Self::process_fanout_results_inner(
|
||||
&mut inner,
|
||||
record_key.clone(),
|
||||
record_key.opaque(),
|
||||
core::iter::once((ValueSubkeyRangeSet::single(subkey), result.fanout_result)),
|
||||
true,
|
||||
self.config()
|
||||
|
@ -386,16 +386,19 @@ impl StorageManager {
|
|||
// Record the set value locally since it was successfully set online
|
||||
self.handle_set_local_value_inner(
|
||||
&mut inner,
|
||||
record_key,
|
||||
record_key.opaque(),
|
||||
subkey,
|
||||
result.signed_value_data.clone(),
|
||||
InboundWatchUpdateMode::UpdateAll,
|
||||
)
|
||||
.await?;
|
||||
|
||||
let value_data =
|
||||
self.maybe_decrypt_value_data(&record_key, result.signed_value_data.value_data())?;
|
||||
|
||||
// Return the new value if it differs from what was asked to set
|
||||
if result.signed_value_data.value_data() != &requested_value_data {
|
||||
return Ok(Some(result.signed_value_data.value_data().clone()));
|
||||
if value_data != requested_value_data {
|
||||
return Ok(Some(value_data));
|
||||
}
|
||||
|
||||
// If the original value was set, return None
|
||||
|
@ -408,7 +411,7 @@ impl StorageManager {
|
|||
#[instrument(level = "trace", target = "dht", skip_all)]
|
||||
pub async fn inbound_set_value(
|
||||
&self,
|
||||
record_key: RecordKey,
|
||||
opaque_record_key: OpaqueRecordKey,
|
||||
subkey: ValueSubkey,
|
||||
value: Arc<SignedValueData>,
|
||||
descriptor: Option<Arc<SignedValueDescriptor>>,
|
||||
|
@ -420,7 +423,7 @@ impl StorageManager {
|
|||
let (is_local, last_get_result) = {
|
||||
// See if the subkey we are modifying has a last known local value
|
||||
let last_get_result = self
|
||||
.handle_get_local_value_inner(&mut inner, record_key.clone(), subkey, true)
|
||||
.handle_get_local_value_inner(&mut inner, opaque_record_key.clone(), subkey, true)
|
||||
.await?;
|
||||
// If this is local, it must have a descriptor already
|
||||
if last_get_result.opt_descriptor.is_some() {
|
||||
|
@ -429,7 +432,7 @@ impl StorageManager {
|
|||
// See if the subkey we are modifying has a last known remote value
|
||||
let last_get_result = Self::handle_get_remote_value_inner(
|
||||
&mut inner,
|
||||
record_key.clone(),
|
||||
opaque_record_key.clone(),
|
||||
subkey,
|
||||
true,
|
||||
)
|
||||
|
@ -503,7 +506,7 @@ impl StorageManager {
|
|||
let res = if is_local {
|
||||
self.handle_set_local_value_inner(
|
||||
&mut inner,
|
||||
record_key.clone(),
|
||||
opaque_record_key.clone(),
|
||||
subkey,
|
||||
value,
|
||||
InboundWatchUpdateMode::ExcludeTarget(target),
|
||||
|
@ -512,7 +515,7 @@ impl StorageManager {
|
|||
} else {
|
||||
Self::handle_set_remote_value_inner(
|
||||
&mut inner,
|
||||
record_key.clone(),
|
||||
opaque_record_key.clone(),
|
||||
subkey,
|
||||
value,
|
||||
actual_descriptor,
|
||||
|
|
|
@ -13,7 +13,7 @@ enum OfflineSubkeyWriteResult {
|
|||
|
||||
#[derive(Debug)]
|
||||
struct WorkItem {
|
||||
record_key: RecordKey,
|
||||
opaque_record_key: OpaqueRecordKey,
|
||||
safety_selection: SafetySelection,
|
||||
subkeys: ValueSubkeyRangeSet,
|
||||
}
|
||||
|
@ -31,7 +31,7 @@ impl StorageManager {
|
|||
async fn write_single_offline_subkey(
|
||||
&self,
|
||||
stop_token: StopToken,
|
||||
key: RecordKey,
|
||||
opaque_record_key: OpaqueRecordKey,
|
||||
subkey: ValueSubkey,
|
||||
safety_selection: SafetySelection,
|
||||
) -> EyreResult<OfflineSubkeyWriteResult> {
|
||||
|
@ -41,27 +41,27 @@ impl StorageManager {
|
|||
};
|
||||
let get_result = {
|
||||
let mut inner = self.inner.lock().await;
|
||||
self.handle_get_local_value_inner(&mut inner, key.clone(), subkey, true)
|
||||
self.handle_get_local_value_inner(&mut inner, opaque_record_key.clone(), subkey, true)
|
||||
.await
|
||||
};
|
||||
let Ok(get_result) = get_result else {
|
||||
veilid_log!(self debug "Offline subkey write had no subkey result: {}:{}", key, subkey);
|
||||
veilid_log!(self debug "Offline subkey write had no subkey result: {}:{}", opaque_record_key, subkey);
|
||||
// drop this one
|
||||
return Ok(OfflineSubkeyWriteResult::Dropped);
|
||||
};
|
||||
let Some(value) = get_result.opt_value else {
|
||||
veilid_log!(self debug "Offline subkey write had no subkey value: {}:{}", key, subkey);
|
||||
veilid_log!(self debug "Offline subkey write had no subkey value: {}:{}", opaque_record_key, subkey);
|
||||
// drop this one
|
||||
return Ok(OfflineSubkeyWriteResult::Dropped);
|
||||
};
|
||||
let Some(descriptor) = get_result.opt_descriptor else {
|
||||
veilid_log!(self debug "Offline subkey write had no descriptor: {}:{}", key, subkey);
|
||||
veilid_log!(self debug "Offline subkey write had no descriptor: {}:{}", opaque_record_key, subkey);
|
||||
return Ok(OfflineSubkeyWriteResult::Dropped);
|
||||
};
|
||||
veilid_log!(self debug "Offline subkey write: {}:{} len={}", key, subkey, value.value_data().data().len());
|
||||
veilid_log!(self debug "Offline subkey write: {}:{} len={}", opaque_record_key, subkey, value.value_data().data().len());
|
||||
let osvres = self
|
||||
.outbound_set_value(
|
||||
key.clone(),
|
||||
opaque_record_key.clone(),
|
||||
subkey,
|
||||
safety_selection,
|
||||
value.clone(),
|
||||
|
@ -86,7 +86,7 @@ impl StorageManager {
|
|||
|
||||
self.handle_set_local_value_inner(
|
||||
&mut inner,
|
||||
key.clone(),
|
||||
opaque_record_key.clone(),
|
||||
subkey,
|
||||
result.signed_value_data.clone(),
|
||||
InboundWatchUpdateMode::UpdateAll,
|
||||
|
@ -97,16 +97,16 @@ impl StorageManager {
|
|||
return Ok(OfflineSubkeyWriteResult::Finished(result));
|
||||
}
|
||||
Err(e) => {
|
||||
veilid_log!(self debug "failed to get offline subkey write result: {}:{} {}", key, subkey, e);
|
||||
veilid_log!(self debug "failed to get offline subkey write result: {}:{} {}", opaque_record_key, subkey, e);
|
||||
return Ok(OfflineSubkeyWriteResult::Cancelled);
|
||||
}
|
||||
}
|
||||
}
|
||||
veilid_log!(self debug "writing offline subkey did not complete {}:{}", key, subkey);
|
||||
veilid_log!(self debug "writing offline subkey did not complete {}:{}", opaque_record_key, subkey);
|
||||
return Ok(OfflineSubkeyWriteResult::Cancelled);
|
||||
}
|
||||
Err(e) => {
|
||||
veilid_log!(self debug "failed to write offline subkey: {}:{} {}", key, subkey, e);
|
||||
veilid_log!(self debug "failed to write offline subkey: {}:{} {}", opaque_record_key, subkey, e);
|
||||
return Ok(OfflineSubkeyWriteResult::Cancelled);
|
||||
}
|
||||
}
|
||||
|
@ -130,7 +130,7 @@ impl StorageManager {
|
|||
let result = match self
|
||||
.write_single_offline_subkey(
|
||||
stop_token.clone(),
|
||||
work_item.record_key.clone(),
|
||||
work_item.opaque_record_key.clone(),
|
||||
subkey,
|
||||
work_item.safety_selection.clone(),
|
||||
)
|
||||
|
@ -150,7 +150,7 @@ impl StorageManager {
|
|||
|
||||
// Process non-partial setvalue result
|
||||
let was_offline = self.check_fanout_set_offline(
|
||||
work_item.record_key.clone(),
|
||||
work_item.opaque_record_key.clone(),
|
||||
subkey,
|
||||
&result.fanout_result,
|
||||
);
|
||||
|
@ -183,7 +183,7 @@ impl StorageManager {
|
|||
let subkeys_still_offline = result.work_item.subkeys.difference(&result.written_subkeys);
|
||||
self.finish_offline_subkey_writes_inner(
|
||||
&mut inner,
|
||||
result.work_item.record_key.clone(),
|
||||
result.work_item.opaque_record_key.clone(),
|
||||
result.written_subkeys,
|
||||
subkeys_still_offline,
|
||||
);
|
||||
|
@ -191,7 +191,7 @@ impl StorageManager {
|
|||
// Keep the list of nodes that returned a value for later reference
|
||||
Self::process_fanout_results_inner(
|
||||
&mut inner,
|
||||
result.work_item.record_key,
|
||||
result.work_item.opaque_record_key,
|
||||
result.fanout_results.into_iter().map(|x| (x.0, x.1)),
|
||||
true,
|
||||
consensus_count,
|
||||
|
@ -205,7 +205,7 @@ impl StorageManager {
|
|||
// Find first offline subkey write record
|
||||
// That doesn't have the maximum number of concurrent
|
||||
// in-flight subkeys right now
|
||||
for (record_key, osw) in &mut inner.offline_subkey_writes {
|
||||
for (opaque_record_key, osw) in &mut inner.offline_subkey_writes {
|
||||
if osw.subkeys_in_flight.len() < OFFLINE_SUBKEY_WRITES_SUBKEY_CHUNK_SIZE {
|
||||
// Get first subkey to process that is not already in-flight
|
||||
for sk in osw.subkeys.iter() {
|
||||
|
@ -215,7 +215,7 @@ impl StorageManager {
|
|||
osw.subkeys_in_flight.insert(sk);
|
||||
// And return a work item for it
|
||||
return Some(WorkItem {
|
||||
record_key: record_key.clone(),
|
||||
opaque_record_key: opaque_record_key.clone(),
|
||||
safety_selection: osw.safety_selection.clone(),
|
||||
subkeys: ValueSubkeyRangeSet::single(sk),
|
||||
});
|
||||
|
|
|
@ -2,15 +2,15 @@ use super::*;

/////////////////////////////////////////////////////////////////////////////////////////////////////

#[derive(Clone, Debug, PartialOrd, PartialEq, Eq, Ord, Serialize, Deserialize)]
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct SignedValueData {
    value_data: ValueData,
    value_data: EncryptedValueData,
    signature: Signature,
}
impl SignedValueData {
    pub fn new(value_data: ValueData, signature: Signature) -> Self {
    pub fn new(encrypted_value_data: EncryptedValueData, signature: Signature) -> Self {
        Self {
            value_data,
            value_data: encrypted_value_data,
            signature,
        }
    }

@ -21,39 +21,55 @@ impl SignedValueData {
        subkey: ValueSubkey,
        vcrypto: &CryptoSystemGuard<'_>,
    ) -> VeilidAPIResult<bool> {
        let node_info_bytes = Self::make_signature_bytes(&self.value_data, owner, subkey)?;
        if vcrypto.kind() != self.value_data.ref_writer().kind() {
        let writer = self.value_data.writer();
        if vcrypto.kind() != writer.kind() {
            return Ok(false);
        }
        if vcrypto.kind() != self.signature.kind() {
            return Ok(false);
        }
        // validate signature
        vcrypto.verify(
            self.value_data.ref_writer(),
            &node_info_bytes,
            &self.signature,
        )

        if let Some(_nonce) = self.value_data.nonce() {
            // new approach, verify the whole capnp blob as is
            let value_data_bytes = self.value_data.raw_blob();
            // validate signature
            vcrypto.verify(&writer, value_data_bytes, &self.signature)
        } else {
            // old approach, use make_signature_bytes()
            let value_data_bytes = Self::make_signature_bytes(&self.value_data, owner, subkey)?;
            // validate signature
            vcrypto.verify(&writer, &value_data_bytes, &self.signature)
        }
    }

    pub fn make_signature(
        value_data: ValueData,
        value_data: EncryptedValueData,
        owner: &PublicKey,
        subkey: ValueSubkey,
        vcrypto: &CryptoSystemGuard<'_>,
        writer_secret: &SecretKey,
    ) -> VeilidAPIResult<Self> {
        let node_info_bytes = Self::make_signature_bytes(&value_data, owner, subkey)?;
        let writer = value_data.writer();

        let signature = if let Some(_nonce) = value_data.nonce() {
            // new approach, sign the whole capnp blob as is
            let value_data_bytes = value_data.raw_blob();
            // create signature
            vcrypto.sign(&writer, writer_secret, value_data_bytes)?
        } else {
            // old approach, use make_signature_bytes()
            let value_data_bytes = Self::make_signature_bytes(&value_data, owner, subkey)?;
            // create signature
            vcrypto.sign(&writer, writer_secret, &value_data_bytes)?
        };

        // create signature
        let signature = vcrypto.sign(value_data.ref_writer(), writer_secret, &node_info_bytes)?;
        Ok(Self {
            value_data,
            signature,
        })
    }

    pub fn value_data(&self) -> &ValueData {
    pub fn value_data(&self) -> &EncryptedValueData {
        &self.value_data
    }

@ -66,11 +82,12 @@ impl SignedValueData {
    }

    pub fn total_size(&self) -> usize {
        (mem::size_of::<Self>() - mem::size_of::<ValueData>()) + self.value_data.total_size()
        (mem::size_of::<Self>() - mem::size_of::<EncryptedValueData>())
            + self.value_data.total_size()
    }

    fn make_signature_bytes(
        value_data: &ValueData,
        value_data: &EncryptedValueData,
        owner: &PublicKey,
        subkey: ValueSubkey,
    ) -> VeilidAPIResult<Vec<u8>> {

@ -95,7 +112,7 @@ impl SignedValueData {
        // Add sequence number to signature
        node_info_bytes.extend_from_slice(&value_data.seq().to_le_bytes());
        // Add data to signature
        node_info_bytes.extend_from_slice(value_data.data());
        node_info_bytes.extend_from_slice(&value_data.data());

        Ok(node_info_bytes)
    }
@ -19,7 +19,11 @@ impl SignedValueDescriptor {
|
|||
}
|
||||
}
|
||||
|
||||
pub fn validate(&self, vcrypto: &CryptoSystemGuard<'_>) -> VeilidAPIResult<()> {
|
||||
pub fn validate(
|
||||
&self,
|
||||
vcrypto: &CryptoSystemGuard<'_>,
|
||||
opaque_record_key: &OpaqueRecordKey,
|
||||
) -> VeilidAPIResult<()> {
|
||||
if self.owner.kind() != vcrypto.kind() {
|
||||
apibail_parse_error!(
|
||||
"wrong kind of owner for signed value descriptor",
|
||||
|
@ -39,8 +43,19 @@ impl SignedValueDescriptor {
|
|||
&self.signature
|
||||
);
|
||||
}
|
||||
// validate schema
|
||||
// validate schema bytes
|
||||
let _ = DHTSchema::try_from(self.schema_data.as_slice())?;
|
||||
|
||||
// Verify record key matches
|
||||
let verify_key = StorageManager::make_opaque_record_key(
|
||||
vcrypto,
|
||||
self.ref_owner().ref_value(),
|
||||
self.schema_data(),
|
||||
);
|
||||
if opaque_record_key != &verify_key {
|
||||
apibail_parse_error!("failed to validate record key match", verify_key);
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
|
|
|
@ -67,7 +67,7 @@ impl StorageManager {
|
|||
.rpc_call_watch_value(
|
||||
Destination::direct(watch_node.routing_domain_filtered(routing_domain))
|
||||
.with_safety(safety_selection),
|
||||
record_key,
|
||||
record_key.opaque(),
|
||||
ValueSubkeyRangeSet::new(),
|
||||
Timestamp::default(),
|
||||
0,
|
||||
|
@ -113,7 +113,7 @@ impl StorageManager {
|
|||
pin_future!(self.rpc_processor().rpc_call_watch_value(
|
||||
Destination::direct(watch_node.routing_domain_filtered(routing_domain))
|
||||
.with_safety(params.safety_selection),
|
||||
record_key,
|
||||
record_key.opaque(),
|
||||
params.subkeys,
|
||||
params.expiration_ts,
|
||||
params.count,
|
||||
|
@ -184,7 +184,7 @@ impl StorageManager {
|
|||
|
||||
// Get the nodes we know are caching this value to seed the fanout
|
||||
let init_fanout_queue = {
|
||||
self.get_value_nodes(record_key.clone())
|
||||
self.get_value_nodes(record_key.opaque())
|
||||
.await?
|
||||
.unwrap_or_default()
|
||||
.into_iter()
|
||||
|
@ -229,7 +229,7 @@ impl StorageManager {
|
|||
rpc_processor
|
||||
.rpc_call_watch_value(
|
||||
Destination::direct(next_node.routing_domain_filtered(routing_domain)).with_safety(params.safety_selection),
|
||||
record_key.clone(),
|
||||
record_key.opaque(),
|
||||
params.subkeys,
|
||||
params.expiration_ts,
|
||||
params.count,
|
||||
|
@ -348,7 +348,7 @@ impl StorageManager {
|
|||
let mut inner = self.inner.lock().await;
|
||||
Self::process_fanout_results_inner(
|
||||
&mut inner,
|
||||
record_key,
|
||||
record_key.opaque(),
|
||||
core::iter::once((ValueSubkeyRangeSet::new(), fanout_result)),
|
||||
false,
|
||||
self.config()
|
||||
|
@ -1009,7 +1009,7 @@ impl StorageManager {
|
|||
#[instrument(level = "trace", target = "dht", skip_all)]
|
||||
pub async fn inbound_watch_value(
|
||||
&self,
|
||||
key: RecordKey,
|
||||
opaque_record_key: OpaqueRecordKey,
|
||||
params: InboundWatchParameters,
|
||||
watch_id: Option<u64>,
|
||||
) -> VeilidAPIResult<NetworkResult<InboundWatchResult>> {
|
||||
|
@ -1027,18 +1027,18 @@ impl StorageManager {
|
|||
let Some(local_record_store) = inner.local_record_store.as_mut() else {
|
||||
apibail_not_initialized!();
|
||||
};
|
||||
if local_record_store.contains_record(&key) {
|
||||
if local_record_store.contains_record(&opaque_record_key) {
|
||||
return local_record_store
|
||||
.watch_record(key, params, watch_id)
|
||||
.watch_record(opaque_record_key, params, watch_id)
|
||||
.await
|
||||
.map(NetworkResult::value);
|
||||
}
|
||||
let Some(remote_record_store) = inner.remote_record_store.as_mut() else {
|
||||
apibail_not_initialized!();
|
||||
};
|
||||
if remote_record_store.contains_record(&key) {
|
||||
if remote_record_store.contains_record(&opaque_record_key) {
|
||||
return remote_record_store
|
||||
.watch_record(key, params, watch_id)
|
||||
.watch_record(opaque_record_key, params, watch_id)
|
||||
.await
|
||||
.map(NetworkResult::value);
|
||||
}
|
||||
|
@ -1050,13 +1050,21 @@ impl StorageManager {
|
|||
#[instrument(level = "debug", target = "watch", skip_all)]
|
||||
pub async fn inbound_value_changed(
|
||||
&self,
|
||||
record_key: RecordKey,
|
||||
opaque_record_key: OpaqueRecordKey,
|
||||
mut subkeys: ValueSubkeyRangeSet,
|
||||
count: u32,
|
||||
value: Option<Arc<SignedValueData>>,
|
||||
inbound_node_id: NodeId,
|
||||
watch_id: u64,
|
||||
) -> VeilidAPIResult<NetworkResult<()>> {
|
||||
let encryption_key = self
|
||||
.get_encryption_key_for_opaque_record_key(&opaque_record_key)
|
||||
.await?;
|
||||
let record_key = RecordKey::new(
|
||||
opaque_record_key.kind(),
|
||||
BareRecordKey::new(opaque_record_key.value(), encryption_key),
|
||||
);
|
||||
|
||||
// Operate on the watch for this record
|
||||
let watch_lock = self
|
||||
.outbound_watch_lock_table
|
||||
|
@ -1167,7 +1175,12 @@ impl StorageManager {
|
|||
let mut report_value_change = false;
|
||||
if let Some(value) = &value {
|
||||
let last_get_result = self
|
||||
.handle_get_local_value_inner(inner, record_key.clone(), first_subkey, true)
|
||||
.handle_get_local_value_inner(
|
||||
inner,
|
||||
opaque_record_key.clone(),
|
||||
first_subkey,
|
||||
true,
|
||||
)
|
||||
.await?;
|
||||
|
||||
let descriptor = last_get_result.opt_descriptor.unwrap();
|
||||
|
@ -1213,7 +1226,7 @@ impl StorageManager {
|
|||
if report_value_change {
|
||||
self.handle_set_local_value_inner(
|
||||
inner,
|
||||
record_key.clone(),
|
||||
opaque_record_key.clone(),
|
||||
first_subkey,
|
||||
value.clone(),
|
||||
InboundWatchUpdateMode::NoUpdate,
|
||||
|
@ -1266,11 +1279,14 @@ impl StorageManager {
|
|||
|
||||
drop(watch_lock);
|
||||
|
||||
let mut inner = self.inner.lock().await;
|
||||
|
||||
// Announce ValueChanged VeilidUpdate
|
||||
// Cancellations (count=0) are sent by process_outbound_watch_dead(), not here
|
||||
if report_value_change {
|
||||
let value = self.maybe_decrypt_value_data(&record_key, value.unwrap().value_data())?;
|
||||
|
||||
// We have a value with a newer sequence number to report
|
||||
let value = value.unwrap().value_data().clone();
|
||||
self.update_callback_value_change(
|
||||
record_key,
|
||||
reportable_subkeys,
|
||||
|
@ -1283,7 +1299,6 @@ impl StorageManager {
|
|||
// inspect the range to see what changed
|
||||
|
||||
// Queue this up for inspection
|
||||
let inner = &mut *self.inner.lock().await;
|
||||
inner
|
||||
.outbound_watch_manager
|
||||
.enqueue_change_inspect(record_key, reportable_subkeys);
|
||||
|
|
|
@ -5,8 +5,13 @@ use crate::*;
use lazy_static::*;

lazy_static! {
    static ref BOGUS_KEY: RecordKey =
        RecordKey::new(TEST_CRYPTO_KIND, BareRecordKey::new(&[0u8; 32]));
    static ref BOGUS_KEY: RecordKey = RecordKey::new(
        TEST_CRYPTO_KIND,
        BareRecordKey::new(
            BareOpaqueRecordKey::new(&[0u8; 32]),
            Some(BareSharedSecret::new(&[1u8; 32]))
        )
    );
}

pub async fn test_get_dht_value_unopened(api: VeilidAPI) {
@ -93,7 +98,11 @@ pub async fn test_get_dht_record_key(api: VeilidAPI) {

    // recreate the record key from the metadata alone
    let key = rc
        .get_dht_record_key(schema.clone(), &owner_keypair.key())
        .get_dht_record_key(
            schema.clone(),
            owner_keypair.key(),
            rec.key().encryption_key(),
        )
        .unwrap();

    // keys should be the same
@ -97,17 +97,35 @@ fn test_config() {
|
|||
assert_eq!(inner.network.application.http.path, "app");
|
||||
assert_eq!(inner.network.application.http.url, None);
|
||||
|
||||
#[cfg(all(target_arch = "wasm32", target_os = "unknown"))]
|
||||
assert!(!inner.network.protocol.udp.enabled);
|
||||
#[cfg(not(all(target_arch = "wasm32", target_os = "unknown")))]
|
||||
assert!(inner.network.protocol.udp.enabled);
|
||||
|
||||
assert_eq!(inner.network.protocol.udp.socket_pool_size, 0u32);
|
||||
assert_eq!(inner.network.protocol.udp.listen_address, "");
|
||||
assert_eq!(inner.network.protocol.udp.public_address, None);
|
||||
|
||||
#[cfg(all(target_arch = "wasm32", target_os = "unknown"))]
|
||||
assert!(!inner.network.protocol.tcp.connect);
|
||||
#[cfg(not(all(target_arch = "wasm32", target_os = "unknown")))]
|
||||
assert!(inner.network.protocol.tcp.connect);
|
||||
|
||||
#[cfg(all(target_arch = "wasm32", target_os = "unknown"))]
|
||||
assert!(!inner.network.protocol.tcp.listen);
|
||||
#[cfg(not(all(target_arch = "wasm32", target_os = "unknown")))]
|
||||
assert!(inner.network.protocol.tcp.listen);
|
||||
|
||||
assert_eq!(inner.network.protocol.tcp.max_connections, 32u32);
|
||||
assert_eq!(inner.network.protocol.tcp.listen_address, "");
|
||||
assert_eq!(inner.network.protocol.tcp.public_address, None);
|
||||
assert!(inner.network.protocol.ws.connect);
|
||||
|
||||
#[cfg(all(target_arch = "wasm32", target_os = "unknown"))]
|
||||
assert!(!inner.network.protocol.ws.listen);
|
||||
#[cfg(not(all(target_arch = "wasm32", target_os = "unknown")))]
|
||||
assert!(inner.network.protocol.ws.listen);
|
||||
|
||||
assert_eq!(inner.network.protocol.ws.max_connections, 32u32);
|
||||
assert_eq!(inner.network.protocol.ws.listen_address, "");
|
||||
assert_eq!(inner.network.protocol.ws.path, "ws");
|
||||
|
|
|
@ -238,10 +238,10 @@ fn get_number<T: num_traits::Num + FromStr>(text: &str) -> Option<T> {
|
|||
T::from_str(text).ok()
|
||||
}
|
||||
|
||||
fn get_typed_record_key(text: &str) -> Option<RecordKey> {
|
||||
fn get_record_key(text: &str) -> Option<RecordKey> {
|
||||
RecordKey::from_str(text).ok()
|
||||
}
|
||||
fn get_node_id(text: &str) -> Option<BareNodeId> {
|
||||
fn get_bare_node_id(text: &str) -> Option<BareNodeId> {
|
||||
let bare_node_id = BareNodeId::from_str(text).ok()?;
|
||||
|
||||
// Enforce 32 byte node ids
|
||||
|
@ -250,7 +250,7 @@ fn get_node_id(text: &str) -> Option<BareNodeId> {
|
|||
}
|
||||
Some(bare_node_id)
|
||||
}
|
||||
fn get_typed_node_id(text: &str) -> Option<NodeId> {
|
||||
fn get_node_id(text: &str) -> Option<NodeId> {
|
||||
let node_id = NodeId::from_str(text).ok()?;
|
||||
|
||||
// Enforce 32 byte node ids
|
||||
|
@ -259,7 +259,7 @@ fn get_typed_node_id(text: &str) -> Option<NodeId> {
|
|||
}
|
||||
Some(node_id)
|
||||
}
|
||||
fn get_typedkeypair(text: &str) -> Option<KeyPair> {
|
||||
fn get_keypair(text: &str) -> Option<KeyPair> {
|
||||
KeyPair::from_str(text).ok()
|
||||
}
|
||||
|
||||
|
@ -274,7 +274,7 @@ fn get_crypto_system_version<'a>(
|
|||
}
|
||||
|
||||
fn get_dht_key_no_safety(text: &str) -> Option<RecordKey> {
|
||||
    let key = get_typed_record_key(text)?;
    let key = get_record_key(text)?;

    Some(key)
}

@@ -294,7 +294,7 @@ fn get_dht_key(
        return None;
    }

    let key = get_typed_record_key(text)?;
    let key = get_record_key(text)?;

    Some((key, ss))
}

@@ -307,7 +307,7 @@ fn resolve_node_ref(
    move |text| {
        let text = text.to_owned();
        Box::pin(async move {
            let nr = if let Some(node_id) = get_typed_node_id(&text) {
            let nr = if let Some(node_id) = get_node_id(&text) {
                registry
                    .rpc_processor()
                    .resolve_node(node_id, safety_selection)

@@ -334,7 +334,7 @@ fn resolve_filtered_node_ref(
            .map(|x| (x.0, Some(x.1)))
            .unwrap_or((&text, None));

        let nr = if let Some(node_id) = get_typed_node_id(text) {
        let nr = if let Some(node_id) = get_node_id(text) {
            registry
                .rpc_processor()
                .resolve_node(node_id, safety_selection)

@@ -356,9 +356,9 @@ fn resolve_filtered_node_ref(
fn get_node_ref(registry: VeilidComponentRegistry) -> impl FnOnce(&str) -> Option<NodeRef> {
    move |text| {
        let routing_table = registry.routing_table();
        let nr = if let Some(key) = get_node_id(text) {
        let nr = if let Some(key) = get_bare_node_id(text) {
            routing_table.lookup_any_node_ref(key).ok().flatten()?
        } else if let Some(node_id) = get_typed_node_id(text) {
        } else if let Some(node_id) = get_node_id(text) {
            routing_table.lookup_node_ref(node_id).ok().flatten()?
        } else {
            return None;

@@ -389,9 +389,9 @@ fn get_filtered_node_ref(
            .map(|x| (x.0, Some(x.1)))
            .unwrap_or((text, None));

        let nr = if let Some(key) = get_node_id(text) {
        let nr = if let Some(key) = get_bare_node_id(text) {
            routing_table.lookup_any_node_ref(key).ok().flatten()?
        } else if let Some(node_id) = get_typed_node_id(text) {
        } else if let Some(node_id) = get_node_id(text) {
            routing_table.lookup_node_ref(node_id).ok().flatten()?
        } else {
            return None;

@@ -645,13 +645,8 @@ impl VeilidAPI {
        // Dump routing table txt record
        let args: Vec<String> = args.split_whitespace().map(|s| s.to_owned()).collect();

        let signing_key_pair = get_debug_argument_at(
            &args,
            0,
            "debug_txtrecord",
            "signing_key_pair",
            get_typedkeypair,
        )?;
        let signing_key_pair =
            get_debug_argument_at(&args, 0, "debug_txtrecord", "signing_key_pair", get_keypair)?;

        let network_manager = self.core_context()?.network_manager();
        Ok(network_manager.debug_info_txtrecord(signing_key_pair).await)

@@ -750,8 +745,8 @@ impl VeilidAPI {
                get_routing_domain,
            )
            .ok();
            if opt_routing_domain.is_some() {
                routing_domain = opt_routing_domain.unwrap();
            if let Some(rd) = opt_routing_domain {
                routing_domain = rd;
                break;
            }
        }

@@ -1586,10 +1581,9 @@ impl VeilidAPI {
        });

        Ok(format!(
            "Created: {} {}:{}\n{:?}",
            "Created: {} {}\n{:?}",
            record.key(),
            record.owner(),
            record.owner_secret().unwrap(),
            record.owner_keypair().unwrap(),
            record
        ))
    }

@@ -1605,7 +1599,7 @@ impl VeilidAPI {
            get_dht_key(registry.clone()),
        )?;
        let writer =
            get_debug_argument_at(&args, 2, "debug_record_open", "writer", get_typedkeypair).ok();
            get_debug_argument_at(&args, 2, "debug_record_open", "writer", get_keypair).ok();

        // Get routing context with optional safety
        let rc = self.routing_context()?;

@@ -1629,7 +1623,7 @@ impl VeilidAPI {
            dc.opened_record_contexts.insert(record.key(), rc);
        });

        Ok(format!("Opened: {} : {:?}", key, record))
        Ok(format!("Opened: {}\n{:#?}", key, record))
    }

    async fn debug_record_close(&self, args: Vec<String>) -> VeilidAPIResult<String> {

@@ -1668,7 +1662,7 @@ impl VeilidAPI {
            3 + opt_arg_add,
            "debug_record_set",
            "writer",
            get_typedkeypair,
            get_keypair,
        ) {
            Ok(v) => {
                opt_arg_add += 1;

@@ -2051,7 +2045,7 @@ impl VeilidAPI {
        // Do a record rehydrate
        storage_manager
            .add_rehydration_request(
                key,
                key.opaque(),
                subkeys.unwrap_or_default(),
                consensus_count.unwrap_or_else(|| {
                    registry
@@ -132,7 +132,7 @@ pub enum VeilidAPIError {
    #[error("Key not found: {key}")]
    KeyNotFound {
        #[schemars(with = "String")]
        key: RecordKey,
        key: OpaqueRecordKey,
    },
    #[error("Internal: {message}")]
    Internal { message: String },

@@ -180,7 +180,7 @@ impl VeilidAPIError {
            message: msg.to_string(),
        }
    }
    pub fn key_not_found(key: RecordKey) -> Self {
    pub fn key_not_found(key: OpaqueRecordKey) -> Self {
        Self::KeyNotFound { key }
    }
    pub fn internal<T: ToString>(msg: T) -> Self {
@@ -310,14 +310,15 @@ impl RoutingContext {
    pub fn get_dht_record_key(
        &self,
        schema: DHTSchema,
        owner_key: &PublicKey,
        owner_key: PublicKey,
        encryption_key: Option<SharedSecret>,
    ) -> VeilidAPIResult<RecordKey> {
        veilid_log!(self debug
            "RoutingContext::get_dht_record_key(self: {:?}, schema: {:?}, owner_key: {:?}", self, schema, owner_key);
            "RoutingContext::get_dht_record_key(self: {:?} schema: {:?}, owner_key: {:?}, encryption_key: {:?}", self, schema, owner_key, encryption_key);
        schema.validate()?;

        let storage_manager = self.api.core_context()?.storage_manager();
        storage_manager.get_record_key(schema, owner_key)
        storage_manager.get_record_key(schema, &owner_key, encryption_key)
    }

    /// Creates a new DHT record
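For orientation, the call shape after this hunk: the owner key is now taken by value and an optional shared secret selects the encrypted form of the derived key. Below is a minimal caller-side sketch, assuming only the veilid-core types visible in the diff (RoutingContext, DHTSchema, PublicKey, SharedSecret, RecordKey, VeilidAPIResult); it is illustrative and not part of this commit.

// Hedged sketch: forwards to the new three-argument signature shown above.
// Passing `None` derives the key of an unencrypted record; passing the
// record's shared secret reproduces the full RecordKey of an encrypted record.
use veilid_core::{DHTSchema, PublicKey, RecordKey, RoutingContext, SharedSecret, VeilidAPIResult};

fn derive_record_key(
    rc: &RoutingContext,
    schema: DHTSchema,
    owner: PublicKey,
    encryption_key: Option<SharedSecret>,
) -> VeilidAPIResult<RecordKey> {
    rc.get_dht_record_key(schema, owner, encryption_key)
}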
@@ -19,7 +19,8 @@ pub fn test_dhtrecorddescriptor() {
// value_data

pub fn test_valuedata() {
    let orig = ValueData::new_with_seq(42, b"Brent Spiner".to_vec(), fix_fake_public_key());
    let orig =
        EncryptedValueData::new_with_seq(42, b"Brent Spiner".to_vec(), fix_fake_public_key(), None);
    let copy = deserialize_json(&serialize_json(&orig)).unwrap();

    assert_eq!(orig, copy);
veilid-core/src/veilid_api/types/dht/encrypted_value_data.rs (new file, 283 lines)

@@ -0,0 +1,283 @@
use super::*;
use data_encoding::BASE64URL_NOPAD;

#[derive(Clone, Eq, PartialEq)]
#[must_use]
pub struct EncryptedValueData {
    // capnp struct ValueData encoded without packing
    blob: Vec<u8>,
}

impl EncryptedValueData {
    pub const MAX_LEN: usize = 32768;

    pub fn new(data: Vec<u8>, writer: PublicKey, nonce: Option<Nonce>) -> VeilidAPIResult<Self> {
        Self::new_with_seq(0, data, writer, nonce)
    }

    pub fn new_with_seq(
        seq: ValueSeqNum,
        data: Vec<u8>,
        writer: PublicKey,
        nonce: Option<Nonce>,
    ) -> VeilidAPIResult<Self> {
        if data.len() > Self::MAX_LEN {
            apibail_generic!("invalid size");
        }

        let estimated_capacity = 128
            + data.len()
            + writer.ref_value().len()
            + nonce.as_ref().map_or(0, |nonce| nonce.len());

        let mut memory = vec![0; 32767 + 4096];
        let allocator = capnp::message::SingleSegmentAllocator::new(&mut memory);
        let mut message_builder = ::capnp::message::Builder::new(allocator);
        let mut builder = message_builder.init_root::<veilid_capnp::value_data::Builder>();

        builder.set_seq(seq);

        builder.set_data(&data);

        let mut wb = builder.reborrow().init_writer();
        capnp_encode_public_key(&writer, &mut wb);

        if let Some(nonce_val) = nonce {
            let mut nb = builder.reborrow().init_nonce();
            capnp_encode_nonce(&nonce_val, &mut nb);
        }

        let mut blob = Vec::with_capacity(estimated_capacity);
        capnp::serialize::write_message(&mut blob, &message_builder).unwrap();

        // Ensure the blob could be decoded without errors, allowing to do unwrap() in getter methods
        validate_value_data_blob(&blob).map_err(VeilidAPIError::generic)?;

        Ok(Self { blob })
    }

    #[must_use]
    pub fn seq(&self) -> ValueSeqNum {
        let message_reader = capnp::serialize::read_message_from_flat_slice(
            &mut &self.blob[..],
            capnp::message::ReaderOptions::new(),
        )
        .unwrap();
        let reader = message_reader
            .get_root::<veilid_capnp::value_data::Reader>()
            .unwrap();

        reader.get_seq()
    }

    pub fn writer(&self) -> PublicKey {
        let message_reader = capnp::serialize::read_message_from_flat_slice(
            &mut &self.blob[..],
            capnp::message::ReaderOptions::new(),
        )
        .unwrap();
        let reader = message_reader
            .get_root::<veilid_capnp::value_data::Reader>()
            .unwrap();

        let w = reader.get_writer().unwrap();
        PublicKey::new(
            CryptoKind::from(w.get_kind()),
            BarePublicKey::new(w.get_value().unwrap()),
        )
    }

    #[must_use]
    pub fn data(&self) -> Vec<u8> {
        let message_reader = capnp::serialize::read_message_from_flat_slice(
            &mut &self.blob[..],
            capnp::message::ReaderOptions::new(),
        )
        .unwrap();
        let reader = message_reader
            .get_root::<veilid_capnp::value_data::Reader>()
            .unwrap();

        // TODO: try to make this function return &[u8]
        reader.get_data().unwrap().to_vec()
    }

    #[must_use]
    pub fn nonce(&self) -> Option<Nonce> {
        let message_reader = capnp::serialize::read_message_from_flat_slice(
            &mut &self.blob[..],
            capnp::message::ReaderOptions::new(),
        )
        .unwrap();
        let reader = message_reader
            .get_root::<veilid_capnp::value_data::Reader>()
            .unwrap();

        if reader.has_nonce() {
            let n = reader.get_nonce().unwrap();
            Some(Nonce::new(n.get_value().unwrap()))
        } else {
            None
        }
    }

    #[must_use]
    pub fn data_size(&self) -> usize {
        let message_reader = capnp::serialize::read_message_from_flat_slice(
            &mut &self.blob[..],
            capnp::message::ReaderOptions::new(),
        )
        .unwrap();
        let reader = message_reader
            .get_root::<veilid_capnp::value_data::Reader>()
            .unwrap();

        reader.get_data().unwrap().len()
    }

    #[must_use]
    pub fn total_size(&self) -> usize {
        mem::size_of::<Self>() + self.data_size()
    }

    #[must_use]
    pub fn raw_blob(&self) -> &[u8] {
        &self.blob
    }
}

fn validate_value_data_blob(blob: &[u8]) -> capnp::Result<()> {
    let message_reader = capnp::serialize::read_message_from_flat_slice(
        &mut &blob[..],
        capnp::message::ReaderOptions::new(),
    )?;
    let reader = message_reader.get_root::<veilid_capnp::value_data::Reader>()?;
    let _ = reader.get_data()?;
    let _ = reader.get_writer()?;
    if reader.has_nonce() {
        let n = reader.get_nonce()?;
        let _ = n.get_value()?;
    }
    Ok(())
}

impl fmt::Debug for EncryptedValueData {
    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
        let seq = self.seq();
        let data = self.data();
        let writer = self.writer();
        let nonce = self.nonce();

        fmt.debug_struct("EncryptedValueData")
            .field("seq", &seq)
            .field("data", &print_data(&data, Some(64)))
            .field("writer", &writer)
            .field("nonce", &nonce)
            .finish()
    }
}

impl serde::Serialize for EncryptedValueData {
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        let encoded = BASE64URL_NOPAD.encode(&self.blob);
        serializer.serialize_str(&encoded)
    }
}

impl<'de> serde::Deserialize<'de> for EncryptedValueData {
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: Deserializer<'de>,
    {
        #[derive(Deserialize)]
        struct LegacyValueData {
            /// An increasing sequence number to time-order the DHT record changes
            seq: ValueSeqNum,

            /// The contents of a DHT Record
            #[cfg_attr(
                not(all(target_arch = "wasm32", target_os = "unknown")),
                serde(with = "as_human_base64")
            )]
            data: Vec<u8>,

            /// The public identity key of the writer of the data
            #[serde(with = "public_key_try_untyped_vld0")]
            writer: PublicKey,
        }

        #[derive(serde::Deserialize)]
        #[serde(untagged)]
        enum Helper {
            Base64Str(String),
            Legacy(LegacyValueData),
        }

        match Helper::deserialize(deserializer)? {
            Helper::Base64Str(value) => {
                let blob = BASE64URL_NOPAD.decode(value.as_bytes()).map_err(|e| {
                    <D::Error as serde::de::Error>::custom(format!(
                        "Failed to decode base64: {}",
                        e
                    ))
                })?;

                validate_value_data_blob(&blob).map_err(|e| {
                    <D::Error as serde::de::Error>::custom(format!(
                        "Decoded blob is not a valid ValueData capnp struct: {}",
                        e
                    ))
                })?;

                Ok(EncryptedValueData { blob })
            }
            Helper::Legacy(legacy) => {
                EncryptedValueData::new_with_seq(legacy.seq, legacy.data, legacy.writer, None)
                    .map_err(serde::de::Error::custom)
            }
        }
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::crypto::tests::fixtures::*;

    #[test]
    fn value_data_ok() {
        assert!(EncryptedValueData::new(
            vec![0; EncryptedValueData::MAX_LEN],
            fix_fake_public_key(),
            None,
        )
        .is_ok());
        assert!(EncryptedValueData::new_with_seq(
            0,
            vec![0; EncryptedValueData::MAX_LEN],
            fix_fake_public_key(),
            None,
        )
        .is_ok());
    }

    #[test]
    fn value_data_too_long() {
        assert!(EncryptedValueData::new(
            vec![0; EncryptedValueData::MAX_LEN + 1],
            fix_fake_public_key(),
            None,
        )
        .is_err());
        assert!(EncryptedValueData::new_with_seq(
            0,
            vec![0; EncryptedValueData::MAX_LEN + 1],
            fix_fake_public_key(),
            None,
        )
        .is_err());
    }
}
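As an aside on the JSON shape introduced above: the serde impls store the capnp blob as URL-safe base64 without padding. Here is a standalone round-trip sketch using only the data_encoding crate the new file already imports; the byte string is an arbitrary stand-in, not a real ValueData blob.

use data_encoding::BASE64URL_NOPAD;

fn main() {
    // Arbitrary stand-in bytes; EncryptedValueData would put its capnp blob here.
    let blob: Vec<u8> = vec![0x00, 0x01, 0xfe, 0xff];

    // Serialize side: the JSON string is BASE64URL_NOPAD.encode(&blob).
    let encoded = BASE64URL_NOPAD.encode(&blob);

    // Deserialize side: decode back to bytes; validation against the capnp
    // schema (validate_value_data_blob above) is omitted in this sketch.
    let decoded = BASE64URL_NOPAD
        .decode(encoded.as_bytes())
        .expect("valid base64url");
    assert_eq!(decoded, blob);
    println!("{encoded}");
}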
@@ -1,5 +1,6 @@
mod dht_record_descriptor;
mod dht_record_report;
mod encrypted_value_data;
mod schema;
mod set_dht_value_options;
mod value_data;

@@ -9,6 +10,7 @@ use super::*;

pub use dht_record_descriptor::*;
pub use dht_record_report::*;
pub use encrypted_value_data::*;
pub use schema::*;
pub use set_dht_value_options::*;
pub use value_data::*;
@@ -112,12 +112,17 @@ impl TryFrom<&[u8]> for DHTSchema {
            apibail_generic!("invalid size");
        }
        let fcc: [u8; 4] = b[0..4].try_into().unwrap();
        match fcc {
            DHTSchemaDFLT::FCC => Ok(DHTSchema::DFLT(DHTSchemaDFLT::try_from(b)?)),
            DHTSchemaSMPL::FCC => Ok(DHTSchema::SMPL(DHTSchemaSMPL::try_from(b)?)),
        let schema = match fcc {
            DHTSchemaDFLT::FCC => DHTSchema::DFLT(DHTSchemaDFLT::try_from(b)?),
            DHTSchemaSMPL::FCC => DHTSchema::SMPL(DHTSchemaSMPL::try_from(b)?),
            _ => {
                apibail_generic!("unknown fourcc");
            }
        }
        };

        // Just to make sure, although it should come out of the try_from already validated.
        schema.validate()?;

        Ok(schema)
    }
}
@@ -151,10 +151,12 @@ Future<void> testCreateDHTRecordWithDeterministicKey() async {
  final owner = ownerKeyPair.key;
  final secret = ownerKeyPair.secret;
  const schema = DHTSchema.dflt(oCnt: 1);
  final dhtRecordKey = await rc.getDHTRecordKey(schema, owner);
  final dhtRecord = await rc.createDHTRecord(
      kind, const DHTSchema.dflt(oCnt: 1),
      owner: ownerKeyPair);
  final encryptionKey = dhtRecord.key.encryptionKey;
  final dhtRecordKey =
      await rc.getDHTRecordKey(schema, owner, encryptionKey);
  expect(dhtRecord.key, equals(dhtRecordKey));
  expect(dhtRecord.owner, equals(owner));
  expect(dhtRecord.ownerSecret, equals(secret));
@@ -367,7 +367,8 @@ abstract class VeilidRoutingContext {
  Future<DHTRecordDescriptor> openDHTRecord(RecordKey key, {KeyPair? writer});
  Future<void> closeDHTRecord(RecordKey key);
  Future<void> deleteDHTRecord(RecordKey key);
  Future<RecordKey> getDHTRecordKey(DHTSchema schema, PublicKey owner);
  Future<RecordKey> getDHTRecordKey(
      DHTSchema schema, PublicKey owner, SharedSecret? encryptionKey);
  Future<ValueData?> getDHTValue(RecordKey key, int subkey,
      {bool forceRefresh = false});
  Future<ValueData?> setDHTValue(RecordKey key, int subkey, Uint8List data,
@@ -48,7 +48,7 @@ Map<String, dynamic> _$DHTSchemaMemberToJson(_DHTSchemaMember instance) =>

_DHTRecordDescriptor _$DHTRecordDescriptorFromJson(Map<String, dynamic> json) =>
    _DHTRecordDescriptor(
      key: Typed<BareRecordKey>.fromJson(json['key']),
      key: RecordKey.fromJson(json['key']),
      owner: Typed<BarePublicKey>.fromJson(json['owner']),
      schema: DHTSchema.fromJson(json['schema']),
      ownerSecret: json['owner_secret'] == null
@@ -145,15 +145,97 @@ class KeyPair extends Equatable {
      BareKeyPair(key: key.value, secret: secret.value);
}

@immutable
class BareRecordKey extends Equatable {
  const BareRecordKey({required this.key, required this.encryptionKey});

  factory BareRecordKey.fromString(String s) {
    final parts = s.split(':');
    if (parts.length > 2 || parts.isEmpty) {
      throw const FormatException('malformed string');
    }
    if (parts.length == 2) {
      final key = BareOpaqueRecordKey.fromString(parts[0]);
      final encryptionKey = BareSharedSecret.fromString(parts[1]);
      return BareRecordKey(key: key, encryptionKey: encryptionKey);
    }
    final key = BareOpaqueRecordKey.fromString(parts[0]);
    return BareRecordKey(key: key, encryptionKey: null);
  }
  factory BareRecordKey.fromJson(dynamic json) =>
      BareRecordKey.fromString(json as String);
  final BareOpaqueRecordKey key;
  final BareSharedSecret? encryptionKey;
  @override
  List<Object?> get props => [key, encryptionKey];

  @override
  String toString() => encryptionKey != null ? '$key:$encryptionKey' : '$key';

  String toJson() => toString();
}

@immutable
class RecordKey extends Equatable {
  RecordKey({required this.key, required this.encryptionKey})
      : assert(encryptionKey == null || key.kind == encryptionKey.kind,
            'recordkey parts must have same kind');

  factory RecordKey.fromString(String s) {
    final parts = s.split(':');
    if (parts.length < 2 ||
        parts.length > 3 ||
        parts[0].codeUnits.length != 4) {
      throw VeilidAPIExceptionInvalidArgument('malformed string', 's', s);
    }
    final kind = cryptoKindFromString(parts[0]);
    final key = OpaqueRecordKey(
        kind: kind, value: BareOpaqueRecordKey.fromString(parts[1]));
    if (parts.length == 3) {
      final encryptionKey = SharedSecret(
          kind: kind, value: BareSharedSecret.fromString(parts[2]));
      return RecordKey(key: key, encryptionKey: encryptionKey);
    }
    return RecordKey(key: key, encryptionKey: null);
  }
  factory RecordKey.fromJson(dynamic json) =>
      RecordKey.fromString(json as String);
  factory RecordKey.fromBareRecordKey(
          CryptoKind kind, BareRecordKey bareRecordKey) =>
      RecordKey(
          key: OpaqueRecordKey(kind: kind, value: bareRecordKey.key),
          encryptionKey: bareRecordKey.encryptionKey == null
              ? null
              : SharedSecret(kind: kind, value: bareRecordKey.encryptionKey!));
  factory RecordKey.fromOpaqueRecordKey(
          OpaqueRecordKey key, BareSharedSecret? encryptionKey) =>
      RecordKey(
          key: key,
          encryptionKey: encryptionKey == null
              ? null
              : SharedSecret(kind: key.kind, value: encryptionKey));
  final OpaqueRecordKey key;
  final SharedSecret? encryptionKey;
  @override
  List<Object?> get props => [key, encryptionKey];

  @override
  String toString() => encryptionKey != null
      ? '${cryptoKindToString(key.kind)}:${key.value}:${encryptionKey!.value}'
      : '${cryptoKindToString(key.kind)}:${key.value}';

  String toJson() => toString();
}

typedef PublicKey = Typed<BarePublicKey>;
typedef Signature = Typed<BareSignature>;
typedef SecretKey = Typed<BareSecretKey>;
typedef HashDigest = Typed<BareHashDigest>;
typedef SharedSecret = Typed<BareSharedSecret>;
typedef RecordKey = Typed<BareRecordKey>;
typedef RouteId = Typed<BareRouteId>;
typedef NodeId = Typed<BareNodeId>;
typedef MemberId = Typed<BareMemberId>;
typedef OpaqueRecordKey = Typed<BareOpaqueRecordKey>;

//////////////////////////////////////
/// VeilidCryptoSystem
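The textual layout these Dart classes parse (and which the Python BareRecordKey further down mirrors) is a four-character crypto kind, the opaque record key, and an optional encryption key, joined by colons. A standalone Rust sketch of that split, using only the standard library; the names and sample values are illustrative and this is not the parser veilid-core itself ships.

/// Illustrative only: split "KIND:opaque_key[:encryption_key]" into its parts.
fn split_record_key(s: &str) -> Option<(&str, &str, Option<&str>)> {
    let mut parts = s.splitn(3, ':');
    let kind = parts.next()?;
    // The crypto kind is a four-character fourcc such as "VLD0".
    if kind.len() != 4 {
        return None;
    }
    let opaque_key = parts.next()?;
    let encryption_key = parts.next(); // absent for unencrypted records
    Some((kind, opaque_key, encryption_key))
}

fn main() {
    // Hypothetical values, not real keys.
    assert_eq!(
        split_record_key("VLD0:abc123:sharedsecret"),
        Some(("VLD0", "abc123", Some("sharedsecret")))
    );
    assert_eq!(split_record_key("VLD0:abc123"), Some(("VLD0", "abc123", None)));
}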
@@ -78,8 +78,6 @@ sealed class EncodedString extends Equatable {
        return BareSharedSecret.fromBytes(bytes) as T;
      case const (BareHashDistance):
        return BareHashDistance.fromBytes(bytes) as T;
      case const (BareRecordKey):
        return BareRecordKey.fromBytes(bytes) as T;
      case const (BareRouteId):
        return BareRouteId.fromBytes(bytes) as T;
      case const (BareNodeId):

@@ -107,8 +105,6 @@ sealed class EncodedString extends Equatable {
        return BareSharedSecret.fromString(s) as T;
      case const (BareHashDistance):
        return BareHashDistance.fromString(s) as T;
      case const (BareRecordKey):
        return BareRecordKey.fromString(s) as T;
      case const (BareRouteId):
        return BareRouteId.fromString(s) as T;
      case const (BareNodeId):

@@ -136,8 +132,6 @@ sealed class EncodedString extends Equatable {
        return BareSharedSecret.fromJson(json) as T;
      case const (BareHashDistance):
        return BareHashDistance.fromJson(json) as T;
      case const (BareRecordKey):
        return BareRecordKey.fromJson(json) as T;
      case const (BareRouteId):
        return BareRouteId.fromJson(json) as T;
      case const (BareNodeId):

@@ -198,10 +192,10 @@ class BareHashDistance extends EncodedString {
  BareHashDistance.fromJson(super.json) : super._fromJson();
}

class BareRecordKey extends EncodedString {
  BareRecordKey.fromBytes(super.bytes) : super._fromBytes();
  BareRecordKey.fromString(super.s) : super._fromString();
  BareRecordKey.fromJson(super.json) : super._fromJson();
class BareOpaqueRecordKey extends EncodedString {
  BareOpaqueRecordKey.fromBytes(super.bytes) : super._fromBytes();
  BareOpaqueRecordKey.fromString(super.s) : super._fromString();
  BareOpaqueRecordKey.fromJson(super.json) : super._fromJson();
}

class BareRouteId extends EncodedString {
@@ -69,7 +69,7 @@ typedef _RoutingContextAppMessageDart = void Function(
// fn routing_context_get_dht_record_key(port: i64,
//      id: u32, schema: FfiStr, owner: FfiStr)
typedef _RoutingContextGetDHTRecordKeyDart = void Function(
    int, int, Pointer<Utf8>, Pointer<Utf8>);
    int, int, Pointer<Utf8>, Pointer<Utf8>, Pointer<Utf8>);
// fn routing_context_create_dht_record(port: i64,
//      id: u32, schema: FfiStr, owner: FfiStr, kind: u32)
typedef _RoutingContextCreateDHTRecordDart = void Function(

@@ -654,14 +654,18 @@ class VeilidRoutingContextFFI extends VeilidRoutingContext {
  }

  @override
  Future<RecordKey> getDHTRecordKey(DHTSchema schema, PublicKey owner) async {
  Future<RecordKey> getDHTRecordKey(
      DHTSchema schema, PublicKey owner, SharedSecret? encryptionKey) async {
    _ctx.ensureValid();
    final nativeSchema = jsonEncode(schema).toNativeUtf8();
    final nativeOwner = jsonEncode(owner).toNativeUtf8();
    final nativeEncryptionKey = encryptionKey != null
        ? jsonEncode(encryptionKey).toNativeUtf8()
        : nullptr;
    final recvPort = ReceivePort('routing_context_get_dht_record_key');
    final sendPort = recvPort.sendPort;
    _ctx.ffi._routingContextGetDHTRecordKey(
        sendPort.nativePort, _ctx.id!, nativeSchema, nativeOwner);
    _ctx.ffi._routingContextGetDHTRecordKey(sendPort.nativePort, _ctx.id!,
        nativeSchema, nativeOwner, nativeEncryptionKey);
    final recordKey =
        await processFutureJson(RecordKey.fromJson, recvPort.first);
    return recordKey;

@@ -1427,7 +1431,8 @@ class VeilidFFI extends Veilid {
            Void Function(Int64, Uint32, Pointer<Utf8>, Pointer<Utf8>),
            _RoutingContextAppMessageDart>('routing_context_app_message'),
        _routingContextGetDHTRecordKey = dylib.lookupFunction<
            Void Function(Int64, Uint32, Pointer<Utf8>, Pointer<Utf8>),
            Void Function(
                Int64, Uint32, Pointer<Utf8>, Pointer<Utf8>, Pointer<Utf8>),
            _RoutingContextGetDHTRecordKeyDart>(
            'routing_context_get_dht_record_key'),
        _routingContextCreateDHTRecord =
@@ -140,13 +140,15 @@ class VeilidRoutingContextJS extends VeilidRoutingContext {
  }

  @override
  Future<RecordKey> getDHTRecordKey(DHTSchema schema, PublicKey owner) async {
  Future<RecordKey> getDHTRecordKey(
      DHTSchema schema, PublicKey owner, SharedSecret? encryptionKey) async {
    final id = _ctx.requireId();
    return RecordKey.fromJson(jsonDecode(await _wrapApiPromise(
        js_util.callMethod(wasm, 'routing_context_get_dht_record_key', [
      id,
      jsonEncode(schema),
      jsonEncode(owner),
      if (encryptionKey != null) jsonEncode(encryptionKey) else null,
    ]))));
  }
@@ -237,7 +237,7 @@ sealed class VeilidUpdate with _$VeilidUpdate {
    required List<String> deadRemoteRoutes,
  }) = VeilidUpdateRouteChange;
  const factory VeilidUpdate.valueChange({
    required PublicKey key,
    required RecordKey key,
    required List<ValueSubkeyRange> subkeys,
    required int count,
    required ValueData? value,

@@ -3352,7 +3352,7 @@ class VeilidUpdateValueChange implements VeilidUpdate {
  factory VeilidUpdateValueChange.fromJson(Map<String, dynamic> json) =>
      _$VeilidUpdateValueChangeFromJson(json);

  final PublicKey key;
  final RecordKey key;
  final List<ValueSubkeyRange> _subkeys;
  List<ValueSubkeyRange> get subkeys {
    if (_subkeys is EqualUnmodifiableListView) return _subkeys;

@@ -3411,7 +3411,7 @@ abstract mixin class $VeilidUpdateValueChangeCopyWith<$Res>
          _$VeilidUpdateValueChangeCopyWithImpl;
  @useResult
  $Res call(
      {PublicKey key,
      {RecordKey key,
      List<ValueSubkeyRange> subkeys,
      int count,
      ValueData? value});

@@ -3440,7 +3440,7 @@ class _$VeilidUpdateValueChangeCopyWithImpl<$Res>
      key: null == key
          ? _self.key
          : key // ignore: cast_nullable_to_non_nullable
              as PublicKey,
              as RecordKey,
      subkeys: null == subkeys
          ? _self._subkeys
          : subkeys // ignore: cast_nullable_to_non_nullable

@@ -343,7 +343,7 @@ Map<String, dynamic> _$VeilidUpdateRouteChangeToJson(
VeilidUpdateValueChange _$VeilidUpdateValueChangeFromJson(
        Map<String, dynamic> json) =>
    VeilidUpdateValueChange(
      key: Typed<BarePublicKey>.fromJson(json['key']),
      key: RecordKey.fromJson(json['key']),
      subkeys: (json['subkeys'] as List<dynamic>)
          .map(ValueSubkeyRange.fromJson)
          .toList(),
@@ -675,17 +675,21 @@ pub extern "C" fn routing_context_get_dht_record_key(
    id: u32,
    schema: FfiStr,
    owner: FfiStr,
    encryption_key: FfiStr,
) {
    let schema: veilid_core::DHTSchema =
        veilid_core::deserialize_opt_json(schema.into_opt_string()).unwrap();
    let owner: veilid_core::PublicKey =
        veilid_core::deserialize_opt_json(owner.into_opt_string()).unwrap();
    let encryption_key: Option<veilid_core::SharedSecret> = encryption_key
        .into_opt_string()
        .map(|s| veilid_core::deserialize_json(&s).unwrap());

    DartIsolateWrapper::new(port).spawn_result_json(
        async move {
            let routing_context = get_routing_context(id, "routing_context_get_dht_record_key")?;

            let record_key = routing_context.get_dht_record_key(schema, &owner)?;
            let record_key = routing_context.get_dht_record_key(schema, owner, encryption_key)?;
            APIResult::Ok(record_key)
        }
        .in_current_span(),
@@ -11,7 +11,7 @@ from veilid.types import ValueSeqNum, VeilidJSONEncoder

##################################################################
BOGUS_KEY = veilid.RecordKey.from_value(
    veilid.CryptoKind.CRYPTO_KIND_VLD0, veilid.BareRecordKey.from_bytes(b' '))
    veilid.CryptoKind.CRYPTO_KIND_VLD0, veilid.BareRecordKey.from_parts(veilid.BareOpaqueRecordKey.from_bytes(b' '), None))


@pytest.mark.asyncio

@@ -105,10 +105,11 @@ async def test_set_get_dht_value_with_owner(api_connection: veilid.VeilidAPI):
        async with cs:
            owner = await cs.generate_key_pair()

            record_key = await rc.get_dht_record_key(veilid.DHTSchema.dflt(2), owner = owner.key())

            rec = await rc.create_dht_record(kind, veilid.DHTSchema.dflt(2), owner=owner)

            record_key = await rc.get_dht_record_key(veilid.DHTSchema.dflt(2), owner = owner.key(), encryption_key=rec.key.encryption_key())

            assert rec.key == record_key
            assert rec.owner == owner.key()
            assert rec.owner_secret is not None
@@ -58,7 +58,7 @@ class RoutingContext(ABC):

    @abstractmethod
    async def get_dht_record_key(
            self, schema: types.DHTSchema, owner: types.PublicKey) -> types.RecordKey:
            self, schema: types.DHTSchema, owner: types.PublicKey, encryption_key: Optional[types.SharedSecret]) -> types.RecordKey:
        pass

    @abstractmethod

@@ -654,9 +654,11 @@ class _JsonRoutingContext(RoutingContext):


    async def get_dht_record_key(
            self, schema: DHTSchema, owner: PublicKey) -> RecordKey:
            self, schema: DHTSchema, owner: PublicKey, encryption_key: Optional[SharedSecret]) -> RecordKey:
        assert isinstance(schema, DHTSchema)
        assert isinstance(owner, PublicKey)
        if encryption_key is not None:
            assert isinstance(encryption_key, SharedSecret)

        return raise_api_result(
            await self.api.send_ndjson_request(

@@ -666,6 +668,7 @@ class _JsonRoutingContext(RoutingContext):
                rc_op=RoutingContextOperation.GET_DHT_RECORD_KEY,
                schema=schema,
                owner=owner,
                encryption_key=encryption_key,
            )
        )
|
|||
{
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"encryption_key": {
|
||||
"type": [
|
||||
"string",
|
||||
"null"
|
||||
]
|
||||
},
|
||||
"owner": {
|
||||
"type": "string"
|
||||
},
|
||||
|
|
|
@@ -8,6 +8,7 @@ from .types import (
    Timestamp,
    TimestampDuration,
    PublicKey,
    RecordKey,
    ValueData,
    ValueSubkey,
    VeilidLogLevel,

@@ -563,12 +564,12 @@ class VeilidRouteChange:


class VeilidValueChange:
    key: PublicKey
    key: RecordKey
    subkeys: list[tuple[ValueSubkey, ValueSubkey]]
    count: int
    value: Optional[ValueData]

    def __init__(self, key: PublicKey, subkeys: list[tuple[ValueSubkey, ValueSubkey]], count: int, value: Optional[ValueData]):
    def __init__(self, key: RecordKey, subkeys: list[tuple[ValueSubkey, ValueSubkey]], count: int, value: Optional[ValueData]):
        self.key = key
        self.subkeys = subkeys
        self.count = count

@@ -578,7 +579,7 @@ class VeilidValueChange:
    def from_json(cls, j: dict) -> Self:
        """JSON object hook"""
        return cls(
            PublicKey(j["key"]),
            RecordKey(j["key"]),
            [(p[0], p[1]) for p in j["subkeys"]],
            j["count"],
            None if j["value"] is None else ValueData.from_json(j["value"]),
@@ -124,9 +124,6 @@ class EncodedString(str):
        assert isinstance(b, bytes)
        return cls(urlsafe_b64encode_no_pad(b))

class BareRecordKey(EncodedString):
    pass

class BarePublicKey(EncodedString):
    pass

@@ -145,7 +142,6 @@ class BareSignature(EncodedString):
class Nonce(EncodedString):
    pass


class BareRouteId(EncodedString):
    pass

@@ -155,6 +151,27 @@ class BareNodeId(EncodedString):
class BareMemberId(EncodedString):
    pass

class BareOpaqueRecordKey(EncodedString):
    pass

class BareRecordKey(str):
    @classmethod
    def from_parts(cls, key: BareOpaqueRecordKey, encryption_key: Optional[BareSharedSecret]) -> Self:
        assert isinstance(key, BareOpaqueRecordKey)
        if encryption_key is not None:
            assert isinstance(encryption_key, BareSharedSecret)
            return cls(f"{key}:{encryption_key}")
        return cls(f"{key}")

    def key(self) -> BareOpaqueRecordKey:
        parts = self.split(":", 1)
        return BareOpaqueRecordKey(parts[0])

    def encryption_key(self) -> Optional[BareSharedSecret]:
        parts = self.split(":", 1)
        if len(parts) == 2:
            return BareSharedSecret(self.split(":", 1)[1])
        return None

class BareKeyPair(str):
    @classmethod

@@ -180,6 +197,15 @@ class CryptoTyped(str):
            raise ValueError("Not CryptoTyped")
        return self[5:]

class SharedSecret(CryptoTyped):
    @classmethod
    def from_value(cls, kind: CryptoKind, value: BareSharedSecret) -> Self:
        assert isinstance(kind, CryptoKind)
        assert isinstance(value, BareSharedSecret)
        return cls(f"{kind}:{value}")

    def value(self) -> BareSharedSecret:
        return BareSharedSecret(self._value())

class RecordKey(CryptoTyped):
    @classmethod

@@ -191,15 +217,9 @@ class RecordKey(CryptoTyped):
    def value(self) -> BareRecordKey:
        return BareRecordKey(self._value())

class SharedSecret(CryptoTyped):
    @classmethod
    def from_value(cls, kind: CryptoKind, value: BareSharedSecret) -> Self:
        assert isinstance(kind, CryptoKind)
        assert isinstance(value, BareSharedSecret)
        return cls(f"{kind}:{value}")

    def value(self) -> BareSharedSecret:
        return BareSharedSecret(self._value())
    def encryption_key(self) -> Optional[SharedSecret]:
        ek = self.value().encryption_key()
        return None if ek == None else SharedSecret.from_value(self.kind(), ek)

class HashDigest(CryptoTyped):
    @classmethod
|
|||
documentation = "https://docs.rs/veilid-remote-api"
|
||||
homepage = "https://veilid.gitlab.io/developer-book/"
|
||||
|
||||
[features]
|
||||
default = ["rt-tokio"]
|
||||
default-async-std = ["rt-async-std"]
|
||||
|
||||
rt-tokio = ["veilid-core/default-tokio"]
|
||||
rt-async-std = ["veilid-core/default-async-std"]
|
||||
|
||||
[lib]
|
||||
path = "src/lib.rs"
|
||||
|
||||
[dependencies]
|
||||
veilid-core = { version = "0.4.8", path = "../veilid-core" }
|
||||
veilid-core = { version = "0.4.8", path = "../veilid-core", default-features = false }
|
||||
|
||||
tracing = { version = "^0", features = ["log", "attributes"] }
|
||||
serde = { version = "1.0.218", features = ["derive", "rc"] }
|
||||
|
|
|
@@ -267,13 +267,17 @@ impl JsonRequestProcessor {
                    ),
                }
            }
            RoutingContextRequestOp::GetDhtRecordKey { schema, owner } => {
                RoutingContextResponseOp::GetDhtRecordKey {
                    result: to_json_api_result_with_string(
                        routing_context.get_dht_record_key(schema, &owner),
                    ),
                }
            }
            RoutingContextRequestOp::GetDhtRecordKey {
                schema,
                owner,
                encryption_key,
            } => RoutingContextResponseOp::GetDhtRecordKey {
                result: to_json_api_result_with_string(routing_context.get_dht_record_key(
                    schema,
                    owner,
                    encryption_key,
                )),
            },
            RoutingContextRequestOp::CreateDhtRecord {
                kind,
                schema,
@@ -42,6 +42,8 @@ pub enum RoutingContextRequestOp {
        schema: DHTSchema,
        #[schemars(with = "String")]
        owner: PublicKey,
        #[schemars(with = "Option<String>")]
        encryption_key: Option<SharedSecret>,
    },
    CreateDhtRecord {
        #[schemars(with = "String")]
@@ -17,26 +17,21 @@ name = "veilid-server"
path = "src/main.rs"

[features]
default = ["rt-tokio", "veilid-core/default", "otlp-tonic"]
default-async-std = ["rt-async-std", "veilid-core/default-async-std"]
default = [
    "rt-tokio",
    "veilid-core/default",
    "otlp-tonic",
    "veilid-remote-api/default",
]

default-async-std = [
    "rt-async-std",
    "veilid-core/default-async-std",
    "veilid-remote-api/default-async-std",
]

footgun = ["veilid-core/footgun"]

virtual-network = [
    "veilid-core/virtual-network",
    "veilid-core/virtual-network-server",
]

crypto-test = ["rt-tokio", "veilid-core/crypto-test"]
crypto-test-none = ["rt-tokio", "veilid-core/crypto-test-none"]

otlp-tonic = ["opentelemetry-otlp/grpc-tonic", "opentelemetry-otlp/trace"]
# otlp-grpc = ["opentelemetry-otlp/grpc-sys", "opentelemetry-otlp/trace"]

rt-async-std = [
    "veilid-core/rt-async-std",
    "async-std",
    "opentelemetry_sdk/rt-async-std",
]
rt-tokio = [
    "veilid-core/rt-tokio",
    "tokio",

@@ -44,18 +39,33 @@ rt-tokio = [
    "tokio-util",
    "opentelemetry_sdk/rt-tokio",
]
tracking = ["veilid-core/tracking"]

rt-async-std = [
    "veilid-core/rt-async-std",
    "async-std",
    "opentelemetry_sdk/rt-async-std",
]

# Debugging and testing features
crypto-test = ["rt-tokio", "veilid-core/crypto-test"]
crypto-test-none = ["rt-tokio", "veilid-core/crypto-test-none"]
debug-json-api = []
debug-locks = ["veilid-core/debug-locks"]
perfetto = ["tracing-perfetto"]
flame = ["tracing-flame"]
tokio-console = ["rt-tokio", "console-subscriber"]

geolocation = ["veilid-core/geolocation"]
# otlp-grpc = ["opentelemetry-otlp/grpc-sys", "opentelemetry-otlp/trace"]
otlp-tonic = ["opentelemetry-otlp/grpc-tonic", "opentelemetry-otlp/trace"]
perfetto = ["tracing-perfetto"]
tokio-console = ["rt-tokio", "console-subscriber"]
tracking = ["veilid-core/tracking"]
virtual-network = [
    "veilid-core/virtual-network",
    "veilid-core/virtual-network-server",
]

[dependencies]
veilid-core = { path = "../veilid-core", default-features = false }
veilid-remote-api = { path = "../veilid-remote-api"}
veilid-remote-api = { path = "../veilid-remote-api", default-features = false }
tracing = { version = "^0.1.41", features = ["log", "attributes"] }
tracing-subscriber = { version = "^0.3.19", features = ["env-filter", "time"] }
tracing-appender = "^0.2.3"
@@ -25,11 +25,6 @@ required-features = ["virtual-router-bin"]

[features]
default = ["rt-tokio"]
rt-async-std = [
    "async-std",
    "async_executors/async_std",
    "rtnetlink/smol_socket",
]
rt-tokio = [
    "tokio",
    "tokio-util",

@@ -39,6 +34,11 @@ rt-tokio = [
    "async_executors/tokio_io",
    "async_executors/tokio_timer",
]
rt-async-std = [
    "async-std",
    "async_executors/async_std",
    "rtnetlink/smol_socket",
]
rt-wasm-bindgen = [
    "async_executors/bindgen",
    "async_executors/timer",
@@ -106,7 +106,7 @@ impl IpcListener {

    /// Accepts a new incoming connection to this listener.
    #[must_use]
    pub fn accept(&self) -> PinBoxFuture<io::Result<IpcStream>> {
    pub fn accept(&self) -> PinBoxFuture<'_, io::Result<IpcStream>> {
        if self.path.is_none() {
            return Box::pin(std::future::ready(Err(io::Error::from(
                io::ErrorKind::NotConnected,
@@ -200,14 +200,14 @@ impl IpAdapterAddresses {
            .into_owned()
    }

    pub fn prefixes(&self) -> PrefixesIterator {
    pub fn prefixes(&self) -> PrefixesIterator<'_> {
        PrefixesIterator {
            _phantom: std::marker::PhantomData {},
            next: unsafe { (*self.data).FirstPrefix },
        }
    }

    pub fn unicast_addresses(&self) -> UnicastAddressesIterator {
    pub fn unicast_addresses(&self) -> UnicastAddressesIterator<'_> {
        UnicastAddressesIterator {
            _phantom: std::marker::PhantomData {},
            next: unsafe { (*self.data).FirstUnicastAddress },
@@ -113,6 +113,6 @@ pub fn ws_err_to_io_error(err: WsErr) -> io::Error {
        WsErr::InvalidEncoding => io::Error::new(io::ErrorKind::InvalidInput, err.to_string()),
        WsErr::CantDecodeBlob => io::Error::new(io::ErrorKind::InvalidInput, err.to_string()),
        WsErr::UnknownDataType => io::Error::new(io::ErrorKind::InvalidInput, err.to_string()),
        _ => io::Error::new(io::ErrorKind::Other, err.to_string()),
        _ => io::Error::other(err.to_string()),
    }
}
@@ -368,16 +368,25 @@ pub fn routing_context_app_message(id: u32, target: String, message: String) ->
}

#[wasm_bindgen()]
pub fn routing_context_get_dht_record_key(id: u32, schema: String, owner: String) -> Promise {
pub fn routing_context_get_dht_record_key(
    id: u32,
    schema: String,
    owner: String,
    encryption_key: Option<String>,
) -> Promise {
    wrap_api_future_json(async move {
        let schema: veilid_core::DHTSchema =
            veilid_core::deserialize_json(&schema).map_err(VeilidAPIError::generic)?;
        let owner: veilid_core::PublicKey =
            veilid_core::deserialize_json(&owner).map_err(VeilidAPIError::generic)?;
        let encryption_key: Option<veilid_core::SharedSecret> = match encryption_key {
            Some(s) => Some(veilid_core::deserialize_json(&s).map_err(VeilidAPIError::generic)?),
            None => None,
        };

        let routing_context = get_routing_context(id, "routing_context_get_dht_record_key")?;

        let key = routing_context.get_dht_record_key(schema, &owner)?;
        let key = routing_context.get_dht_record_key(schema, owner, encryption_key)?;

        APIResult::Ok(key)
    })
@@ -116,10 +116,17 @@ impl VeilidRoutingContext {
        &self,
        schema: DHTSchema,
        owner: &PublicKey,
        encryption_key: Option<TypeStubSharedSecret>,
    ) -> APIResult<RecordKey> {
        let encryption_key = match encryption_key {
            Some(encryption_key) => try_from_js_option::<SharedSecret>(encryption_key)
                .map_err(VeilidAPIError::generic)?,
            None => None,
        };

        let routing_context = self.getRoutingContext()?;

        let key = routing_context.get_dht_record_key(schema, owner)?;
        let key = routing_context.get_dht_record_key(schema, owner.clone(), encryption_key)?;
        APIResult::Ok(key)
    }
@@ -106,8 +106,8 @@ describe('VeilidRoutingContext', () => {
      const ownerKeyPair = vcrypto.generateKeyPair();
      const owner = ownerKeyPair.key
      const secret = ownerKeyPair.secret
      const dhtRecordKey = await routingContext.getDhtRecordKey({ kind: 'DFLT', o_cnt: 1 }, owner);
      const dhtRecord = await routingContext.createDhtRecord(cryptoKind, { kind: 'DFLT', o_cnt: 1 }, ownerKeyPair);
      const dhtRecordKey = await routingContext.getDhtRecordKey({ kind: 'DFLT', o_cnt: 1 }, owner, dhtRecord.key.encryption_key);
      expect(dhtRecord.key).toBeDefined();
      expect(dhtRecord.key.isEqual(dhtRecordKey)).toEqual(true);
      expect(dhtRecord.owner).toBeDefined();