Variable Length Keys

This commit is contained in:
Christien Rioux 2025-07-06 18:55:17 -04:00
parent b52e64e56a
commit 67be63c91c
171 changed files with 12847 additions and 10261 deletions

33
Cargo.lock generated
View file

@ -870,6 +870,9 @@ name = "bytes"
version = "1.10.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d71b6127be86fdcfddb610f7182ac57211d4b18a3e9c82eb2d17662f2227ad6a"
dependencies = [
"serde",
]
[[package]]
name = "capnp"
@ -4574,6 +4577,26 @@ dependencies = [
"thiserror 1.0.69",
]
[[package]]
name = "ref-cast"
version = "1.0.24"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4a0ae411dbe946a674d89546582cea4ba2bb8defac896622d6496f14c23ba5cf"
dependencies = [
"ref-cast-impl",
]
[[package]]
name = "ref-cast-impl"
version = "1.0.24"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1165225c21bff1f3bbce98f5a1f889949bc902d3575308cc7b0de30b4f6d27c7"
dependencies = [
"proc-macro2",
"quote",
"syn 2.0.101",
]
[[package]]
name = "regex"
version = "1.11.1"
@ -4890,11 +4913,12 @@ dependencies = [
[[package]]
name = "schemars"
version = "0.8.22"
version = "1.0.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3fbf2ae1b8bc8e02df939598064d22402220cd5bbcca1c76f7d6a310974d5615"
checksum = "1375ba8ef45a6f15d83fa8748f1079428295d403d6ea991d09ab100155fbc06d"
dependencies = [
"dyn-clone",
"ref-cast",
"schemars_derive",
"serde",
"serde_json",
@ -4902,9 +4926,9 @@ dependencies = [
[[package]]
name = "schemars_derive"
version = "0.8.22"
version = "1.0.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "32e265784ad618884abaea0600a9adf15393368d840e0222d101a072f3f7534d"
checksum = "2b13ed22d6d49fe23712e068770b5c4df4a693a2b02eeff8e7ca3135627a24f6"
dependencies = [
"proc-macro2",
"quote",
@ -6412,6 +6436,7 @@ dependencies = [
"backtrace",
"blake3",
"bosion",
"bytes 1.10.1",
"capnp",
"capnpc",
"cfg-if 1.0.0",

View file

@ -45,8 +45,22 @@ rt-tokio = [
]
# Crypto support features
enable-crypto-vld0 = [ "ed25519-dalek", "x25519-dalek", "curve25519-dalek", "blake3", "chacha20poly1305", "chacha20", "argon2" ]
enable-crypto-none = [ "ed25519-dalek", "curve25519-dalek", "blake3", "argon2", "digest" ]
enable-crypto-vld0 = [
"ed25519-dalek",
"x25519-dalek",
"curve25519-dalek",
"blake3",
"chacha20poly1305",
"chacha20",
"argon2",
]
enable-crypto-none = [
"ed25519-dalek",
"curve25519-dalek",
"blake3",
"argon2",
"digest",
]
# Debugging and testing features
verbose-tracing = []
@ -134,7 +148,7 @@ blake3 = { version = "1.8.2", optional = true }
chacha20poly1305 = { version = "0.10.1", optional = true }
chacha20 = { version = "0.9.1", optional = true }
argon2 = { version = "0.5.3", optional = true }
digest = { version = "0.10.7", optional = true, features = [ "rand_core" ]}
digest = { version = "0.10.7", optional = true, features = ["rand_core"] }
# Network
async-std-resolver = { version = "0.24.4", optional = true }
@ -147,7 +161,7 @@ serde_json = { version = "1.0.140" }
serde-big-array = "0.5.1"
json = "0.12.4"
data-encoding = { version = "2.8.0" }
schemars = "0.8.22"
schemars = "1.0.3"
lz4_flex = { version = "0.11.3", default-features = false, features = [
"safe-encode",
"safe-decode",
@ -155,6 +169,7 @@ lz4_flex = { version = "0.11.3", default-features = false, features = [
indent = "0.1.1"
sanitize-filename = "0.5.0"
serde_with = "3.12.0"
bytes = { version = "1.10.1", features = ["serde"] }
# Dependencies for native builds only
# Linux, Windows, Mac, iOS, Android

View file

@ -233,7 +233,7 @@ async fn open_route(
if val.is_empty() {
break;
}
try_again_loop(|| async { rc.app_message(Target::PrivateRoute(route_id), val.as_bytes().to_vec()).await })
try_again_loop(|| async { rc.app_message(Target::RouteId(route_id.clone()), val.as_bytes().to_vec()).await })
.await?;
} else {
break;

View file

@ -1,50 +1,59 @@
@0x8ffce8033734ab02;
# IDs And Hashes
# Typed IDs and Hashes
##############################
struct Key256 @0xdde44e3286f6a90d {
u0 @0 :UInt64;
u1 @1 :UInt64;
u2 @2 :UInt64;
u3 @3 :UInt64;
# DHT Record Key
struct RecordKey @0xcd55475a98deb0c1 {
kind @0 :CryptoKind;
value @1 :Data;
}
struct Signature512 @0x806749043a129c12 {
u0 @0 :UInt64;
u1 @1 :UInt64;
u2 @2 :UInt64;
u3 @3 :UInt64;
u4 @4 :UInt64;
u5 @5 :UInt64;
u6 @6 :UInt64;
u7 @7 :UInt64;
# Blockstore Block Id
struct BlockId @0xed3678a9e4771a22 {
kind @0 :CryptoKind;
value @1 :Data;
}
struct Nonce24 @0xb6260db25d8d7dfc {
u0 @0 :UInt64;
u1 @1 :UInt64;
u2 @2 :UInt64;
# Node Id (hash of node public key)
struct NodeId @0xce9822f1cebca1bb {
kind @0 :CryptoKind;
value @1 :Data;
}
using PublicKey = Key256; # Node id / Hash / DHT key / Route id, etc
using Nonce = Nonce24; # One-time encryption nonce
using Signature = Signature512; # Signature block
using TunnelID = UInt64; # Id for tunnels
# Public Key
struct PublicKey @0xfc522f2dbdf30cee {
kind @0 :CryptoKind;
value @1 :Data;
}
# DHT Route Id
struct RouteId @0xc8ae026d91da0ae6 {
kind @0 :CryptoKind;
value @1 :Data;
}
# Signature
struct Signature @0xf06fa6e2ac8726a2 {
kind @0 :CryptoKind;
value @1 :Data;
}
# Untyped generic one-time encryption nonce
struct Nonce @0x9b00866db77b59f6 {
value @0 :Data;
}
# Convenience Typedefs
################################################################
using TunnelId = UInt64; # Id for tunnels
using CryptoKind = UInt32; # FOURCC code for cryptography type
using ValueSeqNum = UInt32; # sequence numbers for values
using Subkey = UInt32; # subkey index for dht
using Capability = UInt32; # FOURCC code for capability
struct TypedKey @0xe2d567a9f1e61b29 {
kind @0 :CryptoKind;
key @1 :PublicKey;
}
struct TypedSignature @0x963170c7298e3884 {
kind @0 :CryptoKind;
signature @1 :Signature;
}
# Node Dial Info
################################################################
@ -122,7 +131,7 @@ struct SignalInfoReverseConnect @0xd9ebd3bd0d46e013 {
# Private Routes
##############################
struct RouteHopData @0x8ce231f9d1b7adf2 {
struct RouteHopData @0x9d45cb1880e79fdb {
nonce @0 :Nonce; # nonce for encrypted blob
blob @1 :Data; # encrypted blob with ENC(nonce,DH(PK,SK))
# if this is a safety route RouteHopData, there is a single byte tag appended to the end of the encrypted blob
@ -134,7 +143,7 @@ struct RouteHopData @0x8ce231f9d1b7adf2 {
struct RouteHop @0xf8f672d75cce0c3b {
node :union {
nodeId @0 :PublicKey; # node id key only for established routes (kind is the same as the pr or sr it is part of)
nodeId @0 :NodeId; # node id key only for established routes (kind is the same as the pr or sr it is part of)
peerInfo @1 :PeerInfo; # full peer info for this hop to establish the route
}
nextHop @2 :RouteHopData; # optional: If this the end of a private route, this field will not exist
@ -142,21 +151,19 @@ struct RouteHop @0xf8f672d75cce0c3b {
}
struct PrivateRoute @0x8a83fccb0851e776 {
publicKey @0 :TypedKey; # private route public key (unique per private route)
hopCount @1 :UInt8; # Count of hops left in the private route (for timeout calculation purposes only)
publicKey @0 :PublicKey; # private route public key (unique per private route)
hops :union {
firstHop @2 :RouteHop; # first hop of a private route is unencrypted (hopcount > 0)
data @3 :RouteHopData; # private route has more hops (hopcount > 0 && hopcount < total_hopcount)
empty @4 :Void; # private route has ended (hopcount = 0)
firstHop @1 :RouteHop; # first hop of a private route is unencrypted (hopcount > 0)
data @2 :RouteHopData; # private route has more hops (hopcount > 0 && hopcount < total_hopcount)
empty @3 :Void; # private route has ended (hopcount = 0)
}
}
struct SafetyRoute @0xf554734d07cb5d59 {
publicKey @0 :TypedKey; # safety route public key (unique per safety route)
hopCount @1 :UInt8; # Count of hops left in the safety route (for timeout calculation purposes only)
publicKey @0 :PublicKey; # safety route public key (unique per safety route)
hops :union {
data @2 :RouteHopData; # safety route has more hops
private @3 :PrivateRoute; # safety route has ended and private route follows
data @1 :RouteHopData; # safety route has more hops
private @2 :PrivateRoute; # safety route has ended and private route follows
}
}
@ -220,18 +227,18 @@ struct NodeInfo @0xe125d847e3f9f419 {
dialInfoDetailList @6 :List(DialInfoDetail); # inbound dial info details for this node
}
struct SignedDirectNodeInfo @0xe0e7ea3e893a3dd7 {
struct SignedDirectNodeInfo @0xa70f85e77dafc0cd {
nodeInfo @0 :NodeInfo; # node info
timestamp @1 :UInt64; # when signed node info was generated
signatures @2 :List(TypedSignature); # signatures
signatures @2 :List(Signature); # signatures
}
struct SignedRelayedNodeInfo @0xb39e8428ccd87cbb {
struct SignedRelayedNodeInfo @0x8b193d500b11573f {
nodeInfo @0 :NodeInfo; # node info
relayIds @1 :List(TypedKey); # node ids for relay
relayIds @1 :List(NodeId); # node ids for relay
relayInfo @2 :SignedDirectNodeInfo; # signed node info for relay
timestamp @3 :UInt64; # when signed node info was generated
signatures @4 :List(TypedSignature); # signatures
signatures @4 :List(Signature); # signatures
}
struct SignedNodeInfo @0xd2478ce5f593406a {
@ -241,12 +248,12 @@ struct SignedNodeInfo @0xd2478ce5f593406a {
}
}
struct PeerInfo @0xfe2d722d5d3c4bcb {
nodeIds @0 :List(TypedKey); # node ids for 'closer peer'
struct PeerInfo @0xb33ceb3dd583dbf7 {
nodeIds @0 :List(NodeId); # node ids for 'closer peer'
signedNodeInfo @1 :SignedNodeInfo; # signed node info for 'closer peer'
}
struct RoutedOperation @0xcbcb8535b839e9dd {
struct RoutedOperation @0xa074e6662fe5aa92 {
sequencing @0 :Sequencing; # sequencing preference to use to pass the message along
signatures @1 :List(Signature); # signatures from nodes that have handled the private route
nonce @2 :Nonce; # nonce Xmsg
@ -272,8 +279,8 @@ struct OperationReturnReceipt @0xeb0fb5b5a9160eeb {
receipt @0 :Data; # receipt being returned to its origin
}
struct OperationFindNodeQ @0xfdef788fe9623bcd {
nodeId @0 :TypedKey; # node id to locate
struct OperationFindNodeQ @0xca524d602e9d86ca {
nodeId @0 :NodeId; # node id to locate
capabilities @1 :List(Capability); # required capabilities returned peers must have
}
@ -303,13 +310,13 @@ struct SubkeyRange @0xf592dac0a4d0171c {
end @1 :Subkey; # the end of a subkey range
}
struct SignedValueData @0xb4b7416f169f2a3d {
struct SignedValueData @0xff3944efaaf7fc18 {
seq @0 :ValueSeqNum; # sequence number of value
data @1 :Data; # value or subvalue contents
writer @2 :PublicKey; # the public key of the writer
signature @3 :Signature; # signature of data at this subkey, using the writer key (which may be the same as the owner key)
# signature covers:
# * ownerKey
# * owner public key
# * subkey
# * sequence number
# * data
@ -325,8 +332,8 @@ struct SignedValueDescriptor @0xe7911cd3f9e1b0e7 {
}
struct OperationGetValueQ @0xf88a5b6da5eda5d0 {
key @0 :TypedKey; # DHT Key = Hash(ownerKeyKind) of: [ ownerKeyValue, schema ]
struct OperationGetValueQ @0x8c176d26517ea24d {
key @0 :RecordKey; # DHT Key = Hash(ownerKeyKind) of: [ ownerKeyValue, schema ]
subkey @1 :Subkey; # the index of the subkey
wantDescriptor @2 :Bool; # whether or not to include the descriptor for the key
}
@ -339,7 +346,7 @@ struct OperationGetValueA @0xd896bb46f2e0249f {
}
struct OperationSetValueQ @0xbac06191ff8bdbc5 {
key @0 :TypedKey; # DHT Key = Hash(ownerKeyKind) of: [ ownerKeyValue, schema ]
key @0 :RecordKey; # DHT Key = Hash(ownerKeyKind) of: [ ownerKeyValue, schema ]
subkey @1 :Subkey; # the index of the subkey
value @2 :SignedValueData; # value or subvalue contents (older or equal seq number gets dropped)
descriptor @3 :SignedValueDescriptor; # optional: the descriptor if needed
@ -352,7 +359,7 @@ struct OperationSetValueA @0x9378d0732dc95be2 {
}
struct OperationWatchValueQ @0xf9a5a6c547b9b228 {
key @0 :TypedKey; # key for value to watch
key @0 :RecordKey; # key for value to watch
subkeys @1 :List(SubkeyRange); # subkey range to watch (up to 512 subranges). An empty range here should not be specified unless cancelling a watch (count=0).
expiration @2 :UInt64; # requested timestamp when this watch will expire in usec since epoch (watch can return less, 0 for max)
count @3 :UInt32; # requested number of changes to watch for (0 = cancel, 1 = single shot, 2+ = counter, UINT32_MAX = continuous)
@ -369,7 +376,7 @@ struct OperationWatchValueA @0xa726cab7064ba893 {
}
struct OperationInspectValueQ @0xdef712d2fd16f55a {
key @0 :TypedKey; # DHT Key = Hash(ownerKeyKind) of: [ ownerKeyValue, schema ]
key @0 :RecordKey; # DHT Key = Hash(ownerKeyKind) of: [ ownerKeyValue, schema ]
subkeys @1 :List(SubkeyRange); # subkey range to inspect (up to 512 total subkeys), if empty this implies 0..=511
wantDescriptor @2 :Bool; # whether or not to include the descriptor for the key
}
@ -381,7 +388,7 @@ struct OperationInspectValueA @0xb8b57faf960ee102 {
}
struct OperationValueChanged @0xd1c59ebdd8cc1bf6 {
key @0 :TypedKey; # key for value that changed
key @0 :RecordKey; # key for value that changed
subkeys @1 :List(SubkeyRange); # subkey range that changed (up to 512 ranges at a time, if empty this is a watch expiration notice)
count @2 :UInt32; # remaining changes left (0 means watch has expired)
watchId @3 :UInt64; # watch id this value change came from
@ -389,7 +396,8 @@ struct OperationValueChanged @0xd1c59ebdd8cc1bf6 {
}
struct OperationSupplyBlockQ @0xadbf4c542d749971 {
blockId @0 :TypedKey; # hash of the block we can supply
blockId @0 :BlockId; # hash of the block we can supply
routeId @1 :RouteId; # the private route endpoint for this block supplier
}
struct OperationSupplyBlockA @0xf003822e83b5c0d7 {
@ -398,13 +406,13 @@ struct OperationSupplyBlockA @0xf003822e83b5c0d7 {
}
struct OperationFindBlockQ @0xaf4353ff004c7156 {
blockId @0 :TypedKey; # hash of the block to locate
blockId @0 :BlockId; # hash of the block to locate
}
struct OperationFindBlockA @0xc51455bc4915465d {
data @0 :Data; # Optional: the actual block data if we have that block ourselves
# null if we don't have a block to return
suppliers @1 :List(PeerInfo); # returned list of suppliers if we have them
suppliers @1 :List(RouteId); # returned list of supplier private route ids if we have them
peers @2 :List(PeerInfo); # returned 'closer peer' information
}
@ -429,24 +437,24 @@ enum TunnelError @0xb82c6bfb1ec38c7c {
struct TunnelEndpoint @0xc2602aa983cc337d {
mode @0 :TunnelEndpointMode; # what kind of endpoint this is
description @1 :Text; # endpoint description (TODO)
description @1 :Data; # endpoint description (TODO)
}
struct FullTunnel @0x9821c3dc75373f63 {
id @0 :TunnelID; # tunnel id to use everywhere
id @0 :TunnelId; # tunnel id to use everywhere
timeout @1 :UInt64; # duration from last data when this expires if no data is sent or received
local @2 :TunnelEndpoint; # local endpoint
remote @3 :TunnelEndpoint; # remote endpoint
}
struct PartialTunnel @0x827a7ebc02be2fc8 {
id @0 :TunnelID; # tunnel id to use everywhere
id @0 :TunnelId; # tunnel id to use everywhere
timeout @1 :UInt64; # timestamp when this expires if not completed
local @2 :TunnelEndpoint; # local endpoint
}
struct OperationStartTunnelQ @0xa9c49afce44187af {
id @0 :TunnelID; # tunnel id to use everywhere
id @0 :TunnelId; # tunnel id to use everywhere
localMode @1 :TunnelEndpointMode; # what kind of local endpoint mode is being requested
depth @2 :UInt8; # the number of nodes in the tunnel
}
@ -459,7 +467,7 @@ struct OperationStartTunnelA @0x818162e4cc61bf1e {
}
struct OperationCompleteTunnelQ @0xe978594588eb950b {
id @0 :TunnelID; # tunnel id to use everywhere
id @0 :TunnelId; # tunnel id to use everywhere
localMode @1 :TunnelEndpointMode; # what kind of local endpoint mode is being requested
depth @2 :UInt8; # the number of nodes in the tunnel
endpoint @3 :TunnelEndpoint; # the remote endpoint to complete
@ -473,12 +481,12 @@ struct OperationCompleteTunnelA @0x84090791bb765f2a {
}
struct OperationCancelTunnelQ @0xae2811ae0a003738 {
id @0 :TunnelID; # the tunnel id to cancel
id @0 :TunnelId; # the tunnel id to cancel
}
struct OperationCancelTunnelA @0xbba23c992eff97bc {
union {
tunnel @0 :TunnelID; # the tunnel id that was cancelled
tunnel @0 :TunnelId; # the tunnel id that was cancelled
error @1 :TunnelError; # if we couldn't cancel, why not
}
}
@ -501,6 +509,7 @@ struct Question @0xd8510bc33492ef70 {
watchValueQ @7 :OperationWatchValueQ;
inspectValueQ @8 :OperationInspectValueQ;
# Blockstore operations
# #[cfg(feature="unstable-blockstore")]
# supplyBlockQ @9 :OperationSupplyBlockQ;
# findBlockQ @10 :OperationFindBlockQ;
@ -542,9 +551,10 @@ struct Answer @0xacacb8b6988c1058 {
watchValueA @5 :OperationWatchValueA;
inspectValueA @6 :OperationInspectValueA;
# Blockstore operations
# #[cfg(feature="unstable-blockstore")]
#supplyBlockA @7 :OperationSupplyBlockA;
#findBlockA @8 :OperationFindBlockA;
# supplyBlockA @7 :OperationSupplyBlockA;
# findBlockA @8 :OperationFindBlockA;
# Tunnel operations
# #[cfg(feature="unstable-tunnels")]

File diff suppressed because it is too large Load diff

View file

@ -16,7 +16,6 @@ pub trait CryptoSystem {
// Generation
fn random_bytes(&self, len: u32) -> Vec<u8>;
fn default_salt_length(&self) -> u32;
fn hash_password(&self, password: &[u8], salt: &[u8]) -> VeilidAPIResult<String>;
fn verify_password(&self, password: &[u8], password_hash: &str) -> VeilidAPIResult<bool>;
fn derive_shared_secret(
@ -38,9 +37,9 @@ pub trait CryptoSystem {
domain: &[u8],
) -> VeilidAPIResult<BareSharedSecret> {
let dh = self.compute_dh(key, secret)?;
Ok(BareSharedSecret::from(self.generate_hash(
&[&dh.bytes, domain, VEILID_DOMAIN_API].concat(),
)))
Ok(BareSharedSecret::from(
self.generate_hash(&[&dh, domain, VEILID_DOMAIN_API].concat()),
))
}
fn generate_keypair(&self) -> BareKeyPair;
fn generate_hash(&self, data: &[u8]) -> BareHashDigest;
@ -50,6 +49,76 @@ pub trait CryptoSystem {
) -> VeilidAPIResult<BarePublicKey>;
// Validation
fn shared_secret_length(&self) -> usize;
fn nonce_length(&self) -> usize;
fn hash_digest_length(&self) -> usize;
fn public_key_length(&self) -> usize;
fn secret_key_length(&self) -> usize;
fn signature_length(&self) -> usize;
fn default_salt_length(&self) -> usize;
fn aead_overhead(&self) -> usize;
fn check_shared_secret(&self, secret: &BareSharedSecret) -> VeilidAPIResult<()> {
if secret.len() != self.shared_secret_length() {
apibail_generic!(format!(
"invalid shared secret length: {} != {}",
secret.len(),
self.shared_secret_length()
));
}
Ok(())
}
fn check_nonce(&self, nonce: &BareNonce) -> VeilidAPIResult<()> {
if nonce.len() != self.nonce_length() {
apibail_generic!(format!(
"invalid nonce length: {} != {}",
nonce.len(),
self.nonce_length()
));
}
Ok(())
}
fn check_hash_digest(&self, hash: &BareHashDigest) -> VeilidAPIResult<()> {
if hash.len() != self.hash_digest_length() {
apibail_generic!(format!(
"invalid hash digest length: {} != {}",
hash.len(),
self.hash_digest_length()
));
}
Ok(())
}
fn check_public_key(&self, key: &BarePublicKey) -> VeilidAPIResult<()> {
if key.len() != self.public_key_length() {
apibail_generic!(format!(
"invalid public key length: {} != {}",
key.len(),
self.public_key_length()
));
}
Ok(())
}
fn check_secret_key(&self, key: &BareSecretKey) -> VeilidAPIResult<()> {
if key.len() != self.secret_key_length() {
apibail_generic!(format!(
"invalid secret key length: {} != {}",
key.len(),
self.secret_key_length()
));
}
Ok(())
}
fn check_signature(&self, signature: &BareSignature) -> VeilidAPIResult<()> {
if signature.len() != self.signature_length() {
apibail_generic!(format!(
"invalid signature length: {} != {}",
signature.len(),
self.signature_length()
));
}
Ok(())
}
fn validate_keypair(&self, key: &BarePublicKey, secret: &BareSecretKey) -> bool;
fn validate_hash(&self, data: &[u8], hash: &BareHashDigest) -> bool;
fn validate_hash_reader(
@ -76,7 +145,6 @@ pub trait CryptoSystem {
) -> VeilidAPIResult<bool>;
// AEAD Encrypt/Decrypt
fn aead_overhead(&self) -> usize;
fn decrypt_in_place_aead(
&self,
body: &mut Vec<u8>,
@ -112,24 +180,24 @@ pub trait CryptoSystem {
body: &mut [u8],
nonce: &BareNonce,
shared_secret: &BareSharedSecret,
);
) -> VeilidAPIResult<()>;
fn crypt_b2b_no_auth(
&self,
in_buf: &[u8],
out_buf: &mut [u8],
nonce: &BareNonce,
shared_secret: &BareSharedSecret,
);
) -> VeilidAPIResult<()>;
fn crypt_no_auth_aligned_8(
&self,
body: &[u8],
nonce: &BareNonce,
shared_secret: &BareSharedSecret,
) -> Vec<u8>;
) -> VeilidAPIResult<Vec<u8>>;
fn crypt_no_auth_unaligned(
&self,
body: &[u8],
nonce: &BareNonce,
shared_secret: &BareSharedSecret,
) -> Vec<u8>;
) -> VeilidAPIResult<Vec<u8>>;
}

View file

@ -20,9 +20,9 @@ pub fn cache_to_bytes(cache: &DHCache) -> Vec<u8> {
let cnt: usize = cache.len();
let mut out: Vec<u8> = Vec::with_capacity(cnt * (32 + 32 + 32));
for e in cache.iter() {
out.extend(&e.0.key.bytes);
out.extend(&e.0.secret.bytes);
out.extend(&e.1.shared_secret.bytes);
out.extend_from_slice(&e.0.key);
out.extend_from_slice(&e.0.secret);
out.extend_from_slice(&e.1.shared_secret);
}
let mut rev: Vec<u8> = Vec::with_capacity(out.len());
for d in out.chunks(32 + 32 + 32).rev() {

View file

@ -130,32 +130,33 @@ impl Envelope {
.into();
// Get nonce and sender node id
let nonce_slice: [u8; NONCE_LENGTH] = data[0x12..0x2A]
let mut nonce_slice: [u8; VLD0_NONCE_LENGTH] = data[0x12..0x2A]
.try_into()
.map_err(VeilidAPIError::internal)?;
let sender_id_slice: [u8; PUBLIC_KEY_LENGTH] = data[0x2A..0x4A]
let mut sender_id_slice: [u8; VLD0_HASH_DIGEST_LENGTH] = data[0x2A..0x4A]
.try_into()
.map_err(VeilidAPIError::internal)?;
let recipient_id_slice: [u8; PUBLIC_KEY_LENGTH] = data[0x4A..0x6A]
let mut recipient_id_slice: [u8; VLD0_HASH_DIGEST_LENGTH] = data[0x4A..0x6A]
.try_into()
.map_err(VeilidAPIError::internal)?;
let mut nonce: BareNonce = BareNonce::new(nonce_slice);
let mut sender_id = BareNodeId::new(sender_id_slice);
let mut recipient_id = BareNodeId::new(recipient_id_slice);
// Apply network key (not the best, but it will keep networks from colliding without much overhead)
if let Some(nk) = network_key.as_ref() {
for n in 0..NONCE_LENGTH {
nonce.bytes[n] ^= nk.bytes[n];
for n in 0..VLD0_NONCE_LENGTH {
nonce_slice[n] ^= nk[n];
}
for n in 0..CRYPTO_KEY_LENGTH {
sender_id.bytes[n] ^= nk.bytes[n];
for n in 0..VLD0_HASH_DIGEST_LENGTH {
sender_id_slice[n] ^= nk[n];
}
for n in 0..CRYPTO_KEY_LENGTH {
recipient_id.bytes[n] ^= nk.bytes[n];
for n in 0..VLD0_HASH_DIGEST_LENGTH {
recipient_id_slice[n] ^= nk[n];
}
}
let nonce: BareNonce = BareNonce::new(&nonce_slice);
let sender_id = BareNodeId::new(&sender_id_slice);
let recipient_id = BareNodeId::new(&recipient_id_slice);
// Ensure sender_id and recipient_id are not the same
if sender_id == recipient_id {
apibail_parse_error!(
@ -173,7 +174,11 @@ impl Envelope {
// Validate signature
if !vcrypto
.verify(&sender_id.into(), &data[0..(data.len() - 64)], &signature)
.verify(
&sender_id.clone().into(),
&data[0..(data.len() - 64)],
&signature,
)
.map_err(VeilidAPIError::internal)?
{
apibail_parse_error!("signature verification of envelope failed", signature);
@ -202,17 +207,24 @@ impl Envelope {
let vcrypto = crypto
.get(self.crypto_kind)
.expect("need to ensure only valid crypto kinds here");
let mut dh_secret = vcrypto.cached_dh(&self.sender_id.into(), node_id_secret)?;
let mut dh_secret = vcrypto.cached_dh(&self.sender_id.clone().into(), node_id_secret)?;
// Apply network key
if let Some(nk) = network_key.as_ref() {
for n in 0..CRYPTO_KEY_LENGTH {
dh_secret.bytes[n] ^= nk.bytes[n];
let mut dh_secret_bytes = dh_secret.to_vec();
for n in 0..VLD0_SHARED_SECRET_LENGTH {
dh_secret_bytes[n] ^= nk[n];
}
dh_secret = BareSharedSecret::new(&dh_secret_bytes);
}
// Decrypt message without authentication
let body =
vcrypto.crypt_no_auth_aligned_8(&data[0x6A..data.len() - 64], &self.nonce, &dh_secret);
let body = vcrypto.crypt_no_auth_aligned_8(
&data[0x6A..data.len() - 64],
&self.nonce,
&dh_secret,
)?;
// Decompress body
let body = decompress_size_prepended(&body, Some(MAX_ENVELOPE_SIZE))?;
@ -252,7 +264,7 @@ impl Envelope {
let vcrypto = crypto
.get(self.crypto_kind)
.expect("need to ensure only valid crypto kinds here");
let mut dh_secret = vcrypto.cached_dh(&self.recipient_id.into(), node_id_secret)?;
let mut dh_secret = vcrypto.cached_dh(&self.recipient_id.clone().into(), node_id_secret)?;
// Write envelope body
let mut data = vec![0u8; envelope_size];
@ -268,30 +280,34 @@ impl Envelope {
// Write timestamp
data[0x0A..0x12].copy_from_slice(&self.timestamp.as_u64().to_le_bytes());
// Write nonce
data[0x12..0x2A].copy_from_slice(&self.nonce.bytes);
data[0x12..0x2A].copy_from_slice(&self.nonce);
// Write sender node id
data[0x2A..0x4A].copy_from_slice(&self.sender_id.bytes);
data[0x2A..0x4A].copy_from_slice(&self.sender_id);
// Write recipient node id
data[0x4A..0x6A].copy_from_slice(&self.recipient_id.bytes);
data[0x4A..0x6A].copy_from_slice(&self.recipient_id);
// Apply network key (not the best, but it will keep networks from colliding without much overhead)
if let Some(nk) = network_key.as_ref() {
for n in 0..SECRET_KEY_LENGTH {
dh_secret.bytes[n] ^= nk.bytes[n];
let mut dh_secret_bytes = dh_secret.to_vec();
for n in 0..VLD0_SHARED_SECRET_LENGTH {
dh_secret_bytes[n] ^= nk[n];
}
for n in 0..NONCE_LENGTH {
data[0x12 + n] ^= nk.bytes[n];
for n in 0..VLD0_NONCE_LENGTH {
data[0x12 + n] ^= nk[n];
}
for n in 0..CRYPTO_KEY_LENGTH {
data[0x2A + n] ^= nk.bytes[n];
for n in 0..VLD0_HASH_DIGEST_LENGTH {
data[0x2A + n] ^= nk[n];
}
for n in 0..CRYPTO_KEY_LENGTH {
data[0x4A + n] ^= nk.bytes[n];
for n in 0..VLD0_HASH_DIGEST_LENGTH {
data[0x4A + n] ^= nk[n];
}
dh_secret = BareSharedSecret::new(&dh_secret_bytes);
}
// Encrypt message
let encrypted_body = vcrypto.crypt_no_auth_unaligned(&body, &self.nonce, &dh_secret);
let encrypted_body = vcrypto.crypt_no_auth_unaligned(&body, &self.nonce, &dh_secret)?;
// Write body
if !encrypted_body.is_empty() {
@ -300,13 +316,13 @@ impl Envelope {
// Sign the envelope
let signature = vcrypto.sign(
&self.sender_id.into(),
&self.sender_id.clone().into(),
node_id_secret,
&data[0..(envelope_size - 64)],
)?;
// Append the signature
data[(envelope_size - 64)..].copy_from_slice(&signature.bytes);
data[(envelope_size - 64)..].copy_from_slice(&signature);
Ok(data)
}
@ -326,24 +342,24 @@ impl Envelope {
#[expect(dead_code)]
pub fn get_nonce(&self) -> BareNonce {
self.nonce
self.nonce.clone()
}
#[expect(dead_code)]
pub fn get_sender_id(&self) -> BareNodeId {
self.sender_id
pub fn get_bare_sender_id(&self) -> BareNodeId {
self.sender_id.clone()
}
pub fn get_sender_typed_id(&self) -> NodeId {
NodeId::new(self.crypto_kind, self.sender_id)
pub fn get_sender_id(&self) -> NodeId {
NodeId::new(self.crypto_kind, self.sender_id.clone())
}
#[expect(dead_code)]
pub fn get_recipient_id(&self) -> BareNodeId {
self.recipient_id
pub fn get_bare_recipient_id(&self) -> BareNodeId {
self.recipient_id.clone()
}
pub fn get_recipient_typed_id(&self) -> NodeId {
NodeId::new(self.crypto_kind, self.recipient_id)
pub fn get_recipient_id(&self) -> NodeId {
NodeId::new(self.crypto_kind, self.recipient_id.clone())
}
}

View file

@ -62,10 +62,7 @@ impl AsyncCryptoSystemGuard<'_> {
pub async fn random_bytes(&self, len: u32) -> Vec<u8> {
yielding(|| self.guard.random_bytes(len)).await
}
#[must_use]
pub fn default_salt_length(&self) -> u32 {
self.guard.default_salt_length()
}
pub async fn hash_password(&self, password: &[u8], salt: &[u8]) -> VeilidAPIResult<String> {
yielding(|| self.guard.hash_password(password, salt)).await
}
@ -104,7 +101,7 @@ impl AsyncCryptoSystemGuard<'_> {
) -> VeilidAPIResult<BareSharedSecret> {
let dh = self.compute_dh(key, secret).await?;
Ok(BareSharedSecret::from(
self.generate_hash(&[&dh.bytes, domain, VEILID_DOMAIN_API].concat())
self.generate_hash(&[&dh, domain, VEILID_DOMAIN_API].concat())
.await,
))
}
@ -125,6 +122,56 @@ impl AsyncCryptoSystemGuard<'_> {
}
// Validation
#[must_use]
pub fn shared_secret_length(&self) -> usize {
self.guard.shared_secret_length()
}
#[must_use]
pub fn nonce_length(&self) -> usize {
self.guard.nonce_length()
}
#[must_use]
pub fn hash_digest_length(&self) -> usize {
self.guard.hash_digest_length()
}
#[must_use]
pub fn public_key_length(&self) -> usize {
self.guard.public_key_length()
}
#[must_use]
pub fn secret_key_length(&self) -> usize {
self.guard.secret_key_length()
}
#[must_use]
pub fn signature_length(&self) -> usize {
self.guard.signature_length()
}
#[must_use]
pub fn aead_overhead(&self) -> usize {
self.guard.aead_overhead()
}
#[must_use]
pub fn default_salt_length(&self) -> usize {
self.guard.default_salt_length()
}
pub fn check_shared_secret(&self, secret: &BareSharedSecret) -> VeilidAPIResult<()> {
self.guard.check_shared_secret(secret)
}
pub fn check_nonce(&self, nonce: &BareNonce) -> VeilidAPIResult<()> {
self.guard.check_nonce(nonce)
}
pub fn check_hash_digest(&self, hash: &BareHashDigest) -> VeilidAPIResult<()> {
self.guard.check_hash_digest(hash)
}
pub fn check_public_key(&self, key: &BarePublicKey) -> VeilidAPIResult<()> {
self.guard.check_public_key(key)
}
pub fn check_secret_key(&self, key: &BareSecretKey) -> VeilidAPIResult<()> {
self.guard.check_secret_key(key)
}
pub fn check_signature(&self, signature: &BareSignature) -> VeilidAPIResult<()> {
self.guard.check_signature(signature)
}
pub async fn validate_keypair(&self, key: &BarePublicKey, secret: &BareSecretKey) -> bool {
yielding(|| self.guard.validate_keypair(key, secret)).await
}
@ -165,11 +212,6 @@ impl AsyncCryptoSystemGuard<'_> {
}
// AEAD Encrypt/Decrypt
#[must_use]
pub fn aead_overhead(&self) -> usize {
self.guard.aead_overhead()
}
pub async fn decrypt_in_place_aead(
&self,
body: &mut Vec<u8>,
@ -232,7 +274,7 @@ impl AsyncCryptoSystemGuard<'_> {
body: &mut [u8],
nonce: &BareNonce,
shared_secret: &BareSharedSecret,
) {
) -> VeilidAPIResult<()> {
yielding(|| {
self.guard
.crypt_in_place_no_auth(body, nonce, shared_secret)
@ -246,7 +288,7 @@ impl AsyncCryptoSystemGuard<'_> {
out_buf: &mut [u8],
nonce: &BareNonce,
shared_secret: &BareSharedSecret,
) {
) -> VeilidAPIResult<()> {
yielding(|| {
self.guard
.crypt_b2b_no_auth(in_buf, out_buf, nonce, shared_secret)
@ -259,7 +301,7 @@ impl AsyncCryptoSystemGuard<'_> {
body: &[u8],
nonce: &BareNonce,
shared_secret: &BareSharedSecret,
) -> Vec<u8> {
) -> VeilidAPIResult<Vec<u8>> {
yielding(|| {
self.guard
.crypt_no_auth_aligned_8(body, nonce, shared_secret)
@ -272,7 +314,7 @@ impl AsyncCryptoSystemGuard<'_> {
body: &[u8],
nonce: &BareNonce,
shared_secret: &BareSharedSecret,
) -> Vec<u8> {
) -> VeilidAPIResult<Vec<u8>> {
yielding(|| {
self.guard
.crypt_no_auth_unaligned(body, nonce, shared_secret)

View file

@ -8,6 +8,7 @@ mod types;
pub mod crypto_system;
#[cfg(feature = "enable-crypto-none")]
pub(crate) mod none;
#[doc(hidden)]
pub mod tests;
#[cfg(feature = "enable-crypto-vld0")]
@ -22,11 +23,11 @@ pub(crate) use receipt::*;
pub use types::*;
#[cfg(feature = "enable-crypto-none")]
pub use none::CRYPTO_KIND_NONE;
pub use none::sizes::*;
#[cfg(feature = "enable-crypto-none")]
pub(crate) use none::*;
#[cfg(feature = "enable-crypto-vld0")]
pub use vld0::CRYPTO_KIND_VLD0;
pub use vld0::sizes::*;
#[cfg(feature = "enable-crypto-vld0")]
pub(crate) use vld0::*;
@ -70,6 +71,7 @@ pub type EnvelopeVersion = u8;
pub const VALID_ENVELOPE_VERSIONS: [EnvelopeVersion; 1] = [0u8];
/// Number of envelope versions to keep on structures if many are present beyond the ones we consider valid
pub const MAX_ENVELOPE_VERSIONS: usize = 3;
/// Return the best envelope version we support
#[must_use]
pub fn best_envelope_version() -> EnvelopeVersion {
@ -154,7 +156,7 @@ impl Crypto {
self.config().with(|c| {
for ck in VALID_CRYPTO_KINDS {
if let Some(nid) = c.network.routing_table.node_id.get(ck) {
cache_validity_key.append(&mut nid.value.bytes.to_vec());
cache_validity_key.extend_from_slice(nid.ref_value());
}
}
});
@ -270,12 +272,12 @@ impl Crypto {
let mut out = PublicKeyGroup::with_capacity(public_keys.len());
for sig in typed_signatures {
for nid in public_keys {
if nid.kind == sig.kind {
if let Some(vcrypto) = self.get(sig.kind) {
if !vcrypto.verify(&nid.value, data, &sig.value)? {
if nid.kind() == sig.kind() {
if let Some(vcrypto) = self.get(sig.kind()) {
if !vcrypto.verify(nid.ref_value(), data, sig.ref_value())? {
return Ok(None);
}
out.add(*nid);
out.add(nid.clone());
}
}
}
@ -297,8 +299,9 @@ impl Crypto {
{
let mut out = Vec::<R>::with_capacity(typed_key_pairs.len());
for kp in typed_key_pairs {
if let Some(vcrypto) = self.get(kp.kind) {
let sig = vcrypto.sign(&kp.value.key, &kp.value.secret, data)?;
if let Some(vcrypto) = self.get(kp.kind()) {
let sig =
vcrypto.sign(kp.ref_value().ref_key(), kp.ref_value().ref_secret(), data)?;
out.push(transform(kp, sig))
}
}
@ -331,13 +334,15 @@ impl Crypto {
) -> VeilidAPIResult<BareSharedSecret> {
Ok(
match self.inner.lock().dh_cache.entry(DHCacheKey {
key: *key,
secret: *secret,
key: key.clone(),
secret: secret.clone(),
}) {
Entry::Occupied(e) => e.get().shared_secret,
Entry::Occupied(e) => e.get().shared_secret.clone(),
Entry::Vacant(e) => {
let shared_secret = vcrypto.compute_dh(key, secret)?;
e.insert(DHCacheValue { shared_secret });
e.insert(DHCacheValue {
shared_secret: shared_secret.clone(),
});
shared_secret
}
},
@ -404,7 +409,7 @@ impl Crypto {
if let (Some(node_id), Some(node_id_secret)) = (node_id, node_id_secret) {
// Validate node id
if !vcrypto
.validate_keypair(&node_id.value.into(), &node_id_secret.value)
.validate_keypair(&node_id.value().into(), &node_id_secret.value())
.await
{
apibail_generic!(format!(
@ -418,8 +423,8 @@ impl Crypto {
veilid_log!(self debug "generating new node_id_{}", ck);
let kp = vcrypto.generate_keypair().await;
(
NodeId::new(ck, kp.key.into()),
SecretKey::new(ck, kp.secret),
NodeId::new(ck, kp.key().into()),
SecretKey::new(ck, kp.secret()),
)
};
veilid_log!(self info "Node Id: {}", node_id);
@ -451,8 +456,8 @@ impl Crypto {
let (node_id, node_id_secret) = {
let kp = vcrypto.generate_keypair().await;
(
NodeId::new(ck, kp.key.into()),
SecretKey::new(ck, kp.secret),
NodeId::new(ck, kp.key().into()),
SecretKey::new(ck, kp.secret()),
)
};
#[cfg(not(test))]

View file

@ -1,51 +1,72 @@
pub mod sizes;
use super::*;
use argon2::password_hash::Salt;
use data_encoding::BASE64URL_NOPAD;
use digest::rand_core::RngCore;
use digest::Digest;
const AEAD_OVERHEAD: usize = PUBLIC_KEY_LENGTH;
const NONE_AEAD_OVERHEAD: usize = NONE_PUBLIC_KEY_LENGTH;
pub const CRYPTO_KIND_NONE: CryptoKind = CryptoKind(*b"NONE");
pub use sizes::*;
pub fn none_generate_keypair() -> BareKeyPair {
let mut csprng = VeilidRng {};
let mut pub_bytes = [0u8; PUBLIC_KEY_LENGTH];
let mut sec_bytes = [0u8; SECRET_KEY_LENGTH];
let mut pub_bytes = [0u8; NONE_PUBLIC_KEY_LENGTH];
let mut sec_bytes = [0u8; NONE_SECRET_KEY_LENGTH];
csprng.fill_bytes(&mut pub_bytes);
for n in 0..PUBLIC_KEY_LENGTH {
for n in 0..NONE_PUBLIC_KEY_LENGTH {
sec_bytes[n] = !pub_bytes[n];
}
let dht_key = BarePublicKey::new(pub_bytes);
let dht_key_secret = BareSecretKey::new(sec_bytes);
let dht_key = BarePublicKey::new(&pub_bytes);
let dht_key_secret = BareSecretKey::new(&sec_bytes);
BareKeyPair::new(dht_key, dht_key_secret)
}
fn do_xor_32(a: &[u8], b: &[u8]) -> [u8; 32] {
fn do_xor_32(a: &[u8], b: &[u8]) -> VeilidAPIResult<[u8; 32]> {
if a.len() != 32 || b.len() != 32 {
apibail_generic!("wrong key length");
}
let mut out = [0u8; 32];
for n in 0..32 {
out[n] = a[n] ^ b[n];
}
out
Ok(out)
}
fn do_xor_inplace(a: &mut [u8], key: &[u8]) {
fn do_xor_inplace(a: &mut [u8], key: &[u8]) -> VeilidAPIResult<()> {
if a.len() != 32 || key.is_empty() {
apibail_generic!("wrong key length");
}
for n in 0..a.len() {
a[n] ^= key[n % key.len()];
}
Ok(())
}
fn do_xor_b2b(a: &[u8], b: &mut [u8], key: &[u8]) {
fn do_xor_b2b(a: &[u8], b: &mut [u8], key: &[u8]) -> VeilidAPIResult<()> {
if a.len() != 32 || b.len() != 32 || key.is_empty() {
apibail_generic!("wrong key length");
}
for n in 0..a.len() {
b[n] = a[n] ^ key[n % key.len()];
}
Ok(())
}
fn is_bytes_eq_32(a: &[u8], v: u8) -> bool {
fn is_bytes_eq_32(a: &[u8], v: u8) -> VeilidAPIResult<bool> {
if a.len() != 32 {
apibail_generic!("wrong key length");
}
for n in 0..32 {
if a[n] != v {
return false;
return Ok(false);
}
}
true
Ok(true)
}
/// None CryptoSystem
@ -86,9 +107,6 @@ impl CryptoSystem for CryptoSystemNONE {
random_bytes(bytes.as_mut());
bytes
}
fn default_salt_length(&self) -> u32 {
4
}
fn hash_password(&self, password: &[u8], salt: &[u8]) -> VeilidAPIResult<String> {
if salt.len() < Salt::MIN_LENGTH || salt.len() > Salt::MAX_LENGTH {
apibail_generic!("invalid salt length");
@ -106,7 +124,7 @@ impl CryptoSystem for CryptoSystemNONE {
let Ok(salt) = BASE64URL_NOPAD.decode(salt.as_bytes()) else {
apibail_generic!("invalid salt");
};
return Ok(&self.hash_password(password, &salt)? == password_hash);
Ok(self.hash_password(password, &salt)? == password_hash)
}
fn derive_shared_secret(
@ -118,33 +136,33 @@ impl CryptoSystem for CryptoSystemNONE {
apibail_generic!("invalid salt length");
}
Ok(BareSharedSecret::new(
*blake3::hash(self.hash_password(password, salt)?.as_bytes()).as_bytes(),
blake3::hash(self.hash_password(password, salt)?.as_bytes()).as_bytes(),
))
}
fn random_nonce(&self) -> BareNonce {
let mut nonce = [0u8; NONCE_LENGTH];
let mut nonce = [0u8; NONE_NONCE_LENGTH];
random_bytes(&mut nonce);
BareNonce::new(nonce)
BareNonce::new(&nonce)
}
fn random_shared_secret(&self) -> BareSharedSecret {
let mut s = [0u8; SHARED_SECRET_LENGTH];
let mut s = [0u8; NONE_SHARED_SECRET_LENGTH];
random_bytes(&mut s);
BareSharedSecret::new(s)
BareSharedSecret::new(&s)
}
fn compute_dh(
&self,
key: &BarePublicKey,
secret: &BareSecretKey,
) -> VeilidAPIResult<BareSharedSecret> {
let s = do_xor_32(&key.bytes, &secret.bytes);
Ok(BareSharedSecret::new(s))
let s = do_xor_32(key, secret)?;
Ok(BareSharedSecret::new(&s))
}
fn generate_keypair(&self) -> BareKeyPair {
none_generate_keypair()
}
fn generate_hash(&self, data: &[u8]) -> BareHashDigest {
BareHashDigest::new(*blake3::hash(data).as_bytes())
BareHashDigest::new(blake3::hash(data).as_bytes())
}
fn generate_hash_reader(
&self,
@ -152,10 +170,35 @@ impl CryptoSystem for CryptoSystemNONE {
) -> VeilidAPIResult<BarePublicKey> {
let mut hasher = blake3::Hasher::new();
std::io::copy(reader, &mut hasher).map_err(VeilidAPIError::generic)?;
Ok(BarePublicKey::new(*hasher.finalize().as_bytes()))
Ok(BarePublicKey::new(hasher.finalize().as_bytes()))
}
// Validation
fn default_salt_length(&self) -> usize {
4
}
fn shared_secret_length(&self) -> usize {
NONE_SHARED_SECRET_LENGTH
}
fn nonce_length(&self) -> usize {
NONE_NONCE_LENGTH
}
fn hash_digest_length(&self) -> usize {
NONE_HASH_DIGEST_LENGTH
}
fn aead_overhead(&self) -> usize {
NONE_AEAD_OVERHEAD
}
fn public_key_length(&self) -> usize {
NONE_PUBLIC_KEY_LENGTH
}
fn secret_key_length(&self) -> usize {
NONE_SECRET_KEY_LENGTH
}
fn signature_length(&self) -> usize {
NONE_SIGNATURE_LENGTH
}
fn validate_keypair(&self, dht_key: &BarePublicKey, dht_key_secret: &BareSecretKey) -> bool {
let data = vec![0u8; 512];
let Ok(sig) = self.sign(dht_key, dht_key_secret, &data) else {
@ -168,7 +211,7 @@ impl CryptoSystem for CryptoSystemNONE {
}
/// True if the BLAKE3 hash of `data` equals `dht_key` exactly.
fn validate_hash(&self, data: &[u8], dht_key: &BareHashDigest) -> bool {
    let bytes = *blake3::hash(data).as_bytes();
    bytes == dht_key.bytes()
}
fn validate_hash_reader(
&self,
@ -178,17 +221,17 @@ impl CryptoSystem for CryptoSystemNONE {
let mut hasher = blake3::Hasher::new();
std::io::copy(reader, &mut hasher).map_err(VeilidAPIError::generic)?;
let bytes = *hasher.finalize().as_bytes();
Ok(bytes == dht_key.bytes)
Ok(bytes == dht_key.bytes())
}
// Distance Metric
/// Byte-wise XOR distance between two hash digests.
///
/// NOTE(review): indexes both digests up to NONE_HASH_DIGEST_LENGTH; assumes
/// variable-length digests are at least that long — indexing panics otherwise.
fn distance(&self, key1: &BareHashDigest, key2: &BareHashDigest) -> BareHashDistance {
    let mut bytes = [0u8; NONE_HASH_DIGEST_LENGTH];
    for (n, byte) in bytes.iter_mut().enumerate() {
        *byte = key1[n] ^ key2[n];
    }
    BareHashDistance::new(&bytes)
}
// Authentication
@ -198,7 +241,7 @@ impl CryptoSystem for CryptoSystemNONE {
dht_key_secret: &BareSecretKey,
data: &[u8],
) -> VeilidAPIResult<BareSignature> {
if !is_bytes_eq_32(&do_xor_32(&dht_key.bytes, &dht_key_secret.bytes), 0xFFu8) {
if !is_bytes_eq_32(&do_xor_32(dht_key, dht_key_secret)?, 0xFFu8)? {
return Err(VeilidAPIError::parse_error(
"Keypair is invalid",
"invalid keys",
@ -208,14 +251,15 @@ impl CryptoSystem for CryptoSystemNONE {
let mut dig = Blake3Digest512::new();
dig.update(data);
let sig = dig.finalize();
let in_sig_bytes: [u8; SIGNATURE_LENGTH] = sig.into();
let mut sig_bytes = [0u8; SIGNATURE_LENGTH];
let in_sig_bytes: [u8; NONE_SIGNATURE_LENGTH] = sig.into();
let mut sig_bytes = [0u8; NONE_SIGNATURE_LENGTH];
sig_bytes[0..32].copy_from_slice(&in_sig_bytes[0..32]);
sig_bytes[32..64].copy_from_slice(&do_xor_32(&in_sig_bytes[32..64], &dht_key_secret.bytes));
let dht_sig = BareSignature::new(sig_bytes.into());
sig_bytes[32..64].copy_from_slice(&do_xor_32(&in_sig_bytes[32..64], dht_key_secret)?);
let dht_sig = BareSignature::new(&sig_bytes);
println!("DEBUG dht_sig: {:?}", dht_sig);
Ok(dht_sig)
}
fn verify(
&self,
dht_key: &BarePublicKey,
@ -225,27 +269,23 @@ impl CryptoSystem for CryptoSystemNONE {
let mut dig = Blake3Digest512::new();
dig.update(data);
let sig = dig.finalize();
let in_sig_bytes: [u8; SIGNATURE_LENGTH] = sig.into();
let mut verify_bytes = [0u8; SIGNATURE_LENGTH];
verify_bytes[0..32]
.copy_from_slice(&do_xor_32(&in_sig_bytes[0..32], &signature.bytes[0..32]));
let in_sig_bytes: [u8; NONE_SIGNATURE_LENGTH] = sig.into();
let mut verify_bytes = [0u8; NONE_SIGNATURE_LENGTH];
verify_bytes[0..32].copy_from_slice(&do_xor_32(&in_sig_bytes[0..32], &signature[0..32])?);
verify_bytes[32..64]
.copy_from_slice(&do_xor_32(&in_sig_bytes[32..64], &signature.bytes[32..64]));
.copy_from_slice(&do_xor_32(&in_sig_bytes[32..64], &signature[32..64])?);
if !is_bytes_eq_32(&verify_bytes[0..32], 0u8) {
if !is_bytes_eq_32(&verify_bytes[0..32], 0u8)? {
return Ok(false);
}
if !is_bytes_eq_32(&do_xor_32(&verify_bytes[32..64], &dht_key.bytes), 0xFFu8) {
if !is_bytes_eq_32(&do_xor_32(&verify_bytes[32..64], dht_key)?, 0xFFu8)? {
return Ok(false);
}
return Ok(true);
Ok(true)
}
// AEAD Encrypt/Decrypt
fn aead_overhead(&self) -> usize {
AEAD_OVERHEAD
}
fn decrypt_in_place_aead(
&self,
body: &mut Vec<u8>,
@ -253,19 +293,18 @@ impl CryptoSystem for CryptoSystemNONE {
shared_secret: &BareSharedSecret,
_associated_data: Option<&[u8]>,
) -> VeilidAPIResult<()> {
let mut blob = nonce.bytes.to_vec();
let mut blob = nonce.to_vec();
blob.extend_from_slice(&[0u8; 8]);
let blob = do_xor_32(&blob, &shared_secret.bytes);
let blob = do_xor_32(&blob, shared_secret)?;
if body.len() < AEAD_OVERHEAD {
if body.len() < NONE_AEAD_OVERHEAD {
return Err(VeilidAPIError::generic("invalid length"));
}
if &body[body.len() - AEAD_OVERHEAD..] != &blob {
if body[body.len() - NONE_AEAD_OVERHEAD..] != blob {
return Err(VeilidAPIError::generic("invalid keyblob"));
}
body.truncate(body.len() - AEAD_OVERHEAD);
do_xor_inplace(body, &blob);
Ok(())
body.truncate(body.len() - NONE_AEAD_OVERHEAD);
do_xor_inplace(body, &blob)
}
fn decrypt_aead(
@ -289,10 +328,10 @@ impl CryptoSystem for CryptoSystemNONE {
shared_secret: &BareSharedSecret,
_associated_data: Option<&[u8]>,
) -> VeilidAPIResult<()> {
let mut blob = nonce.bytes.to_vec();
let mut blob = nonce.to_vec();
blob.extend_from_slice(&[0u8; 8]);
let blob = do_xor_32(&blob, &shared_secret.bytes);
do_xor_inplace(body, &blob);
let blob = do_xor_32(&blob, shared_secret)?;
do_xor_inplace(body, &blob)?;
body.append(&mut blob.to_vec());
Ok(())
}
@ -317,11 +356,11 @@ impl CryptoSystem for CryptoSystemNONE {
body: &mut [u8],
nonce: &BareNonce,
shared_secret: &BareSharedSecret,
) {
let mut blob = nonce.bytes.to_vec();
) -> VeilidAPIResult<()> {
let mut blob = nonce.to_vec();
blob.extend_from_slice(&[0u8; 8]);
let blob = do_xor_32(&blob, &shared_secret.bytes);
do_xor_inplace(body, &blob);
let blob = do_xor_32(&blob, shared_secret)?;
do_xor_inplace(body, &blob)
}
fn crypt_b2b_no_auth(
@ -330,11 +369,11 @@ impl CryptoSystem for CryptoSystemNONE {
out_buf: &mut [u8],
nonce: &BareNonce,
shared_secret: &BareSharedSecret,
) {
let mut blob = nonce.bytes.to_vec();
) -> VeilidAPIResult<()> {
let mut blob = nonce.to_vec();
blob.extend_from_slice(&[0u8; 8]);
let blob = do_xor_32(&blob, &shared_secret.bytes);
do_xor_b2b(in_buf, out_buf, &blob);
let blob = do_xor_32(&blob, shared_secret)?;
do_xor_b2b(in_buf, out_buf, &blob)
}
fn crypt_no_auth_aligned_8(
@ -342,10 +381,10 @@ impl CryptoSystem for CryptoSystemNONE {
in_buf: &[u8],
nonce: &BareNonce,
shared_secret: &BareSharedSecret,
) -> Vec<u8> {
) -> VeilidAPIResult<Vec<u8>> {
let mut out_buf = unsafe { aligned_8_u8_vec_uninit(in_buf.len()) };
self.crypt_b2b_no_auth(in_buf, &mut out_buf, nonce, shared_secret);
out_buf
self.crypt_b2b_no_auth(in_buf, &mut out_buf, nonce, shared_secret)?;
Ok(out_buf)
}
fn crypt_no_auth_unaligned(
@ -353,9 +392,9 @@ impl CryptoSystem for CryptoSystemNONE {
in_buf: &[u8],
nonce: &BareNonce,
shared_secret: &BareSharedSecret,
) -> Vec<u8> {
) -> VeilidAPIResult<Vec<u8>> {
let mut out_buf = unsafe { unaligned_u8_vec_uninit(in_buf.len()) };
self.crypt_b2b_no_auth(in_buf, &mut out_buf, nonce, shared_secret);
out_buf
self.crypt_b2b_no_auth(in_buf, &mut out_buf, nonce, shared_secret)?;
Ok(out_buf)
}
}

View file

@ -0,0 +1,12 @@
/// Length of a NONE public key in bytes
pub const NONE_PUBLIC_KEY_LENGTH: usize = 32;
/// Length of a NONE secret key in bytes
pub const NONE_SECRET_KEY_LENGTH: usize = 32;
/// Length of a NONE signature in bytes
pub const NONE_SIGNATURE_LENGTH: usize = 64;
/// Length of a NONE nonce in bytes
pub const NONE_NONCE_LENGTH: usize = 24;
/// Length of a NONE hash digest in bytes
pub const NONE_HASH_DIGEST_LENGTH: usize = 32;
/// Length of a NONE shared secret in bytes
pub const NONE_SHARED_SECRET_LENGTH: usize = 32;

View file

@ -125,7 +125,11 @@ impl Receipt {
// Validate signature
if !vcrypto
.verify(&sender_id.into(), &data[0..(data.len() - 64)], &signature)
.verify(
&sender_id.clone().into(),
&data[0..(data.len() - 64)],
&signature,
)
.map_err(VeilidAPIError::generic)?
{
apibail_parse_error!("signature failure in receipt", signature);
@ -178,9 +182,9 @@ impl Receipt {
// Write size
data[0x08..0x0A].copy_from_slice(&(receipt_size as u16).to_le_bytes());
// Write nonce
data[0x0A..0x22].copy_from_slice(&self.nonce.bytes);
data[0x0A..0x22].copy_from_slice(&self.nonce);
// Write sender node id
data[0x22..0x42].copy_from_slice(&self.sender_id.bytes);
data[0x22..0x42].copy_from_slice(&self.sender_id);
// Write extra data
if !self.extra_data.is_empty() {
data[0x42..(receipt_size - 64)].copy_from_slice(self.extra_data.as_slice());
@ -188,13 +192,13 @@ impl Receipt {
// Sign the receipt
let signature = vcrypto
.sign(
&self.sender_id.into(),
&self.sender_id.clone().into(),
secret,
&data[0..(receipt_size - 64)],
)
.map_err(VeilidAPIError::generic)?;
// Append the signature
data[(receipt_size - 64)..].copy_from_slice(&signature.bytes);
data[(receipt_size - 64)..].copy_from_slice(&signature);
Ok(data)
}
@ -210,17 +214,17 @@ impl Receipt {
}
pub fn get_nonce(&self) -> BareNonce {
self.nonce
self.nonce.clone()
}
#[expect(dead_code)]
pub fn get_sender_id(&self) -> BareNodeId {
self.sender_id
pub fn get_bare_sender_id(&self) -> BareNodeId {
self.sender_id.clone()
}
#[expect(dead_code)]
pub fn get_sender_typed_id(&self) -> NodeId {
NodeId::new(self.crypto_kind, self.sender_id)
pub fn get_sender_id(&self) -> NodeId {
NodeId::new(self.crypto_kind, self.sender_id.clone())
}
#[must_use]

View file

@ -118,7 +118,10 @@ pub async fn test_no_auth(vcrypto: &AsyncCryptoSystemGuard<'_>) {
let mut body = LOREM_IPSUM.to_vec();
let body2 = body.clone();
let size_before_encrypt = body.len();
vcrypto.crypt_in_place_no_auth(&mut body, &n1, &ss1).await;
vcrypto
.crypt_in_place_no_auth(&mut body, &n1, &ss1)
.await
.expect("should succeed");
let size_after_encrypt = body.len();
assert_eq!(
@ -128,32 +131,51 @@ pub async fn test_no_auth(vcrypto: &AsyncCryptoSystemGuard<'_>) {
let mut body3 = body.clone();
let mut body4 = body.clone();
vcrypto.crypt_in_place_no_auth(&mut body, &n1, &ss1).await;
vcrypto
.crypt_in_place_no_auth(&mut body, &n1, &ss1)
.await
.expect("should succeed");
assert_eq!(body, body2, "result after decrypt should be the same");
vcrypto.crypt_in_place_no_auth(&mut body3, &n2, &ss1).await;
vcrypto
.crypt_in_place_no_auth(&mut body3, &n2, &ss1)
.await
.expect("should succeed");
assert_ne!(body3, body, "decrypt should not be equal with wrong nonce");
vcrypto.crypt_in_place_no_auth(&mut body4, &n1, &ss2).await;
vcrypto
.crypt_in_place_no_auth(&mut body4, &n1, &ss2)
.await
.expect("should succeed");
assert_ne!(body4, body, "decrypt should not be equal with wrong secret");
let body5 = vcrypto
.crypt_no_auth_unaligned(LOREM_IPSUM, &n1, &ss1)
.await;
let body6 = vcrypto.crypt_no_auth_unaligned(&body5, &n1, &ss1).await;
.await
.unwrap();
let body6 = vcrypto
.crypt_no_auth_unaligned(&body5, &n1, &ss1)
.await
.unwrap();
let body7 = vcrypto
.crypt_no_auth_unaligned(LOREM_IPSUM, &n1, &ss1)
.await;
.await
.unwrap();
assert_eq!(body6, LOREM_IPSUM);
assert_eq!(body5, body7);
let body5 = vcrypto
.crypt_no_auth_aligned_8(LOREM_IPSUM, &n1, &ss1)
.await;
let body6 = vcrypto.crypt_no_auth_aligned_8(&body5, &n1, &ss1).await;
.await
.unwrap();
let body6 = vcrypto
.crypt_no_auth_aligned_8(&body5, &n1, &ss1)
.await
.unwrap();
let body7 = vcrypto
.crypt_no_auth_aligned_8(LOREM_IPSUM, &n1, &ss1)
.await;
.await
.unwrap();
assert_eq!(body6, LOREM_IPSUM);
assert_eq!(body5, body7);
}

View file

@ -3,8 +3,8 @@ use core::convert::TryFrom;
static LOREM_IPSUM:&str = "Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum. ";
static CHEEZBURGER: &str = "I can has cheezburger";
static EMPTY_KEY: [u8; PUBLIC_KEY_LENGTH] = [0u8; PUBLIC_KEY_LENGTH];
static EMPTY_KEY_SECRET: [u8; SECRET_KEY_LENGTH] = [0u8; SECRET_KEY_LENGTH];
static EMPTY_KEY: [u8; VLD0_PUBLIC_KEY_LENGTH] = [0u8; VLD0_PUBLIC_KEY_LENGTH];
static EMPTY_KEY_SECRET: [u8; VLD0_SECRET_KEY_LENGTH] = [0u8; VLD0_SECRET_KEY_LENGTH];
pub async fn test_generate_secret(vcrypto: &AsyncCryptoSystemGuard<'_>) {
// Verify keys generate
@ -12,8 +12,8 @@ pub async fn test_generate_secret(vcrypto: &AsyncCryptoSystemGuard<'_>) {
let (dht_key2, dht_key_secret2) = vcrypto.generate_keypair().await.into_split();
// Verify byte patterns are different between public and secret
assert_ne!(dht_key.bytes, dht_key_secret.bytes);
assert_ne!(dht_key2.bytes, dht_key_secret2.bytes);
assert_ne!(dht_key.bytes(), dht_key_secret.bytes());
assert_ne!(dht_key2.bytes(), dht_key_secret2.bytes());
// Verify the keys and secrets are different across keypairs
assert_ne!(dht_key, dht_key2);
@ -142,8 +142,8 @@ pub async fn test_sign_and_verify(vcrypto: &AsyncCryptoSystemGuard<'_>) {
pub async fn test_key_conversions(vcrypto: &AsyncCryptoSystemGuard<'_>) {
// Test default key
let (dht_key, dht_key_secret) = (BarePublicKey::default(), BareSecretKey::default());
assert_eq!(dht_key.bytes, EMPTY_KEY);
assert_eq!(dht_key_secret.bytes, EMPTY_KEY_SECRET);
assert!(dht_key.bytes().is_empty());
assert!(dht_key_secret.bytes().is_empty());
let dht_key_string = String::from(&dht_key);
trace!("dht_key_string: {:?}", dht_key_string);
let dht_key_string2 = String::from(&dht_key);
@ -193,28 +193,18 @@ pub async fn test_key_conversions(vcrypto: &AsyncCryptoSystemGuard<'_>) {
// Assert string roundtrip
assert_eq!(String::from(&dht_key2_back), dht_key2_string);
// These conversions should fail
assert!(BarePublicKey::try_from("whatever").is_err());
assert!(BareSecretKey::try_from("whatever").is_err());
assert!(BarePublicKey::try_from("").is_err());
assert!(BareSecretKey::try_from("").is_err());
assert!(BarePublicKey::try_from("whatever!").is_err());
assert!(BareSecretKey::try_from("whatever!").is_err());
assert!(BarePublicKey::try_from(" ").is_err());
assert!(BareSecretKey::try_from(" ").is_err());
assert!(BarePublicKey::try_from(
"qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqq"
)
.is_err());
assert!(BareSecretKey::try_from(
"qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqq"
)
.is_err());
}
pub async fn test_encode_decode(vcrypto: &AsyncCryptoSystemGuard<'_>) {
let dht_key = BarePublicKey::try_decode("AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA").unwrap();
let dht_key_secret =
BareSecretKey::try_decode("AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA").unwrap();
let dht_key_b = BarePublicKey::new(EMPTY_KEY);
let dht_key_secret_b = BareSecretKey::new(EMPTY_KEY_SECRET);
let dht_key_b = BarePublicKey::new(&EMPTY_KEY);
let dht_key_secret_b = BareSecretKey::new(&EMPTY_KEY_SECRET);
assert_eq!(dht_key, dht_key_b);
assert_eq!(dht_key_secret, dht_key_secret_b);
@ -247,7 +237,7 @@ pub async fn test_encode_decode(vcrypto: &AsyncCryptoSystemGuard<'_>) {
assert_eq!(dht_key_secret2, d2s);
// Failures
let f1 = BareSecretKey::try_decode("AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA");
let f1 = BareSecretKey::try_decode("AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA!");
assert!(f1.is_err());
let f2 = BareSecretKey::try_decode("AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA&");
assert!(f2.is_err());
@ -259,6 +249,7 @@ pub fn test_typed_convert(vcrypto: &AsyncCryptoSystemGuard<'_>) {
vcrypto.kind()
);
let tk1 = PublicKey::from_str(&tks1).expect("failed");
assert!(vcrypto.check_public_key(tk1.ref_value()).is_ok());
let tks1x = tk1.to_string();
assert_eq!(tks1, tks1x);
@ -266,29 +257,35 @@ pub fn test_typed_convert(vcrypto: &AsyncCryptoSystemGuard<'_>) {
"{}:7lxDEabK_qgjbe38RtBa3IZLrud84P6NhGP-pRTZzd",
vcrypto.kind()
);
let _tk2 = PublicKey::from_str(&tks2).expect_err("succeeded when it shouldnt have");
let _tk2 = PublicKey::from_str(&tks2).expect_err("should fail");
let tks3 = "XXXX:7lxDEabK_qgjbe38RtBa3IZLrud84P6NhGP-pRTZzdQ".to_string();
let tks3 = format!(
"{}:7lxDEabK_qgjbe38RtBa3IZLrud84P6NhGP-pRTZ",
vcrypto.kind()
);
let tk3 = PublicKey::from_str(&tks3).expect("failed");
let tks3x = tk3.to_string();
assert_eq!(tks3, tks3x);
assert!(vcrypto.check_public_key(tk3.ref_value()).is_err());
let tks4 = "XXXX:7lxDEabK_qgjbe38RtBa3IZLrud84P6NhGP-pRTZzd".to_string();
let _tk4 = PublicKey::from_str(&tks4).expect_err("succeeded when it shouldnt have");
let tks4 = "XXXX:7lxDEabK_qgjbe38RtBa3IZLrud84P6NhGP-pRTZzdQ".to_string();
let tk4 = PublicKey::from_str(&tks4).expect("failed");
let tks4x = tk4.to_string();
assert_eq!(tks4, tks4x);
// Enable this when we switch crypto to using typed keys too
//assert!(vcrypto.check_public_key(&tk4).is_err());
let tks5 = "XXX:7lxDEabK_qgjbe38RtBa3IZLrud84P6NhGP-pRTZzdQ".to_string();
let _tk5 = PublicKey::from_str(&tks5).expect_err("succeeded when it shouldnt have");
let _tk5 = PublicKey::from_str(&tks5).expect_err("should fail");
let tks6 = "7lxDEabK_qgjbe38RtBa3IZLrud84P6NhGP-pRTZzdQ".to_string();
let tk6 = PublicKey::from_str(&tks6).expect("failed");
let tks6x = tk6.to_string();
assert!(tks6x.ends_with(&tks6));
let b = Vec::from(tk6);
let b = Vec::from(tk6.clone());
let tk7 = PublicKey::try_from(b).expect("should succeed");
assert_eq!(tk7, tk6);
let b = Vec::from(tk6);
let b = Vec::from(tk6.clone());
let tk8 = PublicKey::try_from(b.as_slice()).expect("should succeed");
assert_eq!(tk8, tk6);
}
@ -303,12 +300,12 @@ async fn test_hash(vcrypto: &AsyncCryptoSystemGuard<'_>) {
let k5 = vcrypto.generate_hash(LOREM_IPSUM.as_bytes()).await;
let k6 = vcrypto.generate_hash(CHEEZBURGER.as_bytes()).await;
s.insert(k1);
s.insert(k2);
s.insert(k3);
s.insert(k4);
s.insert(k5);
s.insert(k6);
s.insert(k1.clone());
s.insert(k2.clone());
s.insert(k3.clone());
s.insert(k4.clone());
s.insert(k5.clone());
s.insert(k6.clone());
assert_eq!(s.len(), 6);
let v1 = vcrypto.generate_hash("abc".as_bytes()).await;
@ -390,23 +387,23 @@ async fn test_operations(vcrypto: &AsyncCryptoSystemGuard<'_>) {
}
pub fn test_public_key_ordering() {
let k1 = BarePublicKey::new([
let k1 = BarePublicKey::new(&[
128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0,
]);
let k2 = BarePublicKey::new([
let k2 = BarePublicKey::new(&[
1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0,
]);
let k3 = BarePublicKey::new([
let k3 = BarePublicKey::new(&[
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 128,
]);
let k4 = BarePublicKey::new([
let k4 = BarePublicKey::new(&[
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 1,
]);
let k5 = BarePublicKey::new([
let k5 = BarePublicKey::new(&[
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0,
]);

View file

@ -1,71 +1,21 @@
use super::*;
use core::cmp::{Eq, Ord, PartialEq, PartialOrd};
use core::convert::{TryFrom, TryInto};
use core::convert::TryFrom;
use core::fmt;
use core::hash::Hash;
use bytes::{Bytes, BytesMut};
use data_encoding::BASE64URL_NOPAD;
//////////////////////////////////////////////////////////////////////
/// Length of a crypto key in bytes
#[allow(dead_code)]
pub const CRYPTO_KEY_LENGTH: usize = 32;
/// Length of a crypto key in bytes after encoding to base64url
#[allow(dead_code)]
pub const CRYPTO_KEY_LENGTH_ENCODED: usize = 43;
/// Length of a crypto key in bytes
#[allow(dead_code)]
pub const PUBLIC_KEY_LENGTH: usize = CRYPTO_KEY_LENGTH;
/// Length of a crypto key in bytes after encoding to base64url
#[allow(dead_code)]
pub const PUBLIC_KEY_LENGTH_ENCODED: usize = CRYPTO_KEY_LENGTH_ENCODED;
/// Length of a secret key in bytes
#[allow(dead_code)]
pub const SECRET_KEY_LENGTH: usize = CRYPTO_KEY_LENGTH;
/// Length of a secret key in bytes after encoding to base64url
#[allow(dead_code)]
pub const SECRET_KEY_LENGTH_ENCODED: usize = CRYPTO_KEY_LENGTH_ENCODED;
/// Length of a signature in bytes
#[allow(dead_code)]
pub const SIGNATURE_LENGTH: usize = 64;
/// Length of a signature in bytes after encoding to base64url
#[allow(dead_code)]
pub const SIGNATURE_LENGTH_ENCODED: usize = 86;
/// Length of a nonce in bytes
#[allow(dead_code)]
pub const NONCE_LENGTH: usize = 24;
/// Length of a nonce in bytes after encoding to base64url
#[allow(dead_code)]
pub const NONCE_LENGTH_ENCODED: usize = 32;
/// Length of a hash digest in bytes
#[allow(dead_code)]
pub const HASH_DIGEST_LENGTH: usize = CRYPTO_KEY_LENGTH;
/// Length of a hash digest in bytes after encoding to base64url
#[allow(dead_code)]
pub const HASH_DIGEST_LENGTH_ENCODED: usize = CRYPTO_KEY_LENGTH_ENCODED;
/// Length of a shared secret in bytes
#[allow(dead_code)]
pub const SHARED_SECRET_LENGTH: usize = HASH_DIGEST_LENGTH;
/// Length of a shared secret in bytes after encoding to base64url
#[allow(dead_code)]
pub const SHARED_SECRET_LENGTH_ENCODED: usize = HASH_DIGEST_LENGTH_ENCODED;
/// Length of a route id in bytes
#[allow(dead_code)]
pub const ROUTE_ID_LENGTH: usize = HASH_DIGEST_LENGTH;
/// Length of a route id in bytes after encoding to base64url
#[allow(dead_code)]
pub const ROUTE_ID_LENGTH_ENCODED: usize = HASH_DIGEST_LENGTH_ENCODED;
//////////////////////////////////////////////////////////////////////
pub trait Encodable
where
Self: Sized,
{
fn encode(&self) -> String;
fn encoded_len() -> usize;
fn encoded_len(&self) -> usize;
fn try_decode<S: AsRef<str>>(input: S) -> VeilidAPIResult<Self> {
let b = input.as_ref().as_bytes();
Self::try_decode_bytes(b)
@ -73,11 +23,25 @@ where
fn try_decode_bytes(b: &[u8]) -> VeilidAPIResult<Self>;
}
//////////////////////////////////////////////////////////////////////
/// Common accessors for variable-length byte-array key/digest types
/// (implemented by the `byte_array_type!` macro).
pub trait ByteArrayType
where
Self: Sized,
{
/// Number of bytes in the value.
fn len(&self) -> usize;
/// True if the value contains no bytes.
fn is_empty(&self) -> bool;
/// Borrow the raw byte contents.
fn bytes(&self) -> &[u8];
/// Get bit `index`, big-endian bit ordering (bit 0 is the MSB of byte 0).
fn bit(&self, index: usize) -> bool;
/// Index of the first set bit, or None if every byte is zero.
fn first_nonzero_bit(&self) -> Option<usize>;
/// Get nibble `index`, big-endian nibble ordering (nibble 0 is the high nibble of byte 0).
fn nibble(&self, index: usize) -> u8;
/// First nonzero nibble as (index, value), or None if every byte is zero.
fn first_nonzero_nibble(&self) -> Option<(usize, u8)>;
}
//////////////////////////////////////////////////////////////////////
macro_rules! byte_array_type {
($name:ident, $size:expr, $encoded_size:expr) => {
#[derive(Clone, Copy, Hash, PartialOrd, Ord, PartialEq, Eq)]
($name:ident) => {
#[derive(Clone, Hash, PartialOrd, Ord, PartialEq, Eq)]
#[cfg_attr(all(target_arch = "wasm32", target_os = "unknown"), derive(Tsify))]
#[cfg_attr(
all(target_arch = "wasm32", target_os = "unknown"),
@ -90,7 +54,110 @@ macro_rules! byte_array_type {
all(target_arch = "wasm32", target_os = "unknown"),
tsify(type = "string")
)]
pub bytes: [u8; $size],
bytes: Bytes,
}
impl $name {
pub fn new(data: &[u8]) -> Self {
Self {
bytes: Bytes::copy_from_slice(data),
}
}
fn new_from_bytes(bytes: Bytes) -> Self {
Self { bytes }
}
}
impl Default for $name {
fn default() -> Self {
Self {
bytes: Bytes::new(),
}
}
}
impl ByteArrayType for $name {
fn len(&self) -> usize {
self.bytes.len()
}
fn is_empty(&self) -> bool {
self.bytes.is_empty()
}
fn bytes(&self) -> &[u8] {
&self.bytes
}
// Big endian bit ordering
#[must_use]
fn bit(&self, index: usize) -> bool {
let bi = index / 8;
let ti = 7 - (index % 8);
((self.bytes[bi] >> ti) & 1) != 0
}
#[must_use]
fn first_nonzero_bit(&self) -> Option<usize> {
for i in 0..self.bytes.len() {
let b = self.bytes[i];
if b != 0 {
for n in 0..8 {
if ((b >> (7 - n)) & 1u8) != 0u8 {
return Some((i * 8) + n);
}
}
unreachable!("nonzero byte must have nonzero bit");
}
}
None
}
// Big endian nibble ordering
#[must_use]
fn nibble(&self, index: usize) -> u8 {
let bi = index / 2;
if index & 1 == 0 {
(self.bytes[bi] >> 4) & 0xFu8
} else {
self.bytes[bi] & 0xFu8
}
}
#[must_use]
fn first_nonzero_nibble(&self) -> Option<(usize, u8)> {
for i in 0..(self.bytes.len() * 2) {
let n = self.nibble(i);
if n != 0 {
return Some((i, n));
}
}
None
}
}
impl Encodable for $name {
fn encode(&self) -> String {
BASE64URL_NOPAD.encode(&self.bytes)
}
fn encoded_len(&self) -> usize {
BASE64URL_NOPAD.encode_len(self.bytes.len())
}
fn try_decode_bytes(b: &[u8]) -> VeilidAPIResult<Self> {
if b.len() == 0 {
return Ok(Self::default());
}
let decode_len = BASE64URL_NOPAD
.decode_len(b.len())
.map_err(|_| VeilidAPIError::generic("failed to get decode length"))?;
let mut bytes = BytesMut::zeroed(decode_len);
let bytes_len = BASE64URL_NOPAD
.decode_mut(b, &mut bytes)
.map_err(|_| VeilidAPIError::generic("failed to decode"))?;
bytes.truncate(bytes_len);
Ok(Self::new_from_bytes(bytes.freeze()))
}
}
impl core::ops::Deref for $name {
type Target = [u8];
fn deref(&self) -> &Self::Target {
&self.bytes
}
}
impl serde::Serialize for $name {
@ -109,103 +176,10 @@ macro_rules! byte_array_type {
D: serde::Deserializer<'de>,
{
let s = <String as serde::Deserialize>::deserialize(deserializer)?;
if s == "" {
return Ok($name::default());
}
$name::try_decode(s.as_str()).map_err(serde::de::Error::custom)
Self::try_decode(s.as_str()).map_err(serde::de::Error::custom)
}
}
impl Default for $name {
fn default() -> Self {
Self {
bytes: [0u8; $size],
}
}
}
impl $name {
pub fn new(bytes: [u8; $size]) -> Self {
Self { bytes }
}
// Big endian bit ordering
#[must_use]
pub fn bit(&self, index: usize) -> bool {
assert!(index < ($size * 8));
let bi = index / 8;
let ti = 7 - (index % 8);
((self.bytes[bi] >> ti) & 1) != 0
}
#[must_use]
pub fn first_nonzero_bit(&self) -> Option<usize> {
for i in 0..$size {
let b = self.bytes[i];
if b != 0 {
for n in 0..8 {
if ((b >> (7 - n)) & 1u8) != 0u8 {
return Some((i * 8) + n);
}
}
panic!("wtf")
}
}
None
}
// Big endian nibble ordering
#[must_use]
pub fn nibble(&self, index: usize) -> u8 {
assert!(index < ($size * 2));
let bi = index / 2;
if index & 1 == 0 {
(self.bytes[bi] >> 4) & 0xFu8
} else {
self.bytes[bi] & 0xFu8
}
}
#[must_use]
pub fn first_nonzero_nibble(&self) -> Option<(usize, u8)> {
for i in 0..($size * 2) {
let n = self.nibble(i);
if n != 0 {
return Some((i, n));
}
}
None
}
}
impl Encodable for $name {
fn encode(&self) -> String {
BASE64URL_NOPAD.encode(&self.bytes)
}
fn encoded_len() -> usize {
$encoded_size
}
fn try_decode_bytes(b: &[u8]) -> VeilidAPIResult<Self> {
let mut bytes = [0u8; $size];
let res = BASE64URL_NOPAD.decode_len(b.len());
match res {
Ok(v) => {
if v != $size {
apibail_generic!("Incorrect length in decode");
}
}
Err(_) => {
apibail_generic!("Failed to decode");
}
}
let res = BASE64URL_NOPAD.decode_mut(b, &mut bytes);
match res {
Ok(_) => Ok(Self::new(bytes)),
Err(_) => apibail_generic!("Failed to decode"),
}
}
}
impl fmt::Display for $name {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "{}", self.encode())
@ -237,7 +211,7 @@ macro_rules! byte_array_type {
impl TryFrom<String> for $name {
type Error = VeilidAPIError;
fn try_from(value: String) -> Result<Self, Self::Error> {
$name::try_from(value.as_str())
Self::try_from(value.as_str())
}
}
@ -247,43 +221,30 @@ macro_rules! byte_array_type {
Self::try_decode(value)
}
}
impl TryFrom<&[u8]> for $name {
type Error = VeilidAPIError;
fn try_from(v: &[u8]) -> Result<Self, Self::Error> {
let vl = v.len();
Ok(Self {
bytes: v.try_into().map_err(|_| {
VeilidAPIError::generic(format!(
"Expected a slice of length {} but it was {}",
$size, vl
))
})?,
})
}
}
impl TryFrom<Vec<u8>> for $name {
type Error = VeilidAPIError;
fn try_from(v: Vec<u8>) -> Result<Self, Self::Error> {
let vl = v.len();
Ok(Self {
bytes: v.try_into().map_err(|_| {
VeilidAPIError::generic(format!(
"Expected a vec of length {} but it was {}",
$size, vl
))
})?,
})
}
}
impl From<$name> for Vec<u8> {
fn from(value: $name) -> Self {
value.bytes.to_vec()
value.bytes().to_vec()
}
}
impl AsRef<[u8]> for $name {
fn as_ref(&self) -> &[u8] {
&self.bytes
self.bytes()
}
}
impl From<&[u8]> for $name {
fn from(v: &[u8]) -> Self {
Self {
bytes: Bytes::copy_from_slice(v),
}
}
}
impl From<Vec<u8>> for $name {
fn from(v: Vec<u8>) -> Self {
Self {
bytes: Bytes::from(v),
}
}
}
};
@ -291,80 +252,53 @@ macro_rules! byte_array_type {
/////////////////////////////////////////
byte_array_type!(BarePublicKey, PUBLIC_KEY_LENGTH, PUBLIC_KEY_LENGTH_ENCODED);
byte_array_type!(BareSecretKey, SECRET_KEY_LENGTH, SECRET_KEY_LENGTH_ENCODED);
byte_array_type!(BareSignature, SIGNATURE_LENGTH, SIGNATURE_LENGTH_ENCODED);
byte_array_type!(BareNonce, NONCE_LENGTH, NONCE_LENGTH_ENCODED);
byte_array_type!(BarePublicKey);
byte_array_type!(BareSecretKey);
byte_array_type!(BareSignature);
byte_array_type!(BareNonce);
/*
Notes:
- These are actually BareHashDigest types, but not interchangable:
- BareRouteId (eventually will be a BareRecordKey type with DHT Routes)
- BareRecordKey
- BareSharedSecret
- BareRecordKey
- BareRouteId (eventually will be a BareRecordKey type with DHT Routes)
- BareNodeId (constructible from BarePublicKey)
- BareMemberId (constructible from BarePublicKey)
*/
// BareHashDigest sub-types
byte_array_type!(
BareHashDigest,
HASH_DIGEST_LENGTH,
HASH_DIGEST_LENGTH_ENCODED
);
byte_array_type!(
BareSharedSecret,
SHARED_SECRET_LENGTH,
SHARED_SECRET_LENGTH_ENCODED
);
byte_array_type!(BareRouteId, ROUTE_ID_LENGTH, ROUTE_ID_LENGTH_ENCODED);
byte_array_type!(
BareRecordKey,
HASH_DIGEST_LENGTH,
HASH_DIGEST_LENGTH_ENCODED
);
byte_array_type!(
BareHashDistance,
HASH_DIGEST_LENGTH,
HASH_DIGEST_LENGTH_ENCODED
);
// BareNodeId is currently the same as BarePublicKey, but will eventually be a sub-type of BareHashDigest.
byte_array_type!(BareNodeId, PUBLIC_KEY_LENGTH, PUBLIC_KEY_LENGTH_ENCODED);
#[expect(dead_code)]
trait HashCoordinate {
fn from_hash_coordinate(hash_digest: BareHashDigest) -> Self;
fn to_hash_coordinate(&self) -> BareHashDigest;
}
byte_array_type!(BareHashDigest);
byte_array_type!(BareSharedSecret);
byte_array_type!(BareRecordKey);
byte_array_type!(BareHashDistance);
byte_array_type!(BareRouteId);
byte_array_type!(BareNodeId);
byte_array_type!(BareMemberId);
// Temporary adapters for converting to/from BareHashDigest types
// Removing these will show where there's still issues.
impl From<BareHashDigest> for BareSharedSecret {
fn from(value: BareHashDigest) -> Self {
Self::new(value.bytes)
Self::new(value.bytes())
}
}
impl From<BareHashDigest> for BareRecordKey {
fn from(value: BareHashDigest) -> Self {
Self::new(value.bytes)
Self::new(value.bytes())
}
}
impl From<BareRecordKey> for BareHashDigest {
fn from(value: BareRecordKey) -> Self {
Self::new(value.bytes)
Self::new(value.bytes())
}
}
impl From<BareNodeId> for BareHashDigest {
fn from(value: BareNodeId) -> Self {
Self::new(value.bytes)
}
}
impl From<BareHashDigest> for BarePublicKey {
fn from(value: BareHashDigest) -> Self {
Self::new(value.bytes)
Self::new(value.bytes())
}
}
@ -374,12 +308,12 @@ impl From<BareHashDigest> for BarePublicKey {
*/
impl From<BarePublicKey> for BareNodeId {
fn from(value: BarePublicKey) -> Self {
Self::new(value.bytes)
Self::new(value.bytes())
}
}
impl From<BareNodeId> for BarePublicKey {
fn from(value: BareNodeId) -> Self {
Self::new(value.bytes)
Self::new(value.bytes())
}
}

View file

@ -1,13 +1,13 @@
use super::*;
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
#[derive(Clone, Debug, PartialEq, Eq, Hash)]
#[must_use]
pub struct CryptoTyped<K>
where
K: Clone + Copy + fmt::Debug + PartialEq + Eq + Hash,
K: Clone + fmt::Debug + PartialEq + Eq + Hash,
{
pub kind: CryptoKind,
pub value: K,
kind: CryptoKind,
value: K,
}
cfg_if::cfg_if! {
@ -21,15 +21,29 @@ export type CryptoTyped<TCryptoKey extends string> = `${CryptoKind}:${TCryptoKey
impl<K> CryptoTyped<K>
where
K: Clone + Copy + fmt::Debug + PartialEq + Eq + Hash,
K: Clone + fmt::Debug + PartialEq + Eq + Hash,
{
pub fn new(kind: CryptoKind, value: K) -> Self {
Self { kind, value }
}
pub fn kind(&self) -> CryptoKind {
self.kind
}
pub fn value(&self) -> K {
self.value.clone()
}
pub fn ref_value(&self) -> &K {
&self.value
}
pub fn into_value(self) -> K {
self.value
}
}
impl<K> PartialOrd for CryptoTyped<K>
where
K: Clone + Copy + fmt::Debug + PartialEq + Eq + Hash,
K: Clone + fmt::Debug + PartialEq + Eq + Hash,
K: Ord + PartialOrd,
{
fn partial_cmp(&self, other: &Self) -> Option<cmp::Ordering> {
@ -39,7 +53,7 @@ where
impl<K> Ord for CryptoTyped<K>
where
K: Clone + Copy + fmt::Debug + PartialEq + Eq + Hash,
K: Clone + fmt::Debug + PartialEq + Eq + Hash,
K: Ord + PartialOrd,
{
fn cmp(&self, other: &Self) -> cmp::Ordering {
@ -53,7 +67,7 @@ where
impl<K> fmt::Display for CryptoTyped<K>
where
K: Clone + Copy + fmt::Debug + PartialEq + Eq + Hash,
K: Clone + fmt::Debug + PartialEq + Eq + Hash,
K: fmt::Display,
{
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
@ -63,29 +77,27 @@ where
impl<K> FromStr for CryptoTyped<K>
where
K: Clone + Copy + fmt::Debug + PartialEq + Eq + Hash,
K: Clone + fmt::Debug + PartialEq + Eq + Hash,
K: Encodable,
{
type Err = VeilidAPIError;
fn from_str(s: &str) -> Result<Self, Self::Err> {
let b = s.as_bytes();
if b.len() == (5 + K::encoded_len()) && b[4..5] == b":"[..] {
if b.len() > 5 && b[4..5] == b":"[..] {
let kind: CryptoKind = b[0..4].try_into().expect("should not fail to convert");
let value = K::try_decode_bytes(&b[5..])?;
Ok(Self { kind, value })
} else if b.len() == K::encoded_len() {
} else {
let kind = best_crypto_kind();
let value = K::try_decode_bytes(b)?;
Ok(Self { kind, value })
} else {
apibail_generic!("invalid cryptotyped format");
}
}
}
impl<K> TryFrom<String> for CryptoTyped<K>
where
K: Clone + Copy + fmt::Debug + PartialEq + Eq + Hash,
K: Clone + fmt::Debug + PartialEq + Eq + Hash,
K: Encodable,
{
type Error = VeilidAPIError;
@ -97,7 +109,7 @@ where
impl<K> TryFrom<&str> for CryptoTyped<K>
where
K: Clone + Copy + fmt::Debug + PartialEq + Eq + Hash,
K: Clone + fmt::Debug + PartialEq + Eq + Hash,
K: Encodable,
{
type Error = VeilidAPIError;
@ -109,8 +121,8 @@ where
impl<'a, K> TryFrom<&'a [u8]> for CryptoTyped<K>
where
K: Clone + Copy + fmt::Debug + PartialEq + Eq + Hash,
K: TryFrom<&'a [u8], Error = VeilidAPIError>,
K: Clone + fmt::Debug + PartialEq + Eq + Hash,
K: From<&'a [u8]>,
{
type Error = VeilidAPIError;
@ -119,15 +131,15 @@ where
apibail_generic!("invalid cryptotyped format");
}
let kind: CryptoKind = b[0..4].try_into()?;
let value: K = b[4..].try_into()?;
let value: K = b[4..].into();
Ok(Self { kind, value })
}
}
impl<K> TryFrom<Vec<u8>> for CryptoTyped<K>
where
K: Clone + Copy + fmt::Debug + PartialEq + Eq + Hash,
K: for<'a> TryFrom<&'a [u8], Error = VeilidAPIError>,
K: Clone + fmt::Debug + PartialEq + Eq + Hash,
K: for<'a> From<&'a [u8]>,
{
type Error = VeilidAPIError;
@ -138,7 +150,7 @@ where
impl<K> From<CryptoTyped<K>> for Vec<u8>
where
K: Clone + Copy + fmt::Debug + PartialEq + Eq + Hash,
K: Clone + fmt::Debug + PartialEq + Eq + Hash,
K: AsRef<[u8]>,
{
fn from(v: CryptoTyped<K>) -> Self {
@ -150,7 +162,7 @@ where
impl<'de, K> Deserialize<'de> for CryptoTyped<K>
where
K: Clone + Copy + fmt::Debug + PartialEq + Eq + Hash,
K: Clone + fmt::Debug + PartialEq + Eq + Hash,
K: Encodable,
{
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
@ -163,7 +175,7 @@ where
}
impl<K> Serialize for CryptoTyped<K>
where
K: Clone + Copy + fmt::Debug + PartialEq + Eq + Hash,
K: Clone + fmt::Debug + PartialEq + Eq + Hash,
K: fmt::Display,
{
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
@ -173,3 +185,25 @@ where
serializer.collect_str(self)
}
}
impl CryptoTyped<BareKeyPair> {
pub fn new_from_parts(key: PublicKey, bare_secret: BareSecretKey) -> Self {
Self {
kind: key.kind(),
value: BareKeyPair::new(key.value(), bare_secret),
}
}
pub fn key(&self) -> PublicKey {
PublicKey::new(self.kind, self.ref_value().key())
}
pub fn secret(&self) -> SecretKey {
SecretKey::new(self.kind, self.ref_value().secret())
}
pub fn bare_secret(&self) -> BareSecretKey {
self.ref_value().secret()
}
pub fn ref_bare_secret(&self) -> &BareSecretKey {
self.ref_value().ref_secret()
}
}

View file

@ -5,7 +5,6 @@ use super::*;
pub struct CryptoTypedGroup<K>
where
K: Clone
+ Copy
+ fmt::Debug
+ fmt::Display
+ FromStr
@ -31,7 +30,6 @@ export type CryptoTypedGroup<TCryptoKey extends string> = Array<CryptoTyped<TCry
impl<K> CryptoTypedGroup<K>
where
K: Clone
+ Copy
+ fmt::Debug
+ fmt::Display
+ FromStr
@ -55,7 +53,7 @@ where
pub fn kinds(&self) -> Vec<CryptoKind> {
let mut out = Vec::new();
for tk in &self.items {
out.push(tk.kind);
out.push(tk.kind());
}
out.sort_by(compare_crypto_kind);
out
@ -64,17 +62,17 @@ where
pub fn keys(&self) -> Vec<K> {
let mut out = Vec::new();
for tk in &self.items {
out.push(tk.value);
out.push(tk.value());
}
out
}
#[must_use]
pub fn get(&self, kind: CryptoKind) -> Option<CryptoTyped<K>> {
self.items.iter().find(|x| x.kind == kind).copied()
self.items.iter().find(|x| x.kind() == kind).cloned()
}
pub fn add(&mut self, typed_key: CryptoTyped<K>) {
for x in &mut self.items {
if x.kind == typed_key.kind {
if x.kind() == typed_key.kind() {
*x = typed_key;
return;
}
@ -85,17 +83,17 @@ where
pub fn add_all(&mut self, typed_keys: &[CryptoTyped<K>]) {
'outer: for typed_key in typed_keys {
for x in &mut self.items {
if x.kind == typed_key.kind {
*x = *typed_key;
if x.kind() == typed_key.kind() {
*x = typed_key.clone();
continue 'outer;
}
}
self.items.push(*typed_key);
self.items.push(typed_key.clone());
}
self.items.sort()
}
pub fn remove(&mut self, kind: CryptoKind) -> Option<CryptoTyped<K>> {
if let Some(idx) = self.items.iter().position(|x| x.kind == kind) {
if let Some(idx) = self.items.iter().position(|x| x.kind() == kind) {
return Some(self.items.remove(idx));
}
None
@ -110,8 +108,8 @@ where
pub fn best(&self) -> Option<CryptoTyped<K>> {
self.items
.iter()
.find(|k| VALID_CRYPTO_KINDS.contains(&k.kind))
.copied()
.find(|k| VALID_CRYPTO_KINDS.contains(&k.kind()))
.cloned()
}
#[must_use]
pub fn is_empty(&self) -> bool {
@ -137,7 +135,7 @@ where
}
pub fn contains_value(&self, value: &K) -> bool {
for tk in &self.items {
if tk.value == *value {
if tk.ref_value() == value {
return true;
}
}
@ -148,7 +146,6 @@ where
impl<K> core::ops::Deref for CryptoTypedGroup<K>
where
K: Clone
+ Copy
+ fmt::Debug
+ fmt::Display
+ FromStr
@ -170,7 +167,6 @@ where
impl<K> fmt::Display for CryptoTypedGroup<K>
where
K: Clone
+ Copy
+ fmt::Debug
+ fmt::Display
+ FromStr
@ -198,7 +194,6 @@ where
impl<K> FromStr for CryptoTypedGroup<K>
where
K: Clone
+ Copy
+ fmt::Debug
+ fmt::Display
+ FromStr
@ -229,7 +224,6 @@ where
impl<K> From<CryptoTyped<K>> for CryptoTypedGroup<K>
where
K: Clone
+ Copy
+ fmt::Debug
+ fmt::Display
+ FromStr
@ -249,7 +243,6 @@ where
impl<K> From<Vec<CryptoTyped<K>>> for CryptoTypedGroup<K>
where
K: Clone
+ Copy
+ fmt::Debug
+ fmt::Display
+ FromStr
@ -269,7 +262,6 @@ where
impl<K> From<&[CryptoTyped<K>]> for CryptoTypedGroup<K>
where
K: Clone
+ Copy
+ fmt::Debug
+ fmt::Display
+ FromStr
@ -289,7 +281,6 @@ where
impl<K> From<CryptoTypedGroup<K>> for Vec<CryptoTyped<K>>
where
K: Clone
+ Copy
+ fmt::Debug
+ fmt::Display
+ FromStr

View file

@ -1,10 +1,10 @@
use super::*;
#[derive(Clone, Copy, Default, PartialOrd, Ord, PartialEq, Eq, Hash)]
#[derive(Clone, Default, PartialOrd, Ord, PartialEq, Eq, Hash)]
#[must_use]
pub struct BareKeyPair {
pub key: BarePublicKey,
pub secret: BareSecretKey,
key: BarePublicKey,
secret: BareSecretKey,
}
cfg_if::cfg_if! {
@ -20,8 +20,23 @@ impl BareKeyPair {
pub fn new(key: BarePublicKey, secret: BareSecretKey) -> Self {
Self { key, secret }
}
pub fn key(&self) -> BarePublicKey {
self.key.clone()
}
pub fn secret(&self) -> BareSecretKey {
self.secret.clone()
}
pub fn ref_key(&self) -> &BarePublicKey {
&self.key
}
pub fn ref_secret(&self) -> &BareSecretKey {
&self.secret
}
pub fn split(&self) -> (BarePublicKey, BareSecretKey) {
(self.key, self.secret)
(self.key.clone(), self.secret.clone())
}
pub fn ref_split(&self) -> (&BarePublicKey, &BareSecretKey) {
(&self.key, &self.secret)
}
pub fn into_split(self) -> (BarePublicKey, BareSecretKey) {
(self.key, self.secret)
@ -32,15 +47,19 @@ impl Encodable for BareKeyPair {
fn encode(&self) -> String {
format!("{}:{}", self.key.encode(), self.secret.encode())
}
fn encoded_len() -> usize {
BarePublicKey::encoded_len() + 1 + BareSecretKey::encoded_len()
fn encoded_len(&self) -> usize {
self.key.encoded_len() + 1 + self.secret.encoded_len()
}
fn try_decode_bytes(b: &[u8]) -> VeilidAPIResult<Self> {
if b.len() != Self::encoded_len() {
apibail_parse_error!("input has wrong encoded length", format!("len={}", b.len()));
let parts: Vec<_> = b.split(|x| *x == b':').collect();
if parts.len() != 2 {
apibail_parse_error!(
"input has incorrect parts",
format!("parts={}", parts.len())
);
}
let key = BarePublicKey::try_decode_bytes(&b[0..BarePublicKey::encoded_len()])?;
let secret = BareSecretKey::try_decode_bytes(&b[(BarePublicKey::encoded_len() + 1)..])?;
let key = BarePublicKey::try_decode_bytes(parts[0])?;
let secret = BareSecretKey::try_decode_bytes(parts[1])?;
Ok(BareKeyPair { key, secret })
}
}

View file

@ -63,13 +63,15 @@ pub type Signature = CryptoTyped<BareSignature>;
#[cfg_attr(all(target_arch = "wasm32", target_os = "unknown"), declare)]
pub type SharedSecret = CryptoTyped<BareSharedSecret>;
#[cfg_attr(all(target_arch = "wasm32", target_os = "unknown"), declare)]
pub type RouteId = CryptoTyped<BareRouteId>;
pub type HashDigest = CryptoTyped<BareHashDigest>;
#[cfg_attr(all(target_arch = "wasm32", target_os = "unknown"), declare)]
pub type RecordKey = CryptoTyped<BareRecordKey>;
#[cfg_attr(all(target_arch = "wasm32", target_os = "unknown"), declare)]
pub type NodeId = CryptoTyped<BareNodeId>;
#[cfg_attr(all(target_arch = "wasm32", target_os = "unknown"), declare)]
pub type HashDigest = CryptoTyped<BareHashDigest>;
pub type RouteId = CryptoTyped<BareRouteId>;
#[cfg_attr(all(target_arch = "wasm32", target_os = "unknown"), declare)]
pub type MemberId = CryptoTyped<BareMemberId>;
#[cfg_attr(all(target_arch = "wasm32", target_os = "unknown"), declare)]
pub type PublicKeyGroup = CryptoTypedGroup<BarePublicKey>;
@ -82,48 +84,50 @@ pub type SignatureGroup = CryptoTypedGroup<BareSignature>;
#[cfg_attr(all(target_arch = "wasm32", target_os = "unknown"), declare)]
pub type SharedSecretGroup = CryptoTypedGroup<BareSharedSecret>;
#[cfg_attr(all(target_arch = "wasm32", target_os = "unknown"), declare)]
pub type RouteIdGroup = CryptoTypedGroup<BareRouteId>;
pub type HashDigestGroup = CryptoTypedGroup<BareHashDigest>;
#[cfg_attr(all(target_arch = "wasm32", target_os = "unknown"), declare)]
pub type RecordKeyGroup = CryptoTypedGroup<BareRecordKey>;
#[cfg_attr(all(target_arch = "wasm32", target_os = "unknown"), declare)]
pub type NodeIdGroup = CryptoTypedGroup<BareNodeId>;
#[cfg_attr(all(target_arch = "wasm32", target_os = "unknown"), declare)]
pub type HashDigestGroup = CryptoTypedGroup<BareHashDigest>;
pub type RouteIdGroup = CryptoTypedGroup<BareRouteId>;
#[cfg_attr(all(target_arch = "wasm32", target_os = "unknown"), declare)]
pub type MemberIdGroup = CryptoTypedGroup<BareMemberId>;
impl From<NodeId> for HashDigest {
fn from(value: NodeId) -> Self {
HashDigest::new(value.kind, value.value.into())
HashDigest::new(value.kind(), value.into_value().into())
}
}
impl From<RecordKey> for HashDigest {
fn from(value: RecordKey) -> Self {
HashDigest::new(value.kind, value.value.into())
HashDigest::new(value.kind(), value.into_value().into())
}
}
impl From<NodeId> for PublicKey {
fn from(value: NodeId) -> Self {
PublicKey::new(value.kind, value.value.into())
PublicKey::new(value.kind(), value.into_value().into())
}
}
impl From<PublicKey> for NodeId {
fn from(value: PublicKey) -> Self {
NodeId::new(value.kind, value.value.into())
NodeId::new(value.kind(), value.into_value().into())
}
}
impl From<NodeIdGroup> for PublicKeyGroup {
fn from(value: NodeIdGroup) -> Self {
let items: Vec<PublicKey> = value.iter().map(|node_id| (*node_id).into()).collect();
let items: Vec<PublicKey> = value.iter().map(|node_id| node_id.clone().into()).collect();
PublicKeyGroup::from(items)
}
}
impl From<PublicKeyGroup> for NodeIdGroup {
fn from(value: PublicKeyGroup) -> Self {
let items: Vec<NodeId> = value.iter().map(|node_id| (*node_id).into()).collect();
let items: Vec<NodeId> = value.iter().map(|node_id| node_id.clone().into()).collect();
NodeIdGroup::from(items)
}
}

View file

@ -1,3 +1,5 @@
pub mod sizes;
use super::*;
use argon2::{
@ -13,25 +15,32 @@ use curve25519_dalek::digest::Digest;
use ed25519_dalek as ed;
use x25519_dalek as xd;
const VEILID_DOMAIN_SIGN: &[u8] = b"VLD0_SIGN";
const VEILID_DOMAIN_CRYPT: &[u8] = b"VLD0_CRYPT";
const VLD0_DOMAIN_SIGN: &[u8] = b"VLD0_SIGN";
const VLD0_DOMAIN_CRYPT: &[u8] = b"VLD0_CRYPT";
const AEAD_OVERHEAD: usize = 16;
const VLD0_AEAD_OVERHEAD: usize = 16;
pub const CRYPTO_KIND_VLD0: CryptoKind = CryptoKind(*b"VLD0");
pub use sizes::*;
fn public_to_x25519_pk(public: &BarePublicKey) -> VeilidAPIResult<xd::PublicKey> {
let pk_ed = ed::VerifyingKey::from_bytes(&public.bytes).map_err(VeilidAPIError::internal)?;
let pk_ed = ed::VerifyingKey::from_bytes(
public
.bytes()
.try_into()
.map_err(VeilidAPIError::internal)?,
)
.map_err(VeilidAPIError::internal)?;
Ok(xd::PublicKey::from(*pk_ed.to_montgomery().as_bytes()))
}
fn secret_to_x25519_sk(secret: &BareSecretKey) -> VeilidAPIResult<xd::StaticSecret> {
// NOTE: ed::SigningKey.to_scalar() does not produce an unreduced scalar, we want the raw bytes here
// See https://github.com/dalek-cryptography/curve25519-dalek/issues/565
let hash: [u8; SIGNATURE_LENGTH] = ed::Sha512::default()
.chain_update(secret.bytes)
let hash: [u8; VLD0_SIGNATURE_LENGTH] = ed::Sha512::default()
.chain_update(secret.bytes())
.finalize()
.into();
let mut output = [0u8; SECRET_KEY_LENGTH];
output.copy_from_slice(&hash[..SECRET_KEY_LENGTH]);
let mut output = [0u8; VLD0_SECRET_KEY_LENGTH];
output.copy_from_slice(&hash[..VLD0_SECRET_KEY_LENGTH]);
Ok(xd::StaticSecret::from(output))
}
@ -40,8 +49,8 @@ pub(crate) fn vld0_generate_keypair() -> BareKeyPair {
let mut csprng = VeilidRng {};
let signing_key = ed::SigningKey::generate(&mut csprng);
let verifying_key = signing_key.verifying_key();
let public_key = BarePublicKey::new(verifying_key.to_bytes());
let secret_key = BareSecretKey::new(signing_key.to_bytes());
let public_key = BarePublicKey::new(&verifying_key.to_bytes());
let secret_key = BareSecretKey::new(&signing_key.to_bytes());
BareKeyPair::new(public_key, secret_key)
}
@ -86,9 +95,7 @@ impl CryptoSystem for CryptoSystemVLD0 {
random_bytes(bytes.as_mut());
bytes
}
fn default_salt_length(&self) -> u32 {
16
}
#[instrument(level = "trace", target = "crypto", skip_all)]
fn hash_password(&self, password: &[u8], salt: &[u8]) -> VeilidAPIResult<String> {
if salt.len() < Salt::MIN_LENGTH || salt.len() > Salt::MAX_LENGTH {
@ -129,25 +136,25 @@ impl CryptoSystem for CryptoSystemVLD0 {
// Argon2 with default params (Argon2id v19)
let argon2 = Argon2::default();
let mut output_key_material = [0u8; SHARED_SECRET_LENGTH];
let mut output_key_material = [0u8; VLD0_SHARED_SECRET_LENGTH];
argon2
.hash_password_into(password, salt, &mut output_key_material)
.map_err(VeilidAPIError::generic)?;
Ok(BareSharedSecret::new(output_key_material))
Ok(BareSharedSecret::new(&output_key_material))
}
#[instrument(level = "trace", target = "crypto", skip_all)]
fn random_nonce(&self) -> BareNonce {
let mut nonce = [0u8; NONCE_LENGTH];
let mut nonce = [0u8; VLD0_NONCE_LENGTH];
random_bytes(&mut nonce);
BareNonce::new(nonce)
BareNonce::new(&nonce)
}
#[instrument(level = "trace", target = "crypto", skip_all)]
fn random_shared_secret(&self) -> BareSharedSecret {
let mut s = [0u8; SHARED_SECRET_LENGTH];
let mut s = [0u8; VLD0_SHARED_SECRET_LENGTH];
random_bytes(&mut s);
BareSharedSecret::new(s)
BareSharedSecret::new(&s)
}
#[instrument(level = "trace", target = "crypto", skip_all)]
@ -162,11 +169,11 @@ impl CryptoSystem for CryptoSystemVLD0 {
let dh_bytes = sk_xd.diffie_hellman(&pk_xd).to_bytes();
let mut hasher = blake3::Hasher::new();
hasher.update(VEILID_DOMAIN_CRYPT);
hasher.update(VLD0_DOMAIN_CRYPT);
hasher.update(&dh_bytes);
let output = hasher.finalize();
Ok(BareSharedSecret::new(*output.as_bytes()))
Ok(BareSharedSecret::new(output.as_bytes()))
}
#[instrument(level = "trace", target = "crypto", skip_all)]
@ -176,7 +183,7 @@ impl CryptoSystem for CryptoSystemVLD0 {
#[instrument(level = "trace", target = "crypto", skip_all)]
fn generate_hash(&self, data: &[u8]) -> BareHashDigest {
BareHashDigest::new(*blake3::hash(data).as_bytes())
BareHashDigest::new(blake3::hash(data).as_bytes())
}
#[instrument(level = "trace", target = "crypto", skip_all)]
@ -186,10 +193,35 @@ impl CryptoSystem for CryptoSystemVLD0 {
) -> VeilidAPIResult<BarePublicKey> {
let mut hasher = blake3::Hasher::new();
std::io::copy(reader, &mut hasher).map_err(VeilidAPIError::generic)?;
Ok(BarePublicKey::new(*hasher.finalize().as_bytes()))
Ok(BarePublicKey::new(hasher.finalize().as_bytes()))
}
// Validation
fn shared_secret_length(&self) -> usize {
VLD0_SHARED_SECRET_LENGTH
}
fn nonce_length(&self) -> usize {
VLD0_NONCE_LENGTH
}
fn hash_digest_length(&self) -> usize {
VLD0_HASH_DIGEST_LENGTH
}
fn public_key_length(&self) -> usize {
VLD0_PUBLIC_KEY_LENGTH
}
fn secret_key_length(&self) -> usize {
VLD0_SECRET_KEY_LENGTH
}
fn signature_length(&self) -> usize {
VLD0_SIGNATURE_LENGTH
}
fn default_salt_length(&self) -> usize {
16
}
fn aead_overhead(&self) -> usize {
VLD0_AEAD_OVERHEAD
}
#[instrument(level = "trace", target = "crypto", skip_all)]
fn validate_keypair(&self, public_key: &BarePublicKey, secret_key: &BareSecretKey) -> bool {
let data = vec![0u8; 512];
@ -206,7 +238,7 @@ impl CryptoSystem for CryptoSystemVLD0 {
fn validate_hash(&self, data: &[u8], hash_digest: &BareHashDigest) -> bool {
let bytes = *blake3::hash(data).as_bytes();
bytes == hash_digest.bytes
bytes == hash_digest.bytes()
}
#[instrument(level = "trace", target = "crypto", skip_all)]
@ -218,19 +250,19 @@ impl CryptoSystem for CryptoSystemVLD0 {
let mut hasher = blake3::Hasher::new();
std::io::copy(reader, &mut hasher).map_err(VeilidAPIError::generic)?;
let bytes = *hasher.finalize().as_bytes();
Ok(bytes == hash_digest.bytes)
Ok(bytes == hash_digest.bytes())
}
// Distance Metric
#[instrument(level = "trace", target = "crypto", skip_all)]
fn distance(&self, hash1: &BareHashDigest, hash2: &BareHashDigest) -> BareHashDistance {
let mut bytes = [0u8; CRYPTO_KEY_LENGTH];
let mut bytes = [0u8; VLD0_HASH_DIGEST_LENGTH];
(0..CRYPTO_KEY_LENGTH).for_each(|n| {
bytes[n] = hash1.bytes[n] ^ hash2.bytes[n];
(0..VLD0_HASH_DIGEST_LENGTH).for_each(|n| {
bytes[n] = hash1[n] ^ hash2[n];
});
BareHashDistance::new(bytes)
BareHashDistance::new(&bytes)
}
// Authentication
@ -241,11 +273,11 @@ impl CryptoSystem for CryptoSystemVLD0 {
secret_key: &BareSecretKey,
data: &[u8],
) -> VeilidAPIResult<BareSignature> {
let mut kpb: [u8; SECRET_KEY_LENGTH + PUBLIC_KEY_LENGTH] =
[0u8; SECRET_KEY_LENGTH + PUBLIC_KEY_LENGTH];
let mut kpb: [u8; VLD0_SECRET_KEY_LENGTH + VLD0_PUBLIC_KEY_LENGTH] =
[0u8; VLD0_SECRET_KEY_LENGTH + VLD0_PUBLIC_KEY_LENGTH];
kpb[..SECRET_KEY_LENGTH].copy_from_slice(&secret_key.bytes);
kpb[SECRET_KEY_LENGTH..].copy_from_slice(&public_key.bytes);
kpb[..VLD0_SECRET_KEY_LENGTH].copy_from_slice(secret_key);
kpb[VLD0_SECRET_KEY_LENGTH..].copy_from_slice(public_key);
let keypair = ed::SigningKey::from_keypair_bytes(&kpb)
.map_err(|e| VeilidAPIError::parse_error("Keypair is invalid", e))?;
@ -253,10 +285,10 @@ impl CryptoSystem for CryptoSystemVLD0 {
dig.update(data);
let sig_bytes = keypair
.sign_prehashed(dig, Some(VEILID_DOMAIN_SIGN))
.sign_prehashed(dig, Some(VLD0_DOMAIN_SIGN))
.map_err(VeilidAPIError::internal)?;
let sig = BareSignature::new(sig_bytes.to_bytes());
let sig = BareSignature::new(&sig_bytes.to_bytes());
if !self.verify(public_key, data, &sig)? {
apibail_internal!("newly created signature does not verify");
@ -271,15 +303,25 @@ impl CryptoSystem for CryptoSystemVLD0 {
data: &[u8],
signature: &BareSignature,
) -> VeilidAPIResult<bool> {
let pk = ed::VerifyingKey::from_bytes(&public_key.bytes)
let pk = ed::VerifyingKey::from_bytes(
public_key
.bytes()
.try_into()
.map_err(VeilidAPIError::internal)?,
)
.map_err(|e| VeilidAPIError::parse_error("Public key is invalid", e))?;
let sig = ed::Signature::from_bytes(&signature.bytes);
let sig = ed::Signature::from_bytes(
signature
.bytes()
.try_into()
.map_err(VeilidAPIError::internal)?,
);
let mut dig: ed::Sha512 = ed::Sha512::default();
dig.update(data);
if pk
.verify_prehashed_strict(dig, Some(VEILID_DOMAIN_SIGN), &sig)
.verify_prehashed_strict(dig, Some(VLD0_DOMAIN_SIGN), &sig)
.is_err()
{
return Ok(false);
@ -288,10 +330,6 @@ impl CryptoSystem for CryptoSystemVLD0 {
}
// AEAD Encrypt/Decrypt
fn aead_overhead(&self) -> usize {
AEAD_OVERHEAD
}
#[instrument(level = "trace", target = "crypto", skip_all)]
fn decrypt_in_place_aead(
&self,
@ -300,8 +338,15 @@ impl CryptoSystem for CryptoSystemVLD0 {
shared_secret: &BareSharedSecret,
associated_data: Option<&[u8]>,
) -> VeilidAPIResult<()> {
let key = ch::Key::from(shared_secret.bytes);
let xnonce = ch::XNonce::from(nonce.bytes);
let shared_secret_bytes: [u8; VLD0_SHARED_SECRET_LENGTH] = shared_secret
.bytes()
.try_into()
.map_err(VeilidAPIError::internal)?;
let nonce_bytes: [u8; VLD0_NONCE_LENGTH] =
nonce.bytes().try_into().map_err(VeilidAPIError::internal)?;
let key = ch::Key::from(shared_secret_bytes);
let xnonce = ch::XNonce::from(nonce_bytes);
let aead = ch::XChaCha20Poly1305::new(&key);
aead.decrypt_in_place(&xnonce, associated_data.unwrap_or(b""), body)
.map_err(map_to_string)
@ -331,8 +376,15 @@ impl CryptoSystem for CryptoSystemVLD0 {
shared_secret: &BareSharedSecret,
associated_data: Option<&[u8]>,
) -> VeilidAPIResult<()> {
let key = ch::Key::from(shared_secret.bytes);
let xnonce = ch::XNonce::from(nonce.bytes);
let shared_secret_bytes: [u8; VLD0_SHARED_SECRET_LENGTH] = shared_secret
.bytes()
.try_into()
.map_err(VeilidAPIError::internal)?;
let nonce_bytes: [u8; VLD0_NONCE_LENGTH] =
nonce.bytes().try_into().map_err(VeilidAPIError::internal)?;
let key = ch::Key::from(shared_secret_bytes);
let xnonce = ch::XNonce::from(nonce_bytes);
let aead = ch::XChaCha20Poly1305::new(&key);
aead.encrypt_in_place(&xnonce, associated_data.unwrap_or(b""), body)
@ -362,10 +414,19 @@ impl CryptoSystem for CryptoSystemVLD0 {
body: &mut [u8],
nonce: &BareNonce,
shared_secret: &BareSharedSecret,
) {
let mut cipher =
<XChaCha20 as KeyIvInit>::new(&shared_secret.bytes.into(), &nonce.bytes.into());
) -> VeilidAPIResult<()> {
let shared_secret_bytes: [u8; VLD0_SHARED_SECRET_LENGTH] = shared_secret
.bytes()
.try_into()
.map_err(VeilidAPIError::internal)?;
let nonce_bytes: [u8; VLD0_NONCE_LENGTH] =
nonce.bytes().try_into().map_err(VeilidAPIError::internal)?;
let key = ch::Key::from(shared_secret_bytes);
let xnonce = ch::XNonce::from(nonce_bytes);
let mut cipher = <XChaCha20 as KeyIvInit>::new(&key, &xnonce);
cipher.apply_keystream(body);
Ok(())
}
#[instrument(level = "trace", target = "crypto", skip_all)]
@ -375,10 +436,21 @@ impl CryptoSystem for CryptoSystemVLD0 {
out_buf: &mut [u8],
nonce: &BareNonce,
shared_secret: &BareSharedSecret,
) {
let mut cipher =
<XChaCha20 as KeyIvInit>::new(&shared_secret.bytes.into(), &nonce.bytes.into());
cipher.apply_keystream_b2b(in_buf, out_buf).unwrap();
) -> VeilidAPIResult<()> {
let shared_secret_bytes: [u8; VLD0_SHARED_SECRET_LENGTH] = shared_secret
.bytes()
.try_into()
.map_err(VeilidAPIError::internal)?;
let nonce_bytes: [u8; VLD0_NONCE_LENGTH] =
nonce.bytes().try_into().map_err(VeilidAPIError::internal)?;
let key = ch::Key::from(shared_secret_bytes);
let xnonce = ch::XNonce::from(nonce_bytes);
let mut cipher = <XChaCha20 as KeyIvInit>::new(&key, &xnonce);
cipher
.apply_keystream_b2b(in_buf, out_buf)
.map_err(VeilidAPIError::generic)?;
Ok(())
}
#[instrument(level = "trace", target = "crypto", skip_all)]
@ -387,10 +459,10 @@ impl CryptoSystem for CryptoSystemVLD0 {
in_buf: &[u8],
nonce: &BareNonce,
shared_secret: &BareSharedSecret,
) -> Vec<u8> {
) -> VeilidAPIResult<Vec<u8>> {
let mut out_buf = unsafe { aligned_8_u8_vec_uninit(in_buf.len()) };
self.crypt_b2b_no_auth(in_buf, &mut out_buf, nonce, shared_secret);
out_buf
self.crypt_b2b_no_auth(in_buf, &mut out_buf, nonce, shared_secret)?;
Ok(out_buf)
}
#[instrument(level = "trace", target = "crypto", skip_all)]
@ -399,9 +471,9 @@ impl CryptoSystem for CryptoSystemVLD0 {
in_buf: &[u8],
nonce: &BareNonce,
shared_secret: &BareSharedSecret,
) -> Vec<u8> {
) -> VeilidAPIResult<Vec<u8>> {
let mut out_buf = unsafe { unaligned_u8_vec_uninit(in_buf.len()) };
self.crypt_b2b_no_auth(in_buf, &mut out_buf, nonce, shared_secret);
out_buf
self.crypt_b2b_no_auth(in_buf, &mut out_buf, nonce, shared_secret)?;
Ok(out_buf)
}
}

View file

@ -0,0 +1,12 @@
/// Length of a crypto key in bytes
pub const VLD0_PUBLIC_KEY_LENGTH: usize = 32;
/// Length of a secret key in bytes
pub const VLD0_SECRET_KEY_LENGTH: usize = 32;
/// Length of a signature in bytes
pub const VLD0_SIGNATURE_LENGTH: usize = 64;
/// Length of a nonce in bytes
pub const VLD0_NONCE_LENGTH: usize = 24;
/// Length of a hash digest in bytes
pub const VLD0_HASH_DIGEST_LENGTH: usize = 32;
/// Length of a shared secret in bytes
pub const VLD0_SHARED_SECRET_LENGTH: usize = 32;

View file

@ -184,7 +184,7 @@ impl AddressFilter {
if cur_ts.as_u64().saturating_sub(value.timestamp.as_u64())
> self.punishment_duration_min as u64 * 60_000_000u64
{
dead_keys.push(*key);
dead_keys.push(key.clone());
}
}
for key in dead_keys {
@ -310,7 +310,7 @@ impl AddressFilter {
}
pub fn punish_node_id(&self, node_id: NodeId, reason: PunishmentReason) {
if let Ok(Some(nr)) = self.routing_table().lookup_node_ref(node_id) {
if let Ok(Some(nr)) = self.routing_table().lookup_node_ref(node_id.clone()) {
// make the entry dead if it's punished
nr.operate_mut(|_rti, e| e.set_punished(Some(reason)));
}

View file

@ -156,7 +156,7 @@ impl BootstrapRecord {
let crypto = network_manager.crypto();
let sig = match crypto.generate_signatures(v1.as_bytes(), &[signing_key_pair], |kp, sig| {
Signature::new(kp.kind, sig).to_string()
Signature::new(kp.kind(), sig).to_string()
}) {
Ok(v) => {
let Some(sig) = v.first().cloned() else {
@ -393,13 +393,13 @@ impl BootstrapRecord {
// Validate signature against any signing keys if we have them
if !signing_keys.is_empty() {
let mut validated = false;
for key in signing_keys.iter().copied() {
for key in signing_keys {
if let Some(valid_keys) = network_manager.crypto().verify_signatures(
&[key],
&[key.clone()],
signed_str.as_bytes(),
&[sig],
&[sig.clone()],
)? {
if valid_keys.contains(&key) {
if valid_keys.contains(key) {
validated = true;
break;
}

View file

@ -286,7 +286,7 @@ impl NetworkManager {
Some(
bcs.derive_shared_secret(
network_key_password.as_bytes(),
&bcs.generate_hash(network_key_password.as_bytes()).bytes,
&bcs.generate_hash(network_key_password.as_bytes()),
)
.expect("failed to derive network key"),
)
@ -616,9 +616,9 @@ impl NetworkManager {
let receipt = Receipt::try_new(
best_envelope_version(),
node_id.kind,
node_id.kind(),
nonce,
node_id.value,
node_id.value(),
extra_data,
)?;
let out = receipt
@ -656,9 +656,9 @@ impl NetworkManager {
let receipt = Receipt::try_new(
best_envelope_version(),
node_id.kind,
node_id.kind(),
nonce,
node_id.value,
node_id.value(),
extra_data,
)?;
let out = receipt
@ -881,7 +881,7 @@ impl NetworkManager {
// DH to get encryption key
let routing_table = self.routing_table();
let crypto = self.crypto();
let Some(vcrypto) = crypto.get(dest_node_id.kind) else {
let Some(vcrypto) = crypto.get(dest_node_id.kind()) else {
bail!("should not have a destination with incompatible crypto here");
};
@ -895,11 +895,11 @@ impl NetworkManager {
// Encode envelope
let envelope = Envelope::new(
version,
node_id.kind,
node_id.kind(),
ts,
nonce,
node_id.value,
dest_node_id.value,
node_id.value(),
dest_node_id.value(),
);
envelope
.to_encrypted_data(&crypto, body.as_ref(), &node_id_secret, &self.network_key)
@ -1101,15 +1101,15 @@ impl NetworkManager {
let rpc = self.rpc_processor();
// See if this sender is punished, if so, ignore the packet
let sender_id = envelope.get_sender_typed_id();
if self.address_filter().is_node_id_punished(sender_id) {
let sender_id = envelope.get_sender_id();
if self.address_filter().is_node_id_punished(sender_id.clone()) {
return Ok(false);
}
// Peek at header and see if we need to relay this
// If the recipient id is not our node id, then it needs relaying
let recipient_id = envelope.get_recipient_typed_id();
if !routing_table.matches_own_node_id(&[recipient_id]) {
let recipient_id = envelope.get_recipient_id();
if !routing_table.matches_own_node_id(&[recipient_id.clone()]) {
// See if the source node is allowed to resolve nodes
// This is a costly operation, so only outbound-relay permitted
// nodes are allowed to do this, for example PWA users
@ -1119,10 +1119,13 @@ impl NetworkManager {
// xxx: that 'localnetwork' routing domain nodes could be allowed to
// xxx: full relay as well as client_allowlist nodes...
let some_relay_nr = if self.check_client_allowlist(sender_id) {
let some_relay_nr = if self.check_client_allowlist(sender_id.clone()) {
// Full relay allowed, do a full resolve_node
match rpc
.resolve_node(recipient_id, SafetySelection::Unsafe(Sequencing::default()))
.resolve_node(
recipient_id.clone(),
SafetySelection::Unsafe(Sequencing::default()),
)
.await
{
Ok(v) => v.map(|nr| nr.default_filtered()),

View file

@ -231,7 +231,7 @@ impl ReceiptManager {
let receipt_inner = v.lock();
if receipt_inner.expiration_ts <= now {
// Expire this receipt
expired_nonces.push(*k);
expired_nonces.push(k.clone());
} else if new_next_oldest_ts.is_none()
|| receipt_inner.expiration_ts < new_next_oldest_ts.unwrap()
{

View file

@ -503,7 +503,7 @@ impl NetworkManager {
if target_node_ref
.node_ids()
.iter()
.any(|nid| self.address_filter().is_node_id_punished(*nid))
.any(|nid| self.address_filter().is_node_id_punished(nid.clone()))
{
return Ok(None);
}
@ -647,7 +647,11 @@ impl NetworkManager {
ContactMethod::Direct(di) => Some(NodeContactMethodKind::Direct(di)),
ContactMethod::SignalReverse(relay_key, target_key) => {
let mut relay_nr = routing_table
.lookup_and_filter_noderef(relay_key, routing_domain.into(), dial_info_filter)?
.lookup_and_filter_noderef(
relay_key.clone(),
routing_domain.into(),
dial_info_filter,
)?
.ok_or_else(|| {
eyre!(
"couldn't look up relay for signal reverse: {} with filter {:?}",
@ -702,7 +706,11 @@ impl NetworkManager {
}
ContactMethod::SignalHolePunch(relay_key, target_key) => {
let mut relay_nr = routing_table
.lookup_and_filter_noderef(relay_key, routing_domain.into(), dial_info_filter)?
.lookup_and_filter_noderef(
relay_key.clone(),
routing_domain.into(),
dial_info_filter,
)?
.ok_or_else(|| {
eyre!(
"couldn't look up relay for hole punch: {} with filter {:?}",
@ -732,7 +740,11 @@ impl NetworkManager {
}
ContactMethod::InboundRelay(relay_key) => {
let mut relay_nr = routing_table
.lookup_and_filter_noderef(relay_key, routing_domain.into(), dial_info_filter)?
.lookup_and_filter_noderef(
relay_key.clone(),
routing_domain.into(),
dial_info_filter,
)?
.ok_or_else(|| {
eyre!(
"couldn't look up relay for inbound relay: {} with filter {:?}",
@ -745,7 +757,11 @@ impl NetworkManager {
}
ContactMethod::OutboundRelay(relay_key) => {
let mut relay_nr = routing_table
.lookup_and_filter_noderef(relay_key, routing_domain.into(), dial_info_filter)?
.lookup_and_filter_noderef(
relay_key.clone(),
routing_domain.into(),
dial_info_filter,
)?
.ok_or_else(|| {
eyre!(
"couldn't look up relay for outbound relay: {} with filter {:?}",

View file

@ -127,7 +127,7 @@ impl NetworkManager {
if let Ok(Some(nr)) = routing_table.lookup_node_ref(k) {
let peer_stats = nr.peer_stats();
let peer = PeerTableData {
node_ids: nr.node_ids().iter().copied().collect(),
node_ids: nr.node_ids().iter().cloned().collect(),
peer_address: v.last_connection.remote().to_string(),
peer_stats,
};

View file

@ -77,10 +77,14 @@ pub async fn test_bootstrap_v1() {
println!("signing_key_pairs: {:?}", signing_key_pairs);
let signing_keys = signing_key_pairs
.iter()
.map(|skp| PublicKey::new(skp.kind, skp.value.key))
.map(|skp| PublicKey::new(skp.kind(), skp.ref_value().key()))
.collect::<Vec<_>>();
let v1str = bsrec
.to_v1_string(&network_manager, &dial_info_converter, signing_key_pairs[0])
.to_v1_string(
&network_manager,
&dial_info_converter,
signing_key_pairs[0].clone(),
)
.await
.expect("should make string");
let bsrec2 = BootstrapRecord::new_from_v1_str(

View file

@ -31,11 +31,11 @@ pub async fn test_signed_node_info() {
let keypair = vcrypto.generate_keypair();
let sni = SignedDirectNodeInfo::make_signatures(
&crypto,
vec![KeyPair::new(ck, keypair)],
vec![KeyPair::new(ck, keypair.clone())],
node_info.clone(),
)
.unwrap();
let tks: PublicKeyGroup = PublicKey::new(ck, keypair.key).into();
let tks: PublicKeyGroup = PublicKey::new(ck, keypair.key()).into();
let tks_node_ids = NodeIdGroup::from(tks.clone());
let oldtkslen = tks.len();
let sdni = SignedDirectNodeInfo::new(
@ -49,7 +49,7 @@ pub async fn test_signed_node_info() {
// Test incorrect validation
let keypair1 = vcrypto.generate_keypair();
let tks1: PublicKeyGroup = PublicKey::new(ck, keypair1.key).into();
let tks1: PublicKeyGroup = PublicKey::new(ck, keypair1.key()).into();
let sdni = SignedDirectNodeInfo::new(
node_info.clone(),
sni.timestamp(),
@ -63,7 +63,7 @@ pub async fn test_signed_node_info() {
PublicKey::new(fake_crypto_kind, BarePublicKey::default()).into();
let mut sigsfake = sni.signatures().to_vec();
sigsfake.push(Signature::new(fake_crypto_kind, BareSignature::default()));
tksfake.add(PublicKey::new(ck, keypair.key));
tksfake.add(PublicKey::new(ck, keypair.key()));
let sdnifake =
SignedDirectNodeInfo::new(node_info.clone(), sni.timestamp(), sigsfake.clone());
let tksfake_validated = sdnifake.validate(&tksfake.into(), &crypto).unwrap();
@ -86,12 +86,12 @@ pub async fn test_signed_node_info() {
// Test correct validation
let keypair2 = vcrypto.generate_keypair();
let tks2: PublicKeyGroup = PublicKey::new(ck, keypair2.key).into();
let tks2: PublicKeyGroup = PublicKey::new(ck, keypair2.key()).into();
let oldtks2len = tks2.len();
let sni2 = SignedRelayedNodeInfo::make_signatures(
&crypto,
vec![KeyPair::new(ck, keypair2)],
vec![KeyPair::new(ck, keypair2.clone())],
node_info2.clone(),
tks_node_ids.clone(),
sni.clone(),
@ -111,7 +111,7 @@ pub async fn test_signed_node_info() {
// Test incorrect validation
let keypair3 = vcrypto.generate_keypair();
let tks3: PublicKeyGroup = PublicKey::new(ck, keypair3.key).into();
let tks3: PublicKeyGroup = PublicKey::new(ck, keypair3.key()).into();
let srni = SignedRelayedNodeInfo::new(
node_info2.clone(),
@ -128,7 +128,7 @@ pub async fn test_signed_node_info() {
PublicKey::new(fake_crypto_kind, BarePublicKey::default()).into();
let mut sigsfake3 = sni2.signatures().to_vec();
sigsfake3.push(Signature::new(fake_crypto_kind, BareSignature::default()));
tksfake3.add(PublicKey::new(ck, keypair2.key));
tksfake3.add(PublicKey::new(ck, keypair2.key()));
let srnifake = SignedRelayedNodeInfo::new(
node_info2.clone(),
tks.clone().into(),

View file

@ -69,7 +69,7 @@ impl Bucket {
entry_index as u32
});
entries.push(SerializedBucketEntryData {
key: *k,
key: k.clone(),
value: *entry_index,
});
}
@ -83,7 +83,10 @@ impl Bucket {
veilid_log!(self trace "Node added: {}:{}", self.kind, node_id_key);
// Add new entry
let entry = Arc::new(BucketEntry::new(NodeId::new(self.kind, node_id_key)));
let entry = Arc::new(BucketEntry::new(NodeId::new(
self.kind,
node_id_key.clone(),
)));
self.entries.insert(node_id_key, entry.clone());
// Return the new entry
@ -132,8 +135,11 @@ impl Bucket {
let mut extra_entries = bucket_len - bucket_depth;
// Get the sorted list of entries by their kick order
let mut sorted_entries: Vec<(BareNodeId, Arc<BucketEntry>)> =
self.entries.iter().map(|(k, v)| (*k, v.clone())).collect();
let mut sorted_entries: Vec<(BareNodeId, Arc<BucketEntry>)> = self
.entries
.iter()
.map(|(k, v)| (k.clone(), v.clone()))
.collect();
let cur_ts = Timestamp::now();
sorted_entries.sort_by(|a, b| -> core::cmp::Ordering {
if a.0 == b.0 {

View file

@ -295,13 +295,13 @@ impl BucketEntryInner {
/// Results Err() if this operation would add more crypto kinds than we support
pub fn add_node_id(&mut self, node_id: NodeId) -> EyreResult<Option<NodeId>> {
let total_node_id_count = self.validated_node_ids.len() + self.unsupported_node_ids.len();
let node_ids = if VALID_CRYPTO_KINDS.contains(&node_id.kind) {
let node_ids = if VALID_CRYPTO_KINDS.contains(&node_id.kind()) {
&mut self.validated_node_ids
} else {
&mut self.unsupported_node_ids
};
if let Some(old_node_id) = node_ids.get(node_id.kind) {
if let Some(old_node_id) = node_ids.get(node_id.kind()) {
// If this was already there we do nothing
if old_node_id == node_id {
return Ok(None);
@ -1223,7 +1223,7 @@ pub(crate) struct BucketEntry {
impl BucketEntry {
pub(super) fn new(first_node_id: NodeId) -> Self {
// First node id should always be one we support since TypedKeySets are sorted and we must have at least one supported key
assert!(VALID_CRYPTO_KINDS.contains(&first_node_id.kind));
assert!(VALID_CRYPTO_KINDS.contains(&first_node_id.kind()));
let now = Timestamp::now();
let inner = BucketEntryInner {

View file

@ -217,7 +217,7 @@ impl RoutingTable {
if !filtered_entries.is_empty() {
out += &format!("{} Bucket #{}:\n", ck, b);
for e in filtered_entries {
let node = *e.0;
let node = e.0.clone();
let can_be_relay = e.1.with(inner, |_rti, e| relay_node_filter(e));
let is_relay = inner

View file

@ -1,17 +1,17 @@
use super::*;
impl RoutingTable {
/// Utility to find the closest nodes to a particular key, preferring reliable nodes first,
/// Utility to find the closest nodes to a particular hash coordinate, preferring reliable nodes first,
/// including possibly our own node and nodes further away from the key than our own,
/// returning their peer info
#[instrument(level = "trace", target = "rtab", skip_all)]
pub fn find_preferred_closest_peers(
&self,
routing_domain: RoutingDomain,
key: NodeId,
hash_coordinate: &HashDigest,
capabilities: &[VeilidCapability],
) -> NetworkResult<Vec<Arc<PeerInfo>>> {
if Crypto::validate_crypto_kind(key.kind).is_err() {
if Crypto::validate_crypto_kind(hash_coordinate.kind()).is_err() {
return NetworkResult::invalid_message("invalid crypto kind");
}
@ -48,7 +48,7 @@ impl RoutingTable {
let closest_nodes = match self.find_preferred_closest_nodes(
node_count,
key.into(),
hash_coordinate.clone(),
filters,
// transform
|rti, entry| {
@ -57,7 +57,10 @@ impl RoutingTable {
) {
Ok(v) => v,
Err(e) => {
error!("failed to find closest nodes for key {}: {}", key, e);
error!(
"failed to find closest nodes for key {}: {}",
hash_coordinate, e
);
return NetworkResult::invalid_message("failed to find closest nodes for key");
}
};
@ -72,11 +75,11 @@ impl RoutingTable {
pub fn find_preferred_peers_closer_to_key(
&self,
routing_domain: RoutingDomain,
key: RecordKey,
hash_coordinate: &HashDigest,
required_capabilities: Vec<VeilidCapability>,
) -> NetworkResult<Vec<Arc<PeerInfo>>> {
// add node information for the requesting node to our routing table
let crypto_kind = key.kind;
let crypto_kind = hash_coordinate.kind();
let own_node_id = self.node_id(crypto_kind);
// find N nodes closest to the target node in our routing table
@ -88,10 +91,11 @@ impl RoutingTable {
let vcrypto = &vcrypto;
let own_distance = vcrypto.distance(
&BareHashDigest::from(own_node_id.value),
&BareHashDigest::from(key.value),
&BareHashDigest::from(own_node_id.value()),
&hash_coordinate.value(),
);
let value = hash_coordinate.value();
let filter = Box::new(
move |rti: &RoutingTableInner, opt_entry: Option<Arc<BucketEntry>>| {
// Exclude our own node
@ -115,10 +119,8 @@ impl RoutingTable {
let Some(entry_node_id) = e.node_ids().get(crypto_kind) else {
return false;
};
let entry_distance = vcrypto.distance(
&BareHashDigest::from(entry_node_id.value),
&BareHashDigest::from(key.value),
);
let entry_distance = vcrypto
.distance(&BareHashDigest::from(entry_node_id.value()), &value.clone());
if entry_distance >= own_distance {
return false;
}
@ -135,7 +137,7 @@ impl RoutingTable {
//
let closest_nodes = match self.find_preferred_closest_nodes(
node_count,
key.into(),
hash_coordinate.clone(),
filters,
// transform
|rti, entry| {
@ -146,7 +148,10 @@ impl RoutingTable {
) {
Ok(v) => v,
Err(e) => {
error!("failed to find closest nodes for key {}: {}", key, e);
error!(
"failed to find closest nodes for key {}: {}",
hash_coordinate, e
);
return NetworkResult::invalid_message("failed to find closest nodes for key");
}
};
@ -155,8 +160,8 @@ impl RoutingTable {
// This same test is used on the other side so we vet things here
let valid = match Self::verify_peers_closer(
vcrypto,
own_node_id.into(),
key.into(),
&own_node_id.clone().into(),
&hash_coordinate.clone(),
&closest_nodes,
) {
Ok(v) => v,
@ -167,7 +172,7 @@ impl RoutingTable {
if !valid {
error!(
"non-closer peers returned: own_node_id={:#?} key={:#?} closest_nodes={:#?}",
own_node_id, key, closest_nodes
own_node_id, hash_coordinate, closest_nodes
);
}
@ -178,23 +183,23 @@ impl RoutingTable {
#[instrument(level = "trace", target = "rtab", skip_all, err)]
pub fn verify_peers_closer(
vcrypto: &crypto::CryptoSystemGuard<'_>,
key_far: HashDigest,
key_near: HashDigest,
key_far: &HashDigest,
key_near: &HashDigest,
peers: &[Arc<PeerInfo>],
) -> EyreResult<bool> {
let kind = vcrypto.kind();
if key_far.kind != kind || key_near.kind != kind {
if key_far.kind() != kind || key_near.kind() != kind {
bail!("keys all need the same cryptosystem");
}
let mut closer = true;
let d_far = vcrypto.distance(&key_far.value, &key_near.value);
let d_far = vcrypto.distance(key_far.ref_value(), key_near.ref_value());
for peer in peers {
let Some(key_peer) = peer.node_ids().get(kind) else {
bail!("peers need to have a key with the same cryptosystem");
};
let d_near = vcrypto.distance(&key_near.value, &key_peer.value.into());
let d_near = vcrypto.distance(key_near.ref_value(), &key_peer.value().into());
if d_far < d_near {
let warning = format!(
r#"peer: {}
@ -203,9 +208,9 @@ far (self): {}
d_near: {}
d_far: {}
cmp: {:?}"#,
key_peer.value,
key_near.value,
key_far.value,
key_peer,
key_near,
key_far,
d_near,
d_far,
d_near.cmp(&d_far)

View file

@ -35,6 +35,12 @@ impl_veilid_log_facility!("rtab");
//////////////////////////////////////////////////////////////////////////
/// Routing table bucket count (one per bit per 32 byte node id)
pub const BUCKET_COUNT: usize = 256;
/// Fixed length for NodeId in bytes
#[expect(dead_code)]
pub const NODE_ID_LENGTH: usize = 32;
/// Minimum number of nodes we need, per crypto kind, per routing domain, or we trigger a bootstrap
pub const MIN_BOOTSTRAP_CONNECTIVITY_PEERS: usize = 4;
/// Set of routing domains that use the bootstrap mechanism
@ -312,7 +318,7 @@ impl RoutingTable {
pub fn node_id_secret_key(&self, kind: CryptoKind) -> BareSecretKey {
self.config()
.with(|c| c.network.routing_table.node_id_secret.get(kind).unwrap())
.value
.value()
}
pub fn node_ids(&self) -> NodeIdGroup {
@ -325,7 +331,7 @@ impl RoutingTable {
for ck in VALID_CRYPTO_KINDS {
tkps.push(KeyPair::new(
ck,
BareKeyPair::new(self.node_id(ck).value.into(), self.node_id_secret_key(ck)),
BareKeyPair::new(self.node_id(ck).value().into(), self.node_id_secret_key(ck)),
));
}
tkps
@ -333,8 +339,8 @@ impl RoutingTable {
pub fn matches_own_node_id(&self, node_ids: &[NodeId]) -> bool {
for ni in node_ids {
if let Some(v) = self.node_ids().get(ni.kind) {
if v.value == ni.value {
if let Some(v) = self.node_ids().get(ni.kind()) {
if v.ref_value() == ni.ref_value() {
return true;
}
}
@ -344,22 +350,52 @@ impl RoutingTable {
pub fn matches_own_node_id_key(&self, node_id_key: &BareNodeId) -> bool {
for tk in self.node_ids().iter() {
if tk.value == *node_id_key {
if tk.ref_value() == node_id_key {
return true;
}
}
false
}
pub fn calculate_bucket_index(&self, node_id: &NodeId) -> BucketIndex {
/// Produce node id from public key
pub fn generate_node_id(&self, public_key: &PublicKey) -> VeilidAPIResult<NodeId> {
if public_key.ref_value().len() == NODE_ID_LENGTH {
return Ok(NodeId::new(
public_key.kind(),
BareNodeId::new(public_key.ref_value()),
));
}
let crypto = self.crypto();
let self_node_id_key = self.node_id(node_id.kind).value;
let vcrypto = crypto.get(node_id.kind).unwrap();
let Some(vcrypto) = crypto.get(public_key.kind()) else {
apibail_generic!("unsupported cryptosystem");
};
let idhash = vcrypto.generate_hash(public_key.ref_value());
assert!(
idhash.len() >= NODE_ID_LENGTH,
"generate_hash needs to produce at least {} bytes",
NODE_ID_LENGTH
);
Ok(NodeId::new(
public_key.kind(),
BareNodeId::new(&idhash[0..NODE_ID_LENGTH]),
))
}
pub fn calculate_bucket_index(&self, node_id: &NodeId) -> BucketIndex {
assert_eq!(
node_id.ref_value().len() * 8,
BUCKET_COUNT,
"NodeId should be hashed down to BUCKET_COUNT bits"
);
let crypto = self.crypto();
let self_node_id_key = self.node_id(node_id.kind()).value();
let vcrypto = crypto.get(node_id.kind()).unwrap();
(
node_id.kind,
node_id.kind(),
vcrypto
.distance(
&BareHashDigest::from(node_id.value),
&BareHashDigest::from(node_id.value()),
&BareHashDigest::from(self_node_id_key),
)
.first_nonzero_bit()
@ -427,20 +463,18 @@ impl RoutingTable {
let c = config.get();
for ck in VALID_CRYPTO_KINDS {
if let Some(nid) = c.network.routing_table.node_id.get(ck) {
cache_validity_key.append(&mut nid.value.bytes.to_vec());
cache_validity_key.extend_from_slice(nid.ref_value());
}
}
for b in &c.network.routing_table.bootstrap {
cache_validity_key.append(&mut b.as_bytes().to_vec());
cache_validity_key.extend_from_slice(b.as_bytes());
}
cache_validity_key.append(
&mut c
.network
cache_validity_key.extend_from_slice(
c.network
.network_key_password
.clone()
.unwrap_or_default()
.as_bytes()
.to_vec(),
.as_bytes(),
);
};
@ -515,7 +549,7 @@ impl RoutingTable {
veilid_log!(inner warn "crypto kind is not valid, not loading routing table");
return Ok(());
}
if v.len() != PUBLIC_KEY_LENGTH * 8 {
if v.len() != BUCKET_COUNT {
veilid_log!(inner warn "bucket count is different, not loading routing table");
return Ok(());
}
@ -665,7 +699,7 @@ impl RoutingTable {
fn queue_bucket_kicks(&self, node_ids: NodeIdGroup) {
for node_id in node_ids.iter() {
// Skip node ids we didn't add to buckets
if !VALID_CRYPTO_KINDS.contains(&node_id.kind) {
if !VALID_CRYPTO_KINDS.contains(&node_id.kind()) {
continue;
}
@ -677,7 +711,7 @@ impl RoutingTable {
/// Resolve an existing routing table entry using any crypto kind and return a reference to it
pub fn lookup_any_node_ref(&self, node_id_key: BareNodeId) -> EyreResult<Option<NodeRef>> {
self.inner.read().lookup_any_node_ref(node_id_key)
self.inner.read().lookup_bare_node_ref(node_id_key)
}
/// Resolve an existing routing table entry and return a reference to it
@ -748,22 +782,22 @@ impl RoutingTable {
{
let inner = self.inner.read();
for (k, _v) in &inner.recent_peers {
recent_peers.push(*k);
recent_peers.push(k.clone());
}
}
// look up each node and make sure the connection is still live
// (uses same logic as send_data, ensuring last_connection works for UDP)
for e in &recent_peers {
for node_id in &recent_peers {
let mut dead = true;
if let Ok(Some(nr)) = self.lookup_node_ref(*e) {
if let Ok(Some(nr)) = self.lookup_node_ref(node_id.clone()) {
if let Some(last_connection) = nr.last_flow() {
out.push((*e, RecentPeersEntry { last_connection }));
out.push((node_id.clone(), RecentPeersEntry { last_connection }));
dead = false;
}
}
if dead {
dead_peers.push(e);
dead_peers.push(node_id);
}
}

View file

@ -26,7 +26,7 @@ impl fmt::Debug for RouteHopData {
#[derive(Clone, Debug)]
pub(crate) enum RouteNode {
/// Route node is optimized, no contact method information as this node id has been seen before
BareNodeId(BareNodeId),
NodeId(NodeId),
/// Route node with full contact method information to ensure the peer is reachable
PeerInfo(Arc<PeerInfo>),
}
@ -34,20 +34,16 @@ pub(crate) enum RouteNode {
impl RouteNode {
pub fn validate(&self, crypto: &Crypto) -> VeilidAPIResult<()> {
match self {
RouteNode::BareNodeId(_) => Ok(()),
RouteNode::NodeId(_) => Ok(()),
RouteNode::PeerInfo(pi) => pi.validate(crypto),
}
}
pub fn node_ref(
&self,
routing_table: &RoutingTable,
crypto_kind: CryptoKind,
) -> Option<NodeRef> {
pub fn node_ref(&self, routing_table: &RoutingTable) -> Option<NodeRef> {
match self {
RouteNode::BareNodeId(id) => {
RouteNode::NodeId(id) => {
//
match routing_table.lookup_node_ref(NodeId::new(crypto_kind, *id)) {
match routing_table.lookup_node_ref(id.clone()) {
Ok(nr) => nr,
Err(e) => {
veilid_log!(routing_table debug "failed to look up route node: {}", e);
@ -68,15 +64,15 @@ impl RouteNode {
}
}
pub fn describe(&self, crypto_kind: CryptoKind) -> String {
pub fn describe(&self) -> String {
match self {
RouteNode::BareNodeId(id) => {
format!("{}", NodeId::new(crypto_kind, *id))
RouteNode::NodeId(id) => {
format!("{}", id)
}
RouteNode::PeerInfo(pi) => match pi.node_ids().get(crypto_kind) {
RouteNode::PeerInfo(pi) => match pi.node_ids().best() {
Some(id) => format!("{}", id),
None => {
format!("({})?{}", crypto_kind, pi.node_ids())
format!("?({})", pi.node_ids())
}
},
}
@ -122,7 +118,6 @@ impl PrivateRouteHops {
pub(crate) struct PrivateRoute {
/// The public key used for the entire route
pub public_key: PublicKey,
pub hop_count: u8,
pub hops: PrivateRouteHops,
}
@ -131,7 +126,6 @@ impl PrivateRoute {
pub fn new_stub(public_key: PublicKey, node: RouteNode) -> Self {
Self {
public_key,
hop_count: 1,
hops: PrivateRouteHops::FirstHop(Box::new(RouteHop {
node,
next_hop: None,
@ -153,7 +147,7 @@ impl PrivateRoute {
/// Get the crypto kind in use for this route
pub fn crypto_kind(&self) -> CryptoKind {
self.public_key.kind
self.public_key.kind()
}
/// Remove the first unencrypted hop if possible
@ -162,13 +156,6 @@ impl PrivateRoute {
PrivateRouteHops::FirstHop(first_hop) => {
let first_hop_node = first_hop.node.clone();
// Reduce hop count
if self.hop_count > 0 {
self.hop_count -= 1;
} else {
error!("hop count should not be 0 for first hop");
}
// Go to next hop
self.hops = match first_hop.next_hop.take() {
Some(rhd) => PrivateRouteHops::Data(rhd),
@ -189,8 +176,8 @@ impl PrivateRoute {
// Get the safety route to use from the spec
Some(match &pr_first_hop.node {
RouteNode::BareNodeId(n) => NodeId::new(self.public_key.kind, *n),
RouteNode::PeerInfo(p) => p.node_ids().get(self.public_key.kind).unwrap(),
RouteNode::NodeId(n) => n.clone(),
RouteNode::PeerInfo(p) => p.node_ids().get(self.public_key.kind()).unwrap(),
})
}
}
@ -199,9 +186,8 @@ impl fmt::Display for PrivateRoute {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(
f,
"PR({:?}+{}{})",
"PR({:?}+{})",
self.public_key,
self.hop_count,
match &self.hops {
PrivateRouteHops::FirstHop(_) => {
format!(
@ -233,7 +219,6 @@ pub(crate) enum SafetyRouteHops {
#[derive(Clone, Debug)]
pub(crate) struct SafetyRoute {
pub public_key: PublicKey,
pub hop_count: u8,
pub hops: SafetyRouteHops,
}
@ -245,7 +230,6 @@ impl SafetyRoute {
assert!(matches!(private_route.hops, PrivateRouteHops::Data(_)));
Self {
public_key,
hop_count: 0,
hops: SafetyRouteHops::Private(private_route),
}
}
@ -257,7 +241,7 @@ impl SafetyRoute {
/// Get the crypto kind in use for this route
pub fn crypto_kind(&self) -> CryptoKind {
self.public_key.kind
self.public_key.kind()
}
}
@ -265,9 +249,8 @@ impl fmt::Display for SafetyRoute {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(
f,
"SR({:?}+{}{})",
"SR({:?}+{})",
self.public_key,
self.hop_count,
match &self.hops {
SafetyRouteHops::Data(_) => "".to_owned(),
SafetyRouteHops::Private(p) => format!("->{}", p),

View file

@ -168,7 +168,7 @@ impl RouteSpecStore {
directions: DirectionSet,
avoid_nodes: &[NodeId],
automatic: bool,
) -> VeilidAPIResult<BareRouteId> {
) -> VeilidAPIResult<RouteId> {
let inner = &mut *self.inner.lock();
let routing_table = self.routing_table();
let rti = &mut *routing_table.inner.write();
@ -195,7 +195,7 @@ impl RouteSpecStore {
directions: DirectionSet,
avoid_nodes: &[NodeId],
automatic: bool,
) -> VeilidAPIResult<BareRouteId> {
) -> VeilidAPIResult<RouteId> {
use core::cmp::Ordering;
if safety_spec.preferred_route.is_some() {
@ -488,20 +488,21 @@ impl RouteSpecStore {
// Get the hop cache key for a particular route permutation
// uses the same algorithm as RouteSetSpecDetail::make_cache_key
let route_permutation_to_hop_cache =
|_rti: &RoutingTableInner, nodes: &[NodeRef], perm: &[usize]| -> Vec<u8> {
let mut cache: Vec<u8> = Vec::with_capacity(perm.len() * PUBLIC_KEY_LENGTH);
|_rti: &RoutingTableInner, nodes: &[NodeRef], perm: &[usize]| -> Option<Vec<u8>> {
let mut cachelen = 0usize;
let mut nodebytes = Vec::<BareNodeId>::with_capacity(perm.len());
for n in perm {
cache.extend_from_slice(
&nodes[*n]
.locked(rti)
.best_node_id()
.map(|bni| bni.value.bytes)
.unwrap_or_default(),
);
let b = nodes[*n].locked(rti).best_node_id()?.value();
cachelen += b.len();
nodebytes.push(b);
}
cache
let mut cache: Vec<u8> = Vec::with_capacity(cachelen);
for b in nodebytes {
cache.extend_from_slice(&b);
}
Some(cache)
};
let cache_key = route_permutation_to_hop_cache(rti, &nodes, permutation);
let cache_key = route_permutation_to_hop_cache(rti, &nodes, permutation)?;
// Skip routes we have already seen
if inner.cache.contains_route(&cache_key) {
@ -641,23 +642,16 @@ impl RouteSpecStore {
for crypto_kind in crypto_kinds.iter().copied() {
let vcrypto = crypto.get(crypto_kind).unwrap();
let keypair = vcrypto.generate_keypair();
let hops: Vec<BareNodeId> = route_nodes
let hops: Vec<NodeId> = route_nodes
.iter()
.map(|v| {
nodes[*v]
.locked(rti)
.node_ids()
.get(crypto_kind)
.unwrap()
.value
})
.map(|v| nodes[*v].locked(rti).node_ids().get(crypto_kind).unwrap())
.collect();
route_set.insert(
keypair.key,
keypair.key(),
RouteSpecDetail {
crypto_kind,
secret_key: keypair.secret,
secret_key: keypair.secret(),
hops,
},
);
@ -680,7 +674,7 @@ impl RouteSpecStore {
inner.cache.add_to_cache(rti, &rssd);
// Keep route in spec store
inner.content.add_detail(id, rssd);
inner.content.add_detail(id.clone(), rssd);
Ok(id)
}
@ -690,9 +684,9 @@ impl RouteSpecStore {
pub fn with_signature_validated_route<F, R>(
&self,
public_key: &PublicKey,
signatures: &[BareSignature],
signatures: &[Signature],
data: &[u8],
last_hop_id: BareNodeId,
last_hop_id: &NodeId,
callback: F,
) -> Option<R>
where
@ -701,21 +695,21 @@ impl RouteSpecStore {
{
let inner = &*self.inner.lock();
let crypto = self.crypto();
let Some(vcrypto) = crypto.get(public_key.kind) else {
let Some(vcrypto) = crypto.get(public_key.kind()) else {
veilid_log!(self debug "can't handle route with public key: {:?}", public_key);
return None;
};
let Some(rsid) = inner.content.get_id_by_key(&public_key.value) else {
veilid_log!(self debug target: "network_result", "route id does not exist: {:?}", public_key.value);
let Some(rsid) = inner.content.get_id_by_key(public_key.ref_value()) else {
veilid_log!(self debug target: "network_result", "route id does not exist: {:?}", public_key.ref_value());
return None;
};
let Some(rssd) = inner.content.get_detail(&rsid) else {
veilid_log!(self debug "route detail does not exist: {:?}", rsid);
return None;
};
let Some(rsd) = rssd.get_route_by_key(&public_key.value) else {
veilid_log!(self debug "route set {:?} does not have key: {:?}", rsid, public_key.value);
let Some(rsd) = rssd.get_route_by_key(public_key.ref_value()) else {
veilid_log!(self debug "route set {:?} does not have key: {:?}", rsid, public_key.ref_value());
return None;
};
@ -727,24 +721,28 @@ impl RouteSpecStore {
}
// Validate signatures to ensure the route was handled by the nodes and not messed with
// This is in private route (reverse) order as we are receiving over the route
for (hop_n, hop_public_key) in rsd.hops.iter().rev().enumerate() {
for (hop_n, hop_node_id) in rsd.hops.iter().rev().enumerate() {
// The last hop is not signed, as the whole packet is signed
if hop_n == signatures.len() {
// Verify the node we received the routed operation from is the last hop in our route
if *hop_public_key != last_hop_id {
veilid_log!(self debug "received routed operation from the wrong hop ({} should be {}) on private route {}", hop_public_key.encode(), last_hop_id.encode(), public_key);
if hop_node_id != last_hop_id {
veilid_log!(self debug "received routed operation from the wrong hop ({} should be {}) on private route {}", hop_node_id, last_hop_id, public_key);
return None;
}
} else {
// Verify a signature for a hop node along the route
match vcrypto.verify(&(*hop_public_key).into(), data, &signatures[hop_n]) {
match vcrypto.verify(
&hop_node_id.ref_value().clone().into(),
data,
signatures[hop_n].ref_value(),
) {
Ok(true) => {}
Ok(false) => {
veilid_log!(self debug "invalid signature for hop {} at {} on private route {}", hop_n, hop_public_key, public_key);
veilid_log!(self debug "invalid signature for hop {} at {} on private route {}", hop_n, hop_node_id, public_key);
return None;
}
Err(e) => {
veilid_log!(self debug "error verifying signature for hop {} at {} on private route {}: {}", hop_n, hop_public_key, public_key, e);
veilid_log!(self debug "error verifying signature for hop {} at {} on private route {}: {}", hop_n, hop_node_id, public_key, e);
return None;
}
}
@ -757,7 +755,7 @@ impl RouteSpecStore {
#[instrument(level = "trace", target = "route", skip(self), ret, err)]
async fn test_allocated_route(
&self,
private_route_id: BareRouteId,
private_route_id: RouteId,
) -> VeilidAPIResult<Option<bool>> {
// Make loopback route to test with
let (dest, hops) = {
@ -836,10 +834,7 @@ impl RouteSpecStore {
}
#[instrument(level = "trace", target = "route", skip(self), ret, err)]
async fn test_remote_route(
&self,
private_route_id: BareRouteId,
) -> VeilidAPIResult<Option<bool>> {
async fn test_remote_route(&self, private_route_id: RouteId) -> VeilidAPIResult<Option<bool>> {
// Make private route test
let dest = {
// Get the route to test
@ -882,7 +877,7 @@ impl RouteSpecStore {
/// Release an allocated route that is no longer in use
#[instrument(level = "trace", target = "route", skip(self), ret)]
fn release_allocated_route(&self, id: BareRouteId) -> bool {
fn release_allocated_route(&self, id: RouteId) -> bool {
let mut inner = self.inner.lock();
let Some(rssd) = inner.content.remove_detail(&id) else {
return false;
@ -899,7 +894,7 @@ impl RouteSpecStore {
}
/// Check if a route id is remote or not
pub fn is_route_id_remote(&self, id: &BareRouteId) -> bool {
pub fn is_route_id_remote(&self, id: &RouteId) -> bool {
let inner = &mut *self.inner.lock();
let cur_ts = Timestamp::now();
inner
@ -910,7 +905,7 @@ impl RouteSpecStore {
/// Test an allocated route for continuity
#[instrument(level = "trace", target = "route", skip(self), ret, err)]
pub async fn test_route(&self, id: BareRouteId) -> VeilidAPIResult<Option<bool>> {
pub async fn test_route(&self, id: RouteId) -> VeilidAPIResult<Option<bool>> {
let is_remote = self.is_route_id_remote(&id);
if is_remote {
self.test_remote_route(id).await
@ -921,7 +916,7 @@ impl RouteSpecStore {
/// Release an allocated or remote route that is no longer in use
#[instrument(level = "trace", target = "route", skip(self), ret)]
pub fn release_route(&self, id: BareRouteId) -> bool {
pub fn release_route(&self, id: RouteId) -> bool {
let is_remote = self.is_route_id_remote(&id);
if is_remote {
self.release_remote_private_route(id)
@ -943,7 +938,7 @@ impl RouteSpecStore {
sequencing: Sequencing,
directions: DirectionSet,
avoid_nodes: &[NodeId],
) -> Option<BareRouteId> {
) -> Option<RouteId> {
let cur_ts = Timestamp::now();
let mut routes = Vec::new();
@ -993,13 +988,13 @@ impl RouteSpecStore {
});
// Return the best one if we got one
routes.first().map(|r| *r.0)
routes.first().map(|r| r.0.clone())
}
/// List all allocated routes
pub fn list_allocated_routes<F, R>(&self, mut filter: F) -> Vec<R>
where
F: FnMut(&BareRouteId, &RouteSetSpecDetail) -> Option<R>,
F: FnMut(&RouteId, &RouteSetSpecDetail) -> Option<R>,
{
let inner = self.inner.lock();
let mut out = Vec::with_capacity(inner.content.get_detail_count());
@ -1014,7 +1009,7 @@ impl RouteSpecStore {
/// List all allocated routes
pub fn list_remote_routes<F, R>(&self, mut filter: F) -> Vec<R>
where
F: FnMut(&BareRouteId, &RemotePrivateRouteInfo) -> Option<R>,
F: FnMut(&RouteId, &RemotePrivateRouteInfo) -> Option<R>,
{
let inner = self.inner.lock();
let cur_ts = Timestamp::now();
@ -1031,7 +1026,7 @@ impl RouteSpecStore {
}
/// Get the debug description of a route
pub fn debug_route(&self, id: &BareRouteId) -> Option<String> {
pub fn debug_route(&self, id: &RouteId) -> Option<String> {
let inner = &mut *self.inner.lock();
let cur_ts = Timestamp::now();
if let Some(rpri) = inner.cache.peek_remote_private_route(cur_ts, id) {
@ -1046,7 +1041,7 @@ impl RouteSpecStore {
//////////////////////////////////////////////////////////////////////
/// Choose the best private route from a private route set to communicate with
pub fn best_remote_private_route(&self, id: &BareRouteId) -> Option<PrivateRoute> {
pub fn best_remote_private_route(&self, id: &RouteId) -> Option<PrivateRoute> {
let inner = &mut *self.inner.lock();
let cur_ts = Timestamp::now();
let rpri = inner.cache.get_remote_private_route(cur_ts, id)?;
@ -1075,18 +1070,8 @@ impl RouteSpecStore {
let Some(vcrypto) = crypto.get(crypto_kind) else {
apibail_generic!("crypto not supported for route");
};
let pr_pubkey = private_route.public_key.value;
let pr_hopcount = private_route.hop_count as usize;
let max_route_hop_count = self.max_route_hop_count;
let pr_pubkey = private_route.public_key.value();
// Check private route hop count isn't larger than the max route hop count plus one for the 'first hop' header
if pr_hopcount > (max_route_hop_count + 1) {
apibail_invalid_argument!(
"private route hop count too long",
"private_route.hop_count",
pr_hopcount
);
}
// See if we are using a safety route, if not, short circuit this operation
let safety_spec = match safety_selection {
// Safety route spec to use
@ -1098,9 +1083,9 @@ impl RouteSpecStore {
};
let opt_first_hop = match pr_first_hop_node {
RouteNode::BareNodeId(id) => rti
.lookup_node_ref(NodeId::new(crypto_kind, id))
.map_err(VeilidAPIError::internal)?,
RouteNode::NodeId(id) => {
rti.lookup_node_ref(id).map_err(VeilidAPIError::internal)?
}
RouteNode::PeerInfo(pi) => Some(
rti.register_node_with_peer_info(pi, false)
.map_err(VeilidAPIError::internal)?
@ -1139,7 +1124,7 @@ impl RouteSpecStore {
&& safety_spec.preferred_route == opt_private_route_id
{
// Private route is also safety route during loopback test
pr_pubkey
pr_pubkey.clone()
} else {
let Some(avoid_node_id) = private_route.first_hop_node_id() else {
apibail_generic!("compiled private route should have first hop");
@ -1181,13 +1166,13 @@ impl RouteSpecStore {
.merge_filter(NodeRefFilter::new().with_routing_domain(RoutingDomain::PublicInternet));
// Get the safety route secret key
let secret = safety_rsd.secret_key;
let secret = safety_rsd.secret_key.clone();
// See if we have a cached route we can use
if optimize {
if let Some(safety_route) = inner
.cache
.lookup_compiled_route_cache(sr_pubkey, pr_pubkey)
.lookup_compiled_route_cache(sr_pubkey.clone(), pr_pubkey.clone())
{
// Build compiled route
let compiled_route = CompiledRoute {
@ -1227,7 +1212,7 @@ impl RouteSpecStore {
blob_data = {
// Encrypt the previous blob ENC(nonce, DH(PKhop,SKsr))
let dh_secret = vcrypto
.cached_dh(&safety_rsd.hops[h].into(), &safety_rsd.secret_key)
.cached_dh(&safety_rsd.hops[h].value().into(), &safety_rsd.secret_key)
.map_err(VeilidAPIError::internal)?;
let enc_msg_data = vcrypto
.encrypt_aead(blob_data.as_slice(), &nonce, &dh_secret, None)
@ -1243,10 +1228,10 @@ impl RouteSpecStore {
let route_hop = RouteHop {
node: if optimize {
// Optimized, no peer info, just the dht key
RouteNode::BareNodeId(safety_rsd.hops[h])
RouteNode::NodeId(safety_rsd.hops[h].clone())
} else {
// Full peer info, required until we are sure the route has been fully established
let node_id = NodeId::new(safety_rsd.crypto_kind, safety_rsd.hops[h]);
let node_id = safety_rsd.hops[h].clone();
let pi = rti
.with_node_entry(node_id, |entry| {
entry.with(rti, |_rti, e| {
@ -1279,7 +1264,7 @@ impl RouteSpecStore {
// Encode first RouteHopData
let dh_secret = vcrypto
.cached_dh(&safety_rsd.hops[0].into(), &safety_rsd.secret_key)
.cached_dh(&safety_rsd.hops[0].value().into(), &safety_rsd.secret_key)
.map_err(VeilidAPIError::internal)?;
let enc_msg_data = vcrypto
.encrypt_aead(blob_data.as_slice(), &nonce, &dh_secret, None)
@ -1295,8 +1280,7 @@ impl RouteSpecStore {
// Build safety route
let safety_route = SafetyRoute {
public_key: PublicKey::new(crypto_kind, sr_pubkey),
hop_count: safety_spec.hop_count as u8,
public_key: PublicKey::new(crypto_kind, sr_pubkey.clone()),
hops,
};
@ -1304,7 +1288,7 @@ impl RouteSpecStore {
if optimize {
inner
.cache
.add_to_compiled_route_cache(pr_pubkey, safety_route.clone());
.add_to_compiled_route_cache(pr_pubkey.clone(), safety_route.clone());
}
// Build compiled route
@ -1348,13 +1332,13 @@ impl RouteSpecStore {
}
// See if the preferred route is here
if let Some(preferred_route) = safety_spec.preferred_route {
if let Some(preferred_rssd) = inner.content.get_detail(&preferred_route) {
if let Some(preferred_route) = &safety_spec.preferred_route {
if let Some(preferred_rssd) = inner.content.get_detail(preferred_route) {
// Only use the preferred route if it has the desired crypto kind
if let Some(preferred_key) = preferred_rssd.get_route_set_keys().get(crypto_kind) {
// Only use the preferred route if it doesn't contain the avoid nodes
if !preferred_rssd.contains_nodes(avoid_nodes) {
return Ok(preferred_key.value);
return Ok(preferred_key.value());
}
}
}
@ -1393,7 +1377,7 @@ impl RouteSpecStore {
.get_route_set_keys()
.get(crypto_kind)
.unwrap()
.value;
.value();
Ok(sr_pubkey)
}
@ -1455,7 +1439,7 @@ impl RouteSpecStore {
rsd.crypto_kind
);
};
RouteNode::BareNodeId(node_id.value)
RouteNode::NodeId(node_id)
} else {
RouteNode::PeerInfo(published_peer_info)
},
@ -1476,7 +1460,7 @@ impl RouteSpecStore {
};
// Encrypt the previous blob ENC(nonce, DH(PKhop,SKpr))
let dh_secret = vcrypto.cached_dh(&rsd.hops[h].into(), &rsd.secret_key)?;
let dh_secret = vcrypto.cached_dh(&rsd.hops[h].value().into(), &rsd.secret_key)?;
let enc_msg_data =
vcrypto.encrypt_aead(blob_data.as_slice(), &nonce, &dh_secret, None)?;
let route_hop_data = RouteHopData {
@ -1487,10 +1471,10 @@ impl RouteSpecStore {
route_hop = RouteHop {
node: if optimized {
// Optimized, no peer info, just the dht key
RouteNode::BareNodeId(rsd.hops[h])
RouteNode::NodeId(rsd.hops[h].clone())
} else {
// Full peer info, required until we are sure the route has been fully established
let node_id = NodeId::new(rsd.crypto_kind, rsd.hops[h]);
let node_id = rsd.hops[h].clone();
let pi = rti
.with_node_entry(node_id, |entry| {
entry.with(rti, |_rti, e| {
@ -1508,9 +1492,7 @@ impl RouteSpecStore {
}
let private_route = PrivateRoute {
public_key: PublicKey::new(rsd.crypto_kind, *key),
// add hop for 'FirstHop'
hop_count: (hop_count + 1).try_into().unwrap(),
public_key: PublicKey::new(rsd.crypto_kind, key.clone()),
hops: PrivateRouteHops::FirstHop(Box::new(route_hop)),
};
Ok(private_route)
@ -1549,7 +1531,7 @@ impl RouteSpecStore {
#[instrument(level = "trace", target = "route", skip_all)]
pub fn assemble_private_routes(
&self,
id: &BareRouteId,
id: &RouteId,
optimized: Option<bool>,
) -> VeilidAPIResult<Vec<PrivateRoute>> {
let inner = &*self.inner.lock();
@ -1572,7 +1554,7 @@ impl RouteSpecStore {
/// It is safe to import the same route more than once and it will return the same route id
/// Returns a route set id
#[instrument(level = "trace", target = "route", skip_all)]
pub fn import_remote_private_route_blob(&self, blob: Vec<u8>) -> VeilidAPIResult<BareRouteId> {
pub fn import_remote_private_route_blob(&self, blob: Vec<u8>) -> VeilidAPIResult<RouteId> {
let cur_ts = Timestamp::now();
// decode the pr blob
@ -1597,7 +1579,7 @@ impl RouteSpecStore {
inner
.cache
.cache_remote_private_route(cur_ts, id, private_routes);
.cache_remote_private_route(cur_ts, id.clone(), private_routes);
Ok(id)
}
@ -1609,7 +1591,7 @@ impl RouteSpecStore {
pub fn add_remote_private_route(
&self,
private_route: PrivateRoute,
) -> VeilidAPIResult<BareRouteId> {
) -> VeilidAPIResult<RouteId> {
let cur_ts = Timestamp::now();
// Make a single route set
@ -1634,20 +1616,20 @@ impl RouteSpecStore {
inner
.cache
.cache_remote_private_route(cur_ts, id, private_routes);
.cache_remote_private_route(cur_ts, id.clone(), private_routes);
Ok(id)
}
/// Release a remote private route that is no longer in use
#[instrument(level = "trace", target = "route", skip_all)]
pub fn release_remote_private_route(&self, id: BareRouteId) -> bool {
pub fn release_remote_private_route(&self, id: RouteId) -> bool {
let inner = &mut *self.inner.lock();
inner.cache.remove_remote_private_route(id)
}
/// Get a route id for a route's public key
pub fn get_route_id_for_key(&self, key: &BarePublicKey) -> Option<BareRouteId> {
pub fn get_route_id_for_key(&self, key: &BarePublicKey) -> Option<RouteId> {
let inner = &mut *self.inner.lock();
// Check for local route
if let Some(id) = inner.content.get_id_by_key(key) {
@ -1740,7 +1722,10 @@ impl RouteSpecStore {
let inner = &mut *self.inner.lock();
// Check for stub route
if self.routing_table().matches_own_node_id_key(&(*key).into()) {
if self
.routing_table()
.matches_own_node_id_key(&key.clone().into())
{
return None;
}
@ -1778,7 +1763,7 @@ impl RouteSpecStore {
/// Mark route as published
/// When first deserialized, routes must be re-published in order to ensure they remain
/// in the RouteSpecStore.
pub fn mark_route_published(&self, id: &BareRouteId, published: bool) -> VeilidAPIResult<()> {
pub fn mark_route_published(&self, id: &RouteId, published: bool) -> VeilidAPIResult<()> {
let inner = &mut *self.inner.lock();
let Some(rssd) = inner.content.get_detail_mut(id) else {
apibail_invalid_target!("route does not exist");
@ -1886,55 +1871,67 @@ impl RouteSpecStore {
Ok(out)
}
/// Generate BareRouteId from typed key set of route public keys
fn generate_allocated_route_id(
&self,
rssd: &RouteSetSpecDetail,
) -> VeilidAPIResult<BareRouteId> {
/// Generate RouteId from typed key set of route public keys
fn generate_allocated_route_id(&self, rssd: &RouteSetSpecDetail) -> VeilidAPIResult<RouteId> {
let route_set_keys = rssd.get_route_set_keys();
let crypto = self.crypto();
let mut idbytes = Vec::with_capacity(PUBLIC_KEY_LENGTH * route_set_keys.len());
let pkbyteslen = route_set_keys
.iter()
.fold(0, |acc, x| acc + x.ref_value().len());
let mut pkbytes = Vec::with_capacity(pkbyteslen);
let mut best_kind: Option<CryptoKind> = None;
for tk in route_set_keys.iter() {
if best_kind.is_none()
|| compare_crypto_kind(&tk.kind, best_kind.as_ref().unwrap()) == cmp::Ordering::Less
|| compare_crypto_kind(&tk.kind(), best_kind.as_ref().unwrap())
== cmp::Ordering::Less
{
best_kind = Some(tk.kind);
best_kind = Some(tk.kind());
}
idbytes.extend_from_slice(&tk.value.bytes);
pkbytes.extend_from_slice(tk.ref_value());
}
let Some(best_kind) = best_kind else {
apibail_internal!("no compatible crypto kinds in route");
};
let vcrypto = crypto.get(best_kind).unwrap();
Ok(BareRouteId::new(vcrypto.generate_hash(&idbytes).bytes))
Ok(RouteId::new(
vcrypto.kind(),
BareRouteId::new(vcrypto.generate_hash(&pkbytes).bytes()),
))
}
/// Generate BareRouteId from set of private routes
/// Generate RouteId from set of private routes
fn generate_remote_route_id(
&self,
private_routes: &[PrivateRoute],
) -> VeilidAPIResult<BareRouteId> {
) -> VeilidAPIResult<RouteId> {
let crypto = self.crypto();
let mut idbytes = Vec::with_capacity(PUBLIC_KEY_LENGTH * private_routes.len());
let pkbyteslen = private_routes
.iter()
.fold(0, |acc, x| acc + x.public_key.ref_value().len());
let mut pkbytes = Vec::with_capacity(pkbyteslen);
let mut best_kind: Option<CryptoKind> = None;
for private_route in private_routes {
if best_kind.is_none()
|| compare_crypto_kind(&private_route.public_key.kind, best_kind.as_ref().unwrap())
== cmp::Ordering::Less
|| compare_crypto_kind(
&private_route.public_key.kind(),
best_kind.as_ref().unwrap(),
) == cmp::Ordering::Less
{
best_kind = Some(private_route.public_key.kind);
best_kind = Some(private_route.public_key.kind());
}
idbytes.extend_from_slice(&private_route.public_key.value.bytes);
pkbytes.extend_from_slice(private_route.public_key.ref_value());
}
let Some(best_kind) = best_kind else {
apibail_internal!("no compatible crypto kinds in route");
};
let vcrypto = crypto.get(best_kind).unwrap();
Ok(BareRouteId::new(vcrypto.generate_hash(&idbytes).bytes))
Ok(RouteId::new(
vcrypto.kind(),
BareRouteId::new(vcrypto.generate_hash(&pkbytes).bytes()),
))
}
}

View file

@ -35,7 +35,7 @@ impl RemotePrivateRouteInfo {
acc
}
})
.filter(|x| VALID_CRYPTO_KINDS.contains(&x.public_key.kind))
.filter(|x| VALID_CRYPTO_KINDS.contains(&x.public_key.kind()))
.cloned()
}
pub fn get_stats(&self) -> &RouteStats {

View file

@ -7,7 +7,7 @@ pub struct RouteSpecDetail {
/// Secret key
pub secret_key: BareSecretKey,
/// Route hops (node id keys)
pub hops: Vec<BareNodeId>,
pub hops: Vec<NodeId>,
}
#[derive(Clone, Debug, Serialize, Deserialize)]
@ -60,12 +60,12 @@ impl RouteSetSpecDetail {
pub fn get_route_set_keys(&self) -> PublicKeyGroup {
let mut tks = PublicKeyGroup::new();
for (k, v) in &self.route_set {
tks.add(PublicKey::new(v.crypto_kind, *k));
tks.add(PublicKey::new(v.crypto_kind, k.clone()));
}
tks
}
pub fn get_best_route_set_key(&self) -> Option<BarePublicKey> {
self.get_route_set_keys().best().map(|k| k.value)
self.get_route_set_keys().best().map(|k| k.value())
}
pub fn set_hop_node_refs(&mut self, node_refs: Vec<NodeRef>) {
self.hop_node_refs = node_refs;
@ -112,7 +112,7 @@ impl RouteSetSpecDetail {
pub fn contains_nodes(&self, nodes: &[NodeId]) -> bool {
for tk in nodes {
for rsd in self.route_set.values() {
if rsd.crypto_kind == tk.kind && rsd.hops.contains(&tk.value) {
if rsd.crypto_kind == tk.kind() && rsd.hops.contains(tk) {
return true;
}
}
@ -124,17 +124,21 @@ impl RouteSetSpecDetail {
}
/// Generate a key for the cache that can be used to uniquely identify this route's contents
pub fn make_cache_key(&self, rti: &RoutingTableInner) -> Vec<u8> {
pub fn make_cache_key(&self, rti: &RoutingTableInner) -> Option<Vec<u8>> {
let hops = &self.hop_node_refs;
let mut cache: Vec<u8> = Vec::with_capacity(hops.len() * PUBLIC_KEY_LENGTH);
let mut cachelen = 0usize;
let mut nodebytes = Vec::<BareNodeId>::with_capacity(hops.len());
for hop in hops {
cache.extend_from_slice(
&hop.locked(rti)
.best_node_id()
.map(|bni| bni.value.bytes)
.unwrap_or_default(),
);
let b = hop.locked(rti).best_node_id()?.value();
cachelen += b.len();
nodebytes.push(b);
}
cache
let mut cache: Vec<u8> = Vec::with_capacity(cachelen);
for b in nodebytes {
cache.extend_from_slice(&b);
}
Some(cache)
}
}

View file

@ -26,21 +26,21 @@ pub struct RouteSpecStoreCache {
/// Registry accessor
registry: VeilidComponentRegistry,
/// How many times nodes have been used
used_nodes: HashMap<BareNodeId, usize>,
used_nodes: HashMap<NodeId, usize>,
/// How many times nodes have been used at the terminal point of a route
used_end_nodes: HashMap<BareNodeId, usize>,
used_end_nodes: HashMap<NodeId, usize>,
/// Route spec hop cache, used to quickly disqualify routes
hop_cache: HashSet<Vec<u8>>,
/// Remote private routes we've imported and statistics
remote_private_route_set_cache: LruCache<BareRouteId, RemotePrivateRouteInfo>,
remote_private_route_set_cache: LruCache<RouteId, RemotePrivateRouteInfo>,
/// Remote private route ids indexed by route's public key
remote_private_routes_by_key: HashMap<BarePublicKey, BareRouteId>,
remote_private_routes_by_key: HashMap<BarePublicKey, RouteId>,
/// Compiled route cache
compiled_route_cache: LruCache<CompiledRouteCacheKey, SafetyRoute>,
/// List of dead allocated routes
dead_routes: Vec<BareRouteId>,
dead_routes: Vec<RouteId>,
/// List of dead remote routes
dead_remote_routes: Vec<BareRouteId>,
dead_remote_routes: Vec<RouteId>,
}
impl_veilid_component_registry_accessor!(RouteSpecStoreCache);
@ -62,19 +62,21 @@ impl RouteSpecStoreCache {
/// add an allocated route set to our cache via its cache key
pub fn add_to_cache(&mut self, rti: &RoutingTableInner, rssd: &RouteSetSpecDetail) {
let cache_key = rssd.make_cache_key(rti);
let Some(cache_key) = rssd.make_cache_key(rti) else {
panic!("all routes should have a cache key");
};
if !self.hop_cache.insert(cache_key) {
panic!("route should never be inserted twice");
}
for (_pk, rsd) in rssd.iter_route_set() {
for h in &rsd.hops {
self.used_nodes
.entry(*h)
.entry(h.clone())
.and_modify(|e| *e += 1)
.or_insert(1);
}
self.used_end_nodes
.entry(*rsd.hops.last().unwrap())
.entry(rsd.hops.last().unwrap().clone())
.and_modify(|e| *e += 1)
.or_insert(1);
}
@ -89,10 +91,12 @@ impl RouteSpecStoreCache {
pub fn remove_from_cache(
&mut self,
rti: &RoutingTableInner,
id: BareRouteId,
id: RouteId,
rssd: &RouteSetSpecDetail,
) -> bool {
let cache_key = rssd.make_cache_key(rti);
let Some(cache_key) = rssd.make_cache_key(rti) else {
panic!("all routes should have a cache key");
};
// Remove from hop cache
if !self.hop_cache.remove(&cache_key) {
@ -101,7 +105,7 @@ impl RouteSpecStoreCache {
for (pk, rsd) in rssd.iter_route_set() {
for h in &rsd.hops {
// Remove from used nodes cache
match self.used_nodes.entry(*h) {
match self.used_nodes.entry(h.clone()) {
std::collections::hash_map::Entry::Occupied(mut o) => {
*o.get_mut() -= 1;
if *o.get() == 0 {
@ -114,7 +118,7 @@ impl RouteSpecStoreCache {
}
}
// Remove from end nodes cache
match self.used_end_nodes.entry(*rsd.hops.last().unwrap()) {
match self.used_end_nodes.entry(rsd.hops.last().cloned().unwrap()) {
std::collections::hash_map::Entry::Occupied(mut o) => {
*o.get_mut() -= 1;
if *o.get() == 0 {
@ -141,27 +145,23 @@ impl RouteSpecStoreCache {
/// calculate how many times a node with a particular node id set has been used anywhere in the path of our allocated routes
pub fn get_used_node_count(&self, node_ids: &NodeIdGroup) -> usize {
node_ids.iter().fold(0usize, |acc, k| {
acc + self.used_nodes.get(&k.value).cloned().unwrap_or_default()
acc + self.used_nodes.get(k).cloned().unwrap_or_default()
})
}
/// calculate how many times a node with a particular node id set has been used at the end of the path of our allocated routes
pub fn get_used_end_node_count(&self, node_ids: &NodeIdGroup) -> usize {
node_ids.iter().fold(0usize, |acc, k| {
acc + self
.used_end_nodes
.get(&k.value)
.cloned()
.unwrap_or_default()
acc + self.used_end_nodes.get(k).cloned().unwrap_or_default()
})
}
/// add remote private route to caches
fn add_remote_private_route(&mut self, id: BareRouteId, rprinfo: RemotePrivateRouteInfo) {
fn add_remote_private_route(&mut self, id: RouteId, rprinfo: RemotePrivateRouteInfo) {
// also store in id by key table
for private_route in rprinfo.get_private_routes() {
self.remote_private_routes_by_key
.insert(private_route.public_key.value, id);
.insert(private_route.public_key.value(), id.clone());
}
let mut dead = None;
@ -179,21 +179,21 @@ impl RouteSpecStoreCache {
for dead_private_route in dead_rpri.get_private_routes() {
let _ = self
.remote_private_routes_by_key
.remove(&dead_private_route.public_key.value)
.remove(dead_private_route.public_key.ref_value())
.unwrap();
self.invalidate_compiled_route_cache(&dead_private_route.public_key.value);
self.invalidate_compiled_route_cache(dead_private_route.public_key.ref_value());
}
self.dead_remote_routes.push(dead_id);
}
}
/// iterate all of the remote private routes we have in the cache
pub fn get_remote_private_route_ids(&self, cur_ts: Timestamp) -> Vec<BareRouteId> {
pub fn get_remote_private_route_ids(&self, cur_ts: Timestamp) -> Vec<RouteId> {
self.remote_private_route_set_cache
.iter()
.filter_map(|(id, rpri)| {
if !rpri.did_expire(cur_ts) {
Some(*id)
Some(id.clone())
} else {
None
}
@ -206,7 +206,7 @@ impl RouteSpecStoreCache {
pub fn get_remote_private_route(
&mut self,
cur_ts: Timestamp,
id: &BareRouteId,
id: &RouteId,
) -> Option<&RemotePrivateRouteInfo> {
if let Some(rpri) = self.remote_private_route_set_cache.get_mut(id) {
if !rpri.did_expire(cur_ts) {
@ -222,7 +222,7 @@ impl RouteSpecStoreCache {
pub fn get_remote_private_route_mut(
&mut self,
cur_ts: Timestamp,
id: &BareRouteId,
id: &RouteId,
) -> Option<&mut RemotePrivateRouteInfo> {
if let Some(rpri) = self.remote_private_route_set_cache.get_mut(id) {
if !rpri.did_expire(cur_ts) {
@ -238,7 +238,7 @@ impl RouteSpecStoreCache {
pub fn peek_remote_private_route(
&self,
cur_ts: Timestamp,
id: &BareRouteId,
id: &RouteId,
) -> Option<&RemotePrivateRouteInfo> {
if let Some(rpri) = self.remote_private_route_set_cache.peek(id) {
if !rpri.did_expire(cur_ts) {
@ -253,7 +253,7 @@ impl RouteSpecStoreCache {
pub fn peek_remote_private_route_mut(
&mut self,
cur_ts: Timestamp,
id: &BareRouteId,
id: &RouteId,
) -> Option<&mut RemotePrivateRouteInfo> {
if let Some(rpri) = self.remote_private_route_set_cache.peek_mut(id) {
if !rpri.did_expire(cur_ts) {
@ -265,7 +265,7 @@ impl RouteSpecStoreCache {
}
/// look up a remote private route id by one of the route public keys
pub fn get_remote_private_route_id_by_key(&self, key: &BarePublicKey) -> Option<BareRouteId> {
pub fn get_remote_private_route_id_by_key(&self, key: &BarePublicKey) -> Option<RouteId> {
self.remote_private_routes_by_key.get(key).cloned()
}
@ -276,7 +276,7 @@ impl RouteSpecStoreCache {
pub fn cache_remote_private_route(
&mut self,
cur_ts: Timestamp,
id: BareRouteId,
id: RouteId,
private_routes: Vec<PrivateRoute>,
) {
// get id for this route set
@ -292,7 +292,7 @@ impl RouteSpecStoreCache {
// New remote private route cache entry
let rpri = RemotePrivateRouteInfo::new(private_routes, cur_ts);
self.add_remote_private_route(id, rpri);
self.add_remote_private_route(id.clone(), rpri);
if self.peek_remote_private_route_mut(cur_ts, &id).is_none() {
panic!("remote private route should exist");
};
@ -300,16 +300,16 @@ impl RouteSpecStoreCache {
}
/// remove a remote private route from the cache
pub fn remove_remote_private_route(&mut self, id: BareRouteId) -> bool {
pub fn remove_remote_private_route(&mut self, id: RouteId) -> bool {
let Some(rprinfo) = self.remote_private_route_set_cache.remove(&id) else {
return false;
};
for private_route in rprinfo.get_private_routes() {
let _ = self
.remote_private_routes_by_key
.remove(&private_route.public_key.value)
.remove(private_route.public_key.ref_value())
.unwrap();
self.invalidate_compiled_route_cache(&private_route.public_key.value);
self.invalidate_compiled_route_cache(private_route.public_key.ref_value());
}
self.dead_remote_routes.push(id);
true
@ -322,8 +322,8 @@ impl RouteSpecStoreCache {
safety_route: SafetyRoute,
) {
let key = CompiledRouteCacheKey {
sr_pubkey: safety_route.public_key.value,
pr_pubkey,
sr_pubkey: safety_route.public_key.value(),
pr_pubkey: pr_pubkey.clone(),
};
if let Some(v) = self.compiled_route_cache.insert(key, safety_route) {
@ -358,7 +358,7 @@ impl RouteSpecStoreCache {
}
/// Take the dead local and remote routes so we can update clients
pub fn take_dead_routes(&mut self) -> Option<(Vec<BareRouteId>, Vec<BareRouteId>)> {
pub fn take_dead_routes(&mut self) -> Option<(Vec<RouteId>, Vec<RouteId>)> {
if self.dead_routes.is_empty() && self.dead_remote_routes.is_empty() {
// Nothing to do
return None;

View file

@ -4,9 +4,9 @@ use super::*;
#[derive(Debug, Clone, Default, Serialize, Deserialize)]
pub(super) struct RouteSpecStoreContent {
/// All of the route sets we have allocated so far indexed by key (many to one)
id_by_key: HashMap<BarePublicKey, BareRouteId>,
id_by_key: HashMap<BarePublicKey, RouteId>,
/// All of the route sets we have allocated so far
details: HashMap<BareRouteId, RouteSetSpecDetail>,
details: HashMap<RouteId, RouteSetSpecDetail>,
}
impl RouteSpecStoreContent {
@ -24,19 +24,18 @@ impl RouteSpecStoreContent {
for (rsid, rssd) in content.details.iter_mut() {
// Get best route since they all should resolve
let Some(pk) = rssd.get_best_route_set_key() else {
dead_ids.push(*rsid);
dead_ids.push(rsid.clone());
continue;
};
let Some(rsd) = rssd.get_route_by_key(&pk) else {
dead_ids.push(*rsid);
dead_ids.push(rsid.clone());
continue;
};
// Go through best route and resolve noderefs
let mut hop_node_refs = Vec::with_capacity(rsd.hops.len());
for h in &rsd.hops {
let Ok(Some(nr)) = routing_table.lookup_node_ref(NodeId::new(rsd.crypto_kind, *h))
else {
dead_ids.push(*rsid);
let Ok(Some(nr)) = routing_table.lookup_node_ref(h.clone()) else {
dead_ids.push(rsid.clone());
break;
};
hop_node_refs.push(nr);
@ -62,16 +61,16 @@ impl RouteSpecStoreContent {
Ok(())
}
pub fn add_detail(&mut self, id: BareRouteId, detail: RouteSetSpecDetail) {
pub fn add_detail(&mut self, id: RouteId, detail: RouteSetSpecDetail) {
assert!(!self.details.contains_key(&id));
// also store in id by key table
for (pk, _) in detail.iter_route_set() {
self.id_by_key.insert(*pk, id);
self.id_by_key.insert(pk.clone(), id.clone());
}
self.details.insert(id, detail);
}
pub fn remove_detail(&mut self, id: &BareRouteId) -> Option<RouteSetSpecDetail> {
pub fn remove_detail(&mut self, id: &RouteId) -> Option<RouteSetSpecDetail> {
let detail = self.details.remove(id)?;
for (pk, _) in detail.iter_route_set() {
let _ = self.id_by_key.remove(pk).unwrap();
@ -81,21 +80,19 @@ impl RouteSpecStoreContent {
pub fn get_detail_count(&self) -> usize {
self.details.len()
}
pub fn get_detail(&self, id: &BareRouteId) -> Option<&RouteSetSpecDetail> {
pub fn get_detail(&self, id: &RouteId) -> Option<&RouteSetSpecDetail> {
self.details.get(id)
}
pub fn get_detail_mut(&mut self, id: &BareRouteId) -> Option<&mut RouteSetSpecDetail> {
pub fn get_detail_mut(&mut self, id: &RouteId) -> Option<&mut RouteSetSpecDetail> {
self.details.get_mut(id)
}
pub fn get_id_by_key(&self, key: &BarePublicKey) -> Option<BareRouteId> {
pub fn get_id_by_key(&self, key: &BarePublicKey) -> Option<RouteId> {
self.id_by_key.get(key).cloned()
}
// pub fn iter_ids(&self) -> std::collections::hash_map::Keys<BareRouteId, RouteSetSpecDetail> {
// pub fn iter_ids(&self) -> std::collections::hash_map::Keys<RouteId, RouteSetSpecDetail> {
// self.details.keys()
// }
pub fn iter_details(
&self,
) -> std::collections::hash_map::Iter<BareRouteId, RouteSetSpecDetail> {
pub fn iter_details(&self) -> std::collections::hash_map::Iter<RouteId, RouteSetSpecDetail> {
self.details.iter()
}

View file

@ -342,8 +342,8 @@ impl RoutingTableInner {
// Size the buckets (one per bit), one bucket set per crypto kind
self.buckets.clear();
for ck in VALID_CRYPTO_KINDS {
let mut ckbuckets = Vec::with_capacity(PUBLIC_KEY_LENGTH * 8);
for _ in 0..PUBLIC_KEY_LENGTH * 8 {
let mut ckbuckets = Vec::with_capacity(BUCKET_COUNT);
for _ in 0..BUCKET_COUNT {
let bucket = Bucket::new(self.registry(), ck);
ckbuckets.push(bucket);
}
@ -666,7 +666,7 @@ impl RoutingTableInner {
let mut old_peer_infos = vec![];
for node_id in node_ids {
let ck = node_id.kind;
let ck = node_id.kind();
let is_existing_node_id = existing_node_ids.contains(node_id);
existing_node_ids.remove(ck);
@ -686,12 +686,12 @@ impl RoutingTableInner {
}
// Add new node id to entry
if let Some(old_node_id) = e.add_node_id(*node_id)? {
if let Some(old_node_id) = e.add_node_id(node_id.clone())? {
// Remove any old node id for this crypto kind
if VALID_CRYPTO_KINDS.contains(&ck) {
let bucket_index = routing_table.calculate_bucket_index(&old_node_id);
let bucket = self.get_bucket_mut(bucket_index);
bucket.remove_entry(&old_node_id.value);
bucket.remove_entry(old_node_id.ref_value());
routing_table.kick_queue.lock().insert(bucket_index);
}
}
@ -700,7 +700,7 @@ impl RoutingTableInner {
if VALID_CRYPTO_KINDS.contains(&ck) {
let bucket_index = routing_table.calculate_bucket_index(node_id);
let bucket = self.get_bucket_mut(bucket_index);
bucket.add_existing_entry(node_id.value, entry.clone());
bucket.add_existing_entry(node_id.value(), entry.clone());
// Kick bucket
routing_table.kick_queue.lock().insert(bucket_index);
@ -709,11 +709,11 @@ impl RoutingTableInner {
// Remove from buckets if node id wasn't seen in new peer info list
for node_id in existing_node_ids.iter() {
let ck = node_id.kind;
let ck = node_id.kind();
if VALID_CRYPTO_KINDS.contains(&ck) {
let bucket_index = routing_table.calculate_bucket_index(node_id);
let bucket = self.get_bucket_mut(bucket_index);
bucket.remove_entry(&node_id.value);
bucket.remove_entry(node_id.ref_value());
entry.with_mut_inner(|e| e.remove_node_id(ck));
}
}
@ -763,15 +763,15 @@ impl RoutingTableInner {
let mut supported_node_ids = NodeIdGroup::new();
for node_id in node_ids.iter() {
// Ignore node ids we don't support
if !VALID_CRYPTO_KINDS.contains(&node_id.kind) {
if !VALID_CRYPTO_KINDS.contains(&node_id.kind()) {
continue;
}
supported_node_ids.add(*node_id);
supported_node_ids.add(node_id.clone());
// Find the first in crypto sort order
let bucket_index = routing_table.calculate_bucket_index(node_id);
let bucket = self.get_bucket(bucket_index);
if let Some(entry) = bucket.entry(&node_id.value) {
if let Some(entry) = bucket.entry(node_id.ref_value()) {
// Best entry is the first one in sorted order that exists from the node id list
// Everything else that matches will be overwritten in the bucket and the
// existing noderefs will eventually unref and drop the old unindexed bucketentry
@ -806,10 +806,10 @@ impl RoutingTableInner {
}
// If no entry exists yet, add the first entry to a bucket, possibly evicting a bucket member
let first_node_id = supported_node_ids[0];
let first_node_id = supported_node_ids[0].clone();
let bucket_entry = routing_table.calculate_bucket_index(&first_node_id);
let bucket = self.get_bucket_mut(bucket_entry);
let new_entry = bucket.add_new_entry(first_node_id.value);
let new_entry = bucket.add_new_entry(first_node_id.value());
self.all_entries.insert(new_entry.clone());
routing_table.kick_queue.lock().insert(bucket_entry);
@ -832,9 +832,9 @@ impl RoutingTableInner {
/// Resolve an existing routing table entry using any crypto kind and return a reference to it
#[instrument(level = "trace", skip_all, err)]
pub fn lookup_any_node_ref(&self, node_id_key: BareNodeId) -> EyreResult<Option<NodeRef>> {
pub fn lookup_bare_node_ref(&self, node_id_key: BareNodeId) -> EyreResult<Option<NodeRef>> {
for ck in VALID_CRYPTO_KINDS {
if let Some(nr) = self.lookup_node_ref(NodeId::new(ck, node_id_key))? {
if let Some(nr) = self.lookup_node_ref(NodeId::new(ck, node_id_key.clone()))? {
return Ok(Some(nr));
}
}
@ -844,17 +844,17 @@ impl RoutingTableInner {
/// Resolve an existing routing table entry and return a reference to it
#[instrument(level = "trace", skip_all, err)]
pub fn lookup_node_ref(&self, node_id: NodeId) -> EyreResult<Option<NodeRef>> {
if self.routing_table().matches_own_node_id(&[node_id]) {
if self.routing_table().matches_own_node_id(&[node_id.clone()]) {
bail!("can't look up own node id in routing table");
}
if !VALID_CRYPTO_KINDS.contains(&node_id.kind) {
if !VALID_CRYPTO_KINDS.contains(&node_id.kind()) {
bail!("can't look up node id with invalid crypto kind");
}
let bucket_index = self.routing_table().calculate_bucket_index(&node_id);
let bucket = self.get_bucket(bucket_index);
Ok(bucket
.entry(&node_id.value)
.entry(node_id.ref_value())
.map(|e| NodeRef::new(self.registry(), e)))
}
@ -881,17 +881,17 @@ impl RoutingTableInner {
where
F: FnOnce(Arc<BucketEntry>) -> R,
{
if self.routing_table().matches_own_node_id(&[node_id]) {
if self.routing_table().matches_own_node_id(&[node_id.clone()]) {
veilid_log!(self error "can't look up own node id in routing table");
return None;
}
if !VALID_CRYPTO_KINDS.contains(&node_id.kind) {
if !VALID_CRYPTO_KINDS.contains(&node_id.kind()) {
veilid_log!(self error "can't look up node id with invalid crypto kind");
return None;
}
let bucket_entry = self.routing_table().calculate_bucket_index(&node_id);
let bucket = self.get_bucket(bucket_entry);
bucket.entry(&node_id.value).map(f)
bucket.entry(node_id.ref_value()).map(f)
}
/// Shortcut function to add a node to our routing table if it doesn't exist
@ -1326,7 +1326,7 @@ impl RoutingTableInner {
pub fn find_preferred_closest_nodes<T, O>(
&self,
node_count: usize,
node_id: HashDigest,
hash_coordinate: HashDigest,
mut filters: VecDeque<RoutingTableEntryFilter>,
transform: T,
) -> VeilidAPIResult<Vec<O>>
@ -1337,7 +1337,7 @@ impl RoutingTableInner {
let routing_table = self.routing_table();
// Get the crypto kind
let crypto_kind = node_id.kind;
let crypto_kind = hash_coordinate.kind();
let crypto = self.crypto();
let Some(vcrypto) = crypto.get(crypto_kind) else {
apibail_generic!("invalid crypto kind");
@ -1400,8 +1400,14 @@ impl RoutingTableInner {
};
// distance is the next metric, closer nodes first
let da = vcrypto.distance(&BareHashDigest::from(a_key.value), &node_id.value);
let db = vcrypto.distance(&BareHashDigest::from(b_key.value), &node_id.value);
let da = vcrypto.distance(
&BareHashDigest::from(a_key.value()),
hash_coordinate.ref_value(),
);
let db = vcrypto.distance(
&BareHashDigest::from(b_key.value()),
hash_coordinate.ref_value(),
);
da.cmp(&db)
};
@ -1418,7 +1424,7 @@ impl RoutingTableInner {
closest_nodes: &[NodeRef],
) -> Vec<NodeRef> {
// Lock all noderefs
let kind = node_id.kind;
let kind = node_id.kind();
let mut closest_nodes_locked: Vec<LockedNodeRef> = closest_nodes
.iter()
.filter_map(|nr| {
@ -1593,9 +1599,9 @@ impl RoutingTableInner {
#[instrument(level = "trace", skip_all)]
pub fn make_closest_noderef_sort<'a>(
crypto: &'a Crypto,
node_id: HashDigest,
hash_coordinate: HashDigest,
) -> impl Fn(&LockedNodeRef, &LockedNodeRef) -> core::cmp::Ordering + 'a {
let kind = node_id.kind;
let kind = hash_coordinate.kind();
// Get cryptoversion to check distance with
let vcrypto = crypto.get(kind).unwrap();
@ -1612,8 +1618,14 @@ pub fn make_closest_noderef_sort<'a>(
let b_key = b_entry.node_ids().get(kind).unwrap();
// distance is the next metric, closer nodes first
let da = vcrypto.distance(&BareHashDigest::from(a_key.value), &node_id.value);
let db = vcrypto.distance(&BareHashDigest::from(b_key.value), &node_id.value);
let da = vcrypto.distance(
&BareHashDigest::from(a_key.value()),
hash_coordinate.ref_value(),
);
let db = vcrypto.distance(
&BareHashDigest::from(b_key.value()),
hash_coordinate.ref_value(),
);
da.cmp(&db)
})
})
@ -1622,21 +1634,21 @@ pub fn make_closest_noderef_sort<'a>(
pub fn make_closest_node_id_sort(
crypto: &Crypto,
node_id: NodeId,
hash_coordinate: HashDigest,
) -> impl Fn(&BareNodeId, &BareNodeId) -> core::cmp::Ordering + '_ {
let kind = node_id.kind;
let kind = hash_coordinate.kind();
// Get cryptoversion to check distance with
let vcrypto = crypto.get(kind).unwrap();
move |a: &BareNodeId, b: &BareNodeId| -> core::cmp::Ordering {
// distance is the next metric, closer nodes first
let da = vcrypto.distance(
&BareHashDigest::from(*a),
&BareHashDigest::from(node_id.value),
&BareHashDigest::from(a.bytes()),
hash_coordinate.ref_value(),
);
let db = vcrypto.distance(
&BareHashDigest::from(*b),
&BareHashDigest::from(node_id.value),
&BareHashDigest::from(b.bytes()),
hash_coordinate.ref_value(),
);
da.cmp(&db)
}

View file

@ -77,7 +77,7 @@ impl RoutingTable {
if self.matches_own_node_id(peer_node_ids) {
veilid_log!(self debug "Ignoring own node in bootstrap response");
} else {
for nid in peer.node_ids().iter().copied() {
for nid in peer.node_ids().iter().cloned() {
bootstrapped_peer_id_set.insert(nid);
}
bootstrapped_peers.push(peer);
@ -134,7 +134,7 @@ impl RoutingTable {
let mut rd_peer_ids = BTreeSet::new();
for peer in bootstrapped_peers.iter() {
if peer.routing_domain() == rd {
for nid in peer.node_ids().iter().copied() {
for nid in peer.node_ids().iter().cloned() {
rd_peer_ids.insert(nid);
}
}

View file

@ -30,7 +30,7 @@ impl RoutingTable {
let Some(buckets) = inner.buckets.get(&kind) else {
continue;
};
let sort = make_closest_node_id_sort(&crypto, our_node_id);
let sort = make_closest_node_id_sort(&crypto, our_node_id.into());
let mut closest_peers = BTreeSet::<BareNodeId>::new();
let mut closest_unreliable_count = 0usize;
@ -60,14 +60,14 @@ impl RoutingTable {
BucketEntryState::Unreliable => {
// Add to closest unreliable nodes list
if closest_unreliable_count < KEEP_N_CLOSEST_UNRELIABLE_PEERS_COUNT {
closest_peers.insert(*key);
closest_peers.insert(key.clone());
closest_unreliable_count += 1;
}
}
BucketEntryState::Reliable => {
// Add to closest reliable nodes list
if closest_reliable_count < KEEP_N_CLOSEST_RELIABLE_PEERS_COUNT {
closest_peers.insert(*key);
closest_peers.insert(key.clone());
closest_reliable_count += 1;
}
}

View file

@ -19,7 +19,7 @@ impl RoutingTable {
})
}
/// Fastest routes sort
fn route_sort_latency_fn(a: &(BareRouteId, u64), b: &(BareRouteId, u64)) -> cmp::Ordering {
fn route_sort_latency_fn(a: &(RouteId, u64), b: &(RouteId, u64)) -> cmp::Ordering {
let mut al = a.1;
let mut bl = b.1;
// Treat zero latency as uncalculated
@ -46,14 +46,14 @@ impl RoutingTable {
///
/// If a route doesn't 'need_testing', then we neither test nor drop it
#[instrument(level = "trace", skip(self))]
fn get_allocated_routes_to_test(&self, cur_ts: Timestamp) -> Vec<BareRouteId> {
fn get_allocated_routes_to_test(&self, cur_ts: Timestamp) -> Vec<RouteId> {
let default_route_hop_count = self
.config()
.with(|c| c.network.rpc.default_route_hop_count as usize);
let mut must_test_routes = Vec::<BareRouteId>::new();
let mut unpublished_routes = Vec::<(BareRouteId, u64)>::new();
let mut expired_routes = Vec::<BareRouteId>::new();
let mut must_test_routes = Vec::<RouteId>::new();
let mut unpublished_routes = Vec::<(RouteId, u64)>::new();
let mut expired_routes = Vec::<RouteId>::new();
self.route_spec_store().list_allocated_routes(|k, v| {
let stats = v.get_stats();
// Ignore nodes that don't need testing
@ -63,15 +63,15 @@ impl RoutingTable {
// If this has been published, always test if we need it
// Also if the route has never been tested, test it at least once
if v.is_published() || stats.last_known_valid_ts.is_none() {
must_test_routes.push(*k);
must_test_routes.push(k.clone());
}
// If this is a default route hop length, include it in routes to keep alive
else if v.hop_count() == default_route_hop_count {
unpublished_routes.push((*k, stats.latency.average.as_u64()));
unpublished_routes.push((k.clone(), stats.latency.average.as_u64()));
}
// Else this is a route that hasnt been used recently enough and we can tear it down
else {
expired_routes.push(*k);
expired_routes.push(k.clone());
}
Option::<()>::None
});
@ -85,13 +85,13 @@ impl RoutingTable {
background_safety_route_count,
unpublished_routes.len(),
)) {
must_test_routes.push(unpublished_route.0);
must_test_routes.push(unpublished_route.0.clone());
}
// Kill off all but N unpublished routes rather than testing them
if unpublished_routes.len() > background_safety_route_count {
for x in &unpublished_routes[background_safety_route_count..] {
expired_routes.push(x.0);
expired_routes.push(x.0.clone());
}
}
@ -110,7 +110,7 @@ impl RoutingTable {
async fn test_route_set(
&self,
stop_token: StopToken,
routes_needing_testing: Vec<BareRouteId>,
routes_needing_testing: Vec<RouteId>,
) -> EyreResult<()> {
if routes_needing_testing.is_empty() {
return Ok(());
@ -119,7 +119,7 @@ impl RoutingTable {
#[derive(Default, Debug)]
struct TestRouteContext {
dead_routes: Vec<BareRouteId>,
dead_routes: Vec<RouteId>,
}
let ctx = Arc::new(Mutex::new(TestRouteContext::default()));
@ -129,7 +129,7 @@ impl RoutingTable {
let ctx = ctx.clone();
unord.push(
async move {
let success = match self.route_spec_store().test_route(r).await {
let success = match self.route_spec_store().test_route(r.clone()).await {
// Test had result
Ok(Some(v)) => v,
// Test could not be performed at this time
@ -242,7 +242,7 @@ impl RoutingTable {
let remote_routes_needing_testing = self.route_spec_store().list_remote_routes(|k, v| {
let stats = v.get_stats();
if stats.needs_testing(cur_ts) {
Some(*k)
Some(k.clone())
} else {
None
}

View file

@ -4,7 +4,7 @@ use crate::{routing_table::*, RegisteredComponents, VALID_CRYPTO_KINDS};
fn make_mock_typed_node_id(kind: CryptoKind, idx: u8) -> NodeId {
NodeId::new(
kind,
BareNodeId::new([
BareNodeId::new(&[
idx, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0,
]),

View file

@ -63,7 +63,7 @@ impl SignedDirectNodeInfo {
let node_info_bytes = Self::make_signature_bytes(&node_info, timestamp)?;
let typed_signatures =
crypto.generate_signatures(&node_info_bytes, &typed_key_pairs, |kp, s| {
Signature::new(kp.kind, s)
Signature::new(kp.kind(), s)
})?;
Ok(Self {
node_info,

View file

@ -93,7 +93,7 @@ impl SignedRelayedNodeInfo {
Self::make_signature_bytes(&node_info, &relay_ids, &relay_info, timestamp)?;
let typed_signatures =
crypto.generate_signatures(&node_info_bytes, &typed_key_pairs, |kp, s| {
Signature::new(kp.kind, s)
Signature::new(kp.kind(), s)
})?;
Ok(Self {
node_info,
@ -121,8 +121,8 @@ impl SignedRelayedNodeInfo {
// Add relay ids to signature
for relay_id in relay_ids {
let mut rid_msg = ::capnp::message::Builder::new_default();
let mut rid_builder = rid_msg.init_root::<veilid_capnp::typed_key::Builder>();
encode_typed_node_id(relay_id, &mut rid_builder);
let mut rid_builder = rid_msg.init_root::<veilid_capnp::node_id::Builder>();
encode_node_id(relay_id, &mut rid_builder);
sig_bytes.append(&mut builder_to_vec(rid_msg).map_err(VeilidAPIError::internal)?);
}

View file

@ -0,0 +1,72 @@
use super::*;
use paste::paste;
// Utility Macros
macro_rules! define_typed_byte_data_coder {
($capnp_name: ident, $rust_name: ident) => {
paste! {
pub fn [< decode_ $capnp_name >](
reader: &veilid_capnp::$capnp_name::Reader,
) -> Result<$rust_name, RPCError> {
rpc_ignore_missing_property!(reader, value);
let value = reader.get_value()?;
let kind = reader.get_kind();
Ok($rust_name::new(
CryptoKind::from(kind.to_be_bytes()),
[< Bare $rust_name >]::new(value),
))
}
pub fn [< encode_ $capnp_name >](
$capnp_name: &$rust_name,
builder: &mut veilid_capnp::$capnp_name::Builder,
) {
builder.set_kind(u32::from_be_bytes($capnp_name.kind().0));
builder.set_value($capnp_name.ref_value());
}
}
};
}
macro_rules! define_untyped_byte_data_coder {
($capnp_name: ident, $rust_name: ident) => {
paste! {
pub fn [< decode_ $capnp_name >](
reader: &veilid_capnp::$capnp_name::Reader,
) -> Result<$rust_name, RPCError> {
rpc_ignore_missing_property!(reader, value);
let value = reader.get_value()?;
Ok(
[< $rust_name >]::new(value),
)
}
pub fn [< encode_ $capnp_name >](
$capnp_name: &$rust_name,
builder: &mut veilid_capnp::$capnp_name::Builder,
) {
builder.set_value($capnp_name);
}
}
};
}
// RecordKey
define_typed_byte_data_coder!(record_key, RecordKey);
// BlockId
#[cfg(feature = "unstable-blockstore")]
define_typed_byte_data_coder!(block_id, BlockId);
// NodeId
define_typed_byte_data_coder!(node_id, NodeId);
// PublicKey
define_typed_byte_data_coder!(public_key, PublicKey);
// RouteId
#[cfg(feature = "unstable-blockstore")]
define_typed_byte_data_coder!(route_id, RouteId);
// Signature
define_typed_byte_data_coder!(signature, Signature);
// Nonce
define_untyped_byte_data_coder!(nonce, BareNonce);

View file

@ -1,40 +0,0 @@
use super::*;
use core::convert::TryInto;
pub fn decode_key256(public_key: &veilid_capnp::key256::Reader) -> BarePublicKey {
let u0 = public_key.get_u0().to_be_bytes();
let u1 = public_key.get_u1().to_be_bytes();
let u2 = public_key.get_u2().to_be_bytes();
let u3 = public_key.get_u3().to_be_bytes();
let mut x: [u8; 32] = Default::default();
x[0..8].copy_from_slice(&u0);
x[8..16].copy_from_slice(&u1);
x[16..24].copy_from_slice(&u2);
x[24..32].copy_from_slice(&u3);
BarePublicKey::new(x)
}
pub fn encode_key256(key: &BarePublicKey, builder: &mut veilid_capnp::key256::Builder) {
builder.set_u0(u64::from_be_bytes(
key.bytes[0..8]
.try_into()
.expect("slice with incorrect length"),
));
builder.set_u1(u64::from_be_bytes(
key.bytes[8..16]
.try_into()
.expect("slice with incorrect length"),
));
builder.set_u2(u64::from_be_bytes(
key.bytes[16..24]
.try_into()
.expect("slice with incorrect length"),
));
builder.set_u3(u64::from_be_bytes(
key.bytes[24..32]
.try_into()
.expect("slice with incorrect length"),
));
}

View file

@ -1,13 +1,12 @@
mod address;
mod address_type_set;
mod byte_array_types;
mod dial_info;
mod dial_info_class;
mod dial_info_detail;
mod key256;
mod network_class;
mod node_info;
mod node_status;
mod nonce;
mod operations;
mod peer_info;
mod private_safety_route;
@ -15,7 +14,6 @@ mod protocol_type_set;
mod sender_info;
mod sequencing;
mod signal_info;
mod signature512;
mod signed_direct_node_info;
mod signed_node_info;
mod signed_relayed_node_info;
@ -24,19 +22,16 @@ mod signed_value_descriptor;
mod socket_address;
#[cfg(feature = "unstable-tunnels")]
mod tunnel;
mod typed_key;
mod typed_signature;
pub use address::*;
pub use address_type_set::*;
pub use byte_array_types::*;
pub use dial_info::*;
pub use dial_info_class::*;
pub use dial_info_detail::*;
pub use key256::*;
pub use network_class::*;
pub use node_info::*;
pub use node_status::*;
pub use nonce::*;
pub use operations::*;
pub use peer_info::*;
pub use private_safety_route::*;
@ -44,7 +39,6 @@ pub use protocol_type_set::*;
pub use sender_info::*;
pub use sequencing::*;
pub use signal_info::*;
pub use signature512::*;
pub use signed_direct_node_info::*;
pub use signed_node_info::*;
pub use signed_relayed_node_info::*;
@ -53,8 +47,6 @@ pub use signed_value_descriptor::*;
pub use socket_address::*;
#[cfg(feature = "unstable-tunnels")]
pub use tunnel::*;
pub use typed_key::*;
pub use typed_signature::*;
use super::*;

View file

@ -1,31 +0,0 @@
use super::*;
pub fn encode_nonce(nonce: &BareNonce, builder: &mut veilid_capnp::nonce24::Builder) {
builder.set_u0(u64::from_be_bytes(
nonce.bytes[0..8]
.try_into()
.expect("slice with incorrect length"),
));
builder.set_u1(u64::from_be_bytes(
nonce.bytes[8..16]
.try_into()
.expect("slice with incorrect length"),
));
builder.set_u2(u64::from_be_bytes(
nonce.bytes[16..24]
.try_into()
.expect("slice with incorrect length"),
));
}
pub fn decode_nonce(reader: &veilid_capnp::nonce24::Reader) -> BareNonce {
let u0 = reader.get_u0().to_be_bytes();
let u1 = reader.get_u1().to_be_bytes();
let u2 = reader.get_u2().to_be_bytes();
BareNonce::new([
u0[0], u0[1], u0[2], u0[3], u0[4], u0[5], u0[6], u0[7], // u0
u1[0], u1[1], u1[2], u1[3], u1[4], u1[5], u1[6], u1[7], // u1
u2[0], u2[1], u2[2], u2[3], u2[4], u2[5], u2[6], u2[7], // u2
])
}

View file

@ -36,7 +36,7 @@ impl RPCOperationFindNodeQ {
) -> Result<Self, RPCError> {
rpc_ignore_missing_property!(reader, node_id);
let ni_reader = reader.get_node_id()?;
let node_id = decode_typed_node_id(&ni_reader)?;
let node_id = decode_node_id(&ni_reader)?;
rpc_ignore_missing_property!(reader, capabilities);
let cap_reader = reader.get_capabilities()?;
@ -60,7 +60,7 @@ impl RPCOperationFindNodeQ {
builder: &mut veilid_capnp::operation_find_node_q::Builder,
) -> Result<(), RPCError> {
let mut ni_builder = builder.reborrow().init_node_id();
encode_typed_node_id(&self.node_id, &mut ni_builder);
encode_node_id(&self.node_id, &mut ni_builder);
let mut cap_builder = builder
.reborrow()

View file

@ -58,7 +58,7 @@ impl RPCOperationGetValueQ {
) -> Result<Self, RPCError> {
rpc_ignore_missing_property!(reader, key);
let k_reader = reader.get_key()?;
let key = decode_typed_record_key(&k_reader)?;
let key = decode_record_key(&k_reader)?;
let subkey = reader.get_subkey();
let want_descriptor = reader.get_want_descriptor();
Ok(Self {
@ -72,7 +72,7 @@ impl RPCOperationGetValueQ {
builder: &mut veilid_capnp::operation_get_value_q::Builder,
) -> Result<(), RPCError> {
let mut k_builder = builder.reborrow().init_key();
encode_typed_record_key(&self.key, &mut k_builder);
encode_record_key(&self.key, &mut k_builder);
builder.set_subkey(self.subkey);
builder.set_want_descriptor(self.want_descriptor);
Ok(())
@ -150,7 +150,7 @@ impl RPCOperationGetValueA {
// And the signed value data
if !value
.validate(descriptor.owner(), get_value_context.subkey, &vcrypto)
.validate(descriptor.ref_owner(), get_value_context.subkey, &vcrypto)
.map_err(RPCError::protocol)?
{
return Err(RPCError::protocol("signed value data did not validate"));

View file

@ -54,7 +54,7 @@ impl RPCOperationInspectValueQ {
) -> Result<Self, RPCError> {
rpc_ignore_missing_property!(reader, key);
let k_reader = reader.get_key()?;
let key = decode_typed_record_key(&k_reader)?;
let key = decode_record_key(&k_reader)?;
rpc_ignore_missing_property!(reader, subkeys);
let sk_reader = reader.get_subkeys()?;
@ -89,7 +89,7 @@ impl RPCOperationInspectValueQ {
builder: &mut veilid_capnp::operation_inspect_value_q::Builder,
) -> Result<(), RPCError> {
let mut k_builder = builder.reborrow().init_key();
encode_typed_record_key(&self.key, &mut k_builder);
encode_record_key(&self.key, &mut k_builder);
let mut sk_builder = builder.reborrow().init_subkeys(
self.subkeys

View file

@ -4,7 +4,7 @@ use super::*;
pub(in crate::rpc_processor) struct RoutedOperation {
routing_domain: RoutingDomain,
sequencing: Sequencing,
signatures: Vec<BareSignature>,
signatures: Vec<Signature>,
nonce: BareNonce,
data: Vec<u8>,
}
@ -46,11 +46,11 @@ impl RoutedOperation {
pub fn sequencing(&self) -> Sequencing {
self.sequencing
}
pub fn signatures(&self) -> &[BareSignature] {
pub fn signatures(&self) -> &[Signature] {
&self.signatures
}
pub fn add_signature(&mut self, signature: BareSignature) {
pub fn add_signature(&mut self, signature: Signature) {
self.signatures.push(signature);
}
@ -71,22 +71,23 @@ impl RoutedOperation {
) -> Result<Self, RPCError> {
rpc_ignore_missing_property!(reader, signatures);
let sigs_reader = reader.get_signatures()?;
let mut signatures = Vec::<BareSignature>::with_capacity(
let mut signatures = Vec::<Signature>::with_capacity(
sigs_reader
.len()
.try_into()
.map_err(RPCError::map_internal("too many signatures"))?,
);
for s in sigs_reader.iter() {
// TODO: wants .ignore_ok() eventually
let sig = decode_signature512(&s);
let Some(sig) = decode_signature(&s).ignore_ok()? else {
continue;
};
signatures.push(sig);
}
let sequencing = decode_sequencing(reader.get_sequencing()?);
rpc_ignore_missing_property!(reader, nonce);
let n_reader = reader.get_nonce()?;
let nonce = decode_nonce(&n_reader);
let nonce = decode_nonce(&n_reader)?;
rpc_ignore_missing_property!(reader, data);
let data = reader.get_data()?;
@ -114,7 +115,7 @@ impl RoutedOperation {
);
for (i, sig) in self.signatures.iter().enumerate() {
let mut sig_builder = sigs_builder.reborrow().get(i as u32);
encode_signature512(sig, &mut sig_builder);
encode_signature(sig, &mut sig_builder);
}
let mut n_builder = builder.reborrow().init_nonce();
encode_nonce(&self.nonce, &mut n_builder);

View file

@ -78,7 +78,7 @@ impl RPCOperationSetValueQ {
) -> Result<Self, RPCError> {
rpc_ignore_missing_property!(reader, key);
let k_reader = reader.get_key()?;
let key = decode_typed_record_key(&k_reader)?;
let key = decode_record_key(&k_reader)?;
let subkey = reader.get_subkey();
@ -105,7 +105,7 @@ impl RPCOperationSetValueQ {
builder: &mut veilid_capnp::operation_set_value_q::Builder,
) -> Result<(), RPCError> {
let mut k_builder = builder.reborrow().init_key();
encode_typed_record_key(&self.key, &mut k_builder);
encode_record_key(&self.key, &mut k_builder);
builder.set_subkey(self.subkey);
let mut v_builder = builder.reborrow().init_value();
encode_signed_value_data(&self.value, &mut v_builder)?;
@ -164,7 +164,7 @@ impl RPCOperationSetValueA {
// And the signed value data
if !value
.validate(
set_value_context.descriptor.owner(),
set_value_context.descriptor.ref_owner(),
set_value_context.subkey,
&vcrypto,
)

View file

@ -106,7 +106,7 @@ impl RPCOperationValueChanged {
) -> Result<Self, RPCError> {
rpc_ignore_missing_property!(reader, key);
let k_reader = reader.get_key()?;
let key = decode_typed_record_key(&k_reader)?;
let key = decode_record_key(&k_reader)?;
rpc_ignore_missing_property!(reader, subkeys);
let sk_reader = reader.get_subkeys()?;
@ -149,7 +149,7 @@ impl RPCOperationValueChanged {
builder: &mut veilid_capnp::operation_value_changed::Builder,
) -> Result<(), RPCError> {
let mut k_builder = builder.reborrow().init_key();
encode_typed_record_key(&self.key, &mut k_builder);
encode_record_key(&self.key, &mut k_builder);
let mut sk_builder = builder.reborrow().init_subkeys(
self.subkeys

View file

@ -10,8 +10,8 @@ pub(in crate::rpc_processor) struct RPCOperationWatchValueQ {
expiration: u64,
count: u32,
watch_id: Option<u64>,
watcher: BarePublicKey,
signature: BareSignature,
watcher: PublicKey,
signature: Signature,
}
impl RPCOperationWatchValueQ {
@ -21,7 +21,7 @@ impl RPCOperationWatchValueQ {
expiration: u64,
count: u32,
watch_id: Option<u64>,
watcher: BareKeyPair,
watcher: KeyPair,
vcrypto: &CryptoSystemGuard<'_>,
) -> Result<Self, RPCError> {
if subkeys.ranges_len() > MAX_WATCH_VALUE_Q_SUBKEY_RANGES_LEN {
@ -34,9 +34,16 @@ impl RPCOperationWatchValueQ {
}
let signature_data = Self::make_signature_data(&key, &subkeys, expiration, count, watch_id);
let signature = vcrypto
.sign(&watcher.key, &watcher.secret, &signature_data)
.map_err(RPCError::protocol)?;
let signature = Signature::new(
vcrypto.kind(),
vcrypto
.sign(
watcher.ref_value().ref_key(),
watcher.ref_value().ref_secret(),
&signature_data,
)
.map_err(RPCError::protocol)?,
);
Ok(Self {
key,
@ -44,7 +51,7 @@ impl RPCOperationWatchValueQ {
expiration,
count,
watch_id,
watcher: watcher.key,
watcher: PublicKey::new(watcher.kind(), watcher.ref_value().key()),
signature,
})
}
@ -60,9 +67,9 @@ impl RPCOperationWatchValueQ {
let subkeys_ranges_len = subkeys.ranges_len();
let mut sig_data =
Vec::with_capacity(PUBLIC_KEY_LENGTH + 4 + (subkeys_ranges_len * 8) + 8 + 8);
sig_data.extend_from_slice(&key.kind.0);
sig_data.extend_from_slice(&key.value.bytes);
Vec::with_capacity(key.ref_value().len() + 4 + (subkeys_ranges_len * 8) + 8 + 8);
sig_data.extend_from_slice(&key.kind().0);
sig_data.extend_from_slice(key.ref_value());
for sk in subkeys.ranges() {
sig_data.extend_from_slice(&sk.start().to_le_bytes());
sig_data.extend_from_slice(&sk.end().to_le_bytes());
@ -77,7 +84,7 @@ impl RPCOperationWatchValueQ {
pub fn validate(&mut self, validate_context: &RPCValidateContext) -> Result<(), RPCError> {
let crypto = validate_context.crypto();
let Some(vcrypto) = crypto.get(self.key.kind) else {
let Some(vcrypto) = crypto.get(self.watcher.kind()) else {
return Err(RPCError::protocol("unsupported cryptosystem"));
};
@ -89,7 +96,11 @@ impl RPCOperationWatchValueQ {
self.watch_id,
);
if !vcrypto
.verify(&self.watcher, &sig_data, &self.signature)
.verify(
self.watcher.ref_value(),
&sig_data,
self.signature.ref_value(),
)
.map_err(RPCError::protocol)?
{
return Err(RPCError::protocol("failed to validate watcher signature"));
@ -129,11 +140,11 @@ impl RPCOperationWatchValueQ {
}
#[expect(dead_code)]
pub fn watcher(&self) -> &BarePublicKey {
pub fn watcher(&self) -> &PublicKey {
&self.watcher
}
#[expect(dead_code)]
pub fn signature(&self) -> &BareSignature {
pub fn signature(&self) -> &Signature {
&self.signature
}
pub fn destructure(
@ -144,8 +155,8 @@ impl RPCOperationWatchValueQ {
u64,
u32,
Option<u64>,
BarePublicKey,
BareSignature,
PublicKey,
Signature,
) {
(
self.key,
@ -164,7 +175,7 @@ impl RPCOperationWatchValueQ {
) -> Result<Self, RPCError> {
rpc_ignore_missing_property!(reader, key);
let k_reader = reader.get_key()?;
let key = decode_typed_record_key(&k_reader)?;
let key = decode_record_key(&k_reader)?;
rpc_ignore_missing_property!(reader, subkeys);
let sk_reader = reader.get_subkeys()?;
@ -195,11 +206,11 @@ impl RPCOperationWatchValueQ {
rpc_ignore_missing_property!(reader, watcher);
let w_reader = reader.get_watcher()?;
let watcher = decode_key256(&w_reader);
let watcher = decode_public_key(&w_reader)?;
rpc_ignore_missing_property!(reader, signature);
let s_reader = reader.get_signature()?;
let signature = decode_signature512(&s_reader);
let signature = decode_signature(&s_reader)?;
Ok(Self {
key,
@ -217,7 +228,7 @@ impl RPCOperationWatchValueQ {
builder: &mut veilid_capnp::operation_watch_value_q::Builder,
) -> Result<(), RPCError> {
let mut k_builder = builder.reborrow().init_key();
encode_typed_record_key(&self.key, &mut k_builder);
encode_record_key(&self.key, &mut k_builder);
let mut sk_builder = builder.reborrow().init_subkeys(
self.subkeys
@ -235,10 +246,10 @@ impl RPCOperationWatchValueQ {
builder.set_watch_id(self.watch_id.unwrap_or(0u64));
let mut w_builder = builder.reborrow().init_watcher();
encode_key256(&self.watcher, &mut w_builder);
encode_public_key(&self.watcher, &mut w_builder);
let mut s_builder = builder.reborrow().init_signature();
encode_signature512(&self.signature, &mut s_builder);
encode_signature(&self.signature, &mut s_builder);
Ok(())
}

View file

@ -12,7 +12,7 @@ pub fn decode_peer_info(
let mut node_ids = NodeIdGroup::with_capacity(nids_reader.len() as usize);
for nid_reader in nids_reader.iter() {
let Some(nid) = decode_typed_node_id(&nid_reader).ignore_ok()? else {
let Some(nid) = decode_node_id(&nid_reader).ignore_ok()? else {
continue;
};
node_ids.add(nid);
@ -38,7 +38,7 @@ pub fn encode_peer_info(
.map_err(RPCError::map_invalid_format("out of bound error"))?,
);
for (i, nid) in peer_info.node_ids().iter().enumerate() {
encode_typed_node_id(
encode_node_id(
nid,
&mut nids_builder.reborrow().get(
i.try_into()

View file

@ -6,7 +6,7 @@ pub fn decode_route_hop_data(
reader: &veilid_capnp::route_hop_data::Reader,
) -> Result<RouteHopData, RPCError> {
rpc_ignore_missing_property!(reader, nonce);
let nonce = decode_nonce(&reader.get_nonce()?);
let nonce = decode_nonce(&reader.get_nonce()?)?;
rpc_ignore_missing_property!(reader, blob);
let blob = reader.get_blob()?.to_vec();
@ -45,7 +45,7 @@ pub fn decode_route_hop(
let node = match reader.get_node().which()? {
veilid_capnp::route_hop::node::Which::NodeId(ni) => {
let ni_reader = ni?;
RouteNode::BareNodeId(decode_key256(&ni_reader).into())
RouteNode::NodeId(decode_node_id(&ni_reader)?)
}
veilid_capnp::route_hop::node::Which::PeerInfo(pi) => {
let pi_reader = pi?;
@ -71,9 +71,9 @@ pub fn encode_route_hop(
) -> Result<(), RPCError> {
let node_builder = builder.reborrow().init_node();
match &route_hop.node {
RouteNode::BareNodeId(ni) => {
RouteNode::NodeId(ni) => {
let mut ni_builder = node_builder.init_node_id();
encode_key256(&(*ni).into(), &mut ni_builder);
encode_node_id(ni, &mut ni_builder);
}
RouteNode::PeerInfo(pi) => {
let mut pi_builder = node_builder.init_peer_info();
@ -94,8 +94,7 @@ pub fn decode_private_route(
reader: &veilid_capnp::private_route::Reader,
) -> Result<PrivateRoute, RPCError> {
rpc_ignore_missing_property!(reader, public_key);
let public_key = decode_typed_public_key(&reader.get_public_key()?)?;
let hop_count = reader.get_hop_count();
let public_key = decode_public_key(&reader.get_public_key()?)?;
let hops = match reader.get_hops().which()? {
veilid_capnp::private_route::hops::Which::FirstHop(rh_reader) => {
@ -109,22 +108,17 @@ pub fn decode_private_route(
veilid_capnp::private_route::hops::Which::Empty(_) => PrivateRouteHops::Empty,
};
Ok(PrivateRoute {
public_key,
hop_count,
hops,
})
Ok(PrivateRoute { public_key, hops })
}
pub fn encode_private_route(
private_route: &PrivateRoute,
builder: &mut veilid_capnp::private_route::Builder,
) -> Result<(), RPCError> {
encode_typed_public_key(
encode_public_key(
&private_route.public_key,
&mut builder.reborrow().init_public_key(),
);
builder.set_hop_count(private_route.hop_count);
let mut h_builder = builder.reborrow().init_hops();
match &private_route.hops {
PrivateRouteHops::FirstHop(first_hop) => {
@ -149,8 +143,7 @@ pub fn decode_safety_route(
reader: &veilid_capnp::safety_route::Reader,
) -> Result<SafetyRoute, RPCError> {
rpc_ignore_missing_property!(reader, public_key);
let public_key = decode_typed_public_key(&reader.get_public_key()?)?;
let hop_count = reader.get_hop_count();
let public_key = decode_public_key(&reader.get_public_key()?)?;
let hops = match reader.get_hops().which()? {
veilid_capnp::safety_route::hops::Which::Data(rhd_reader) => {
let rhd_reader = rhd_reader?;
@ -162,22 +155,17 @@ pub fn decode_safety_route(
}
};
Ok(SafetyRoute {
public_key,
hop_count,
hops,
})
Ok(SafetyRoute { public_key, hops })
}
pub fn encode_safety_route(
safety_route: &SafetyRoute,
builder: &mut veilid_capnp::safety_route::Builder,
) -> Result<(), RPCError> {
encode_typed_public_key(
encode_public_key(
&safety_route.public_key,
&mut builder.reborrow().init_public_key(),
);
builder.set_hop_count(safety_route.hop_count);
let h_builder = builder.reborrow().init_hops();
match &safety_route.hops {
SafetyRouteHops::Data(rhd) => {

View file

@ -1,52 +0,0 @@
use super::*;
pub fn encode_signature512(sig: &BareSignature, builder: &mut veilid_capnp::signature512::Builder) {
let sig = &sig.bytes;
builder.set_u0(u64::from_be_bytes(
sig[0..8].try_into().expect("slice with incorrect length"),
));
builder.set_u1(u64::from_be_bytes(
sig[8..16].try_into().expect("slice with incorrect length"),
));
builder.set_u2(u64::from_be_bytes(
sig[16..24].try_into().expect("slice with incorrect length"),
));
builder.set_u3(u64::from_be_bytes(
sig[24..32].try_into().expect("slice with incorrect length"),
));
builder.set_u4(u64::from_be_bytes(
sig[32..40].try_into().expect("slice with incorrect length"),
));
builder.set_u5(u64::from_be_bytes(
sig[40..48].try_into().expect("slice with incorrect length"),
));
builder.set_u6(u64::from_be_bytes(
sig[48..56].try_into().expect("slice with incorrect length"),
));
builder.set_u7(u64::from_be_bytes(
sig[56..64].try_into().expect("slice with incorrect length"),
));
}
pub fn decode_signature512(reader: &veilid_capnp::signature512::Reader) -> BareSignature {
let u0 = reader.get_u0().to_be_bytes();
let u1 = reader.get_u1().to_be_bytes();
let u2 = reader.get_u2().to_be_bytes();
let u3 = reader.get_u3().to_be_bytes();
let u4 = reader.get_u4().to_be_bytes();
let u5 = reader.get_u5().to_be_bytes();
let u6 = reader.get_u6().to_be_bytes();
let u7 = reader.get_u7().to_be_bytes();
BareSignature::new([
u0[0], u0[1], u0[2], u0[3], u0[4], u0[5], u0[6], u0[7], // u0
u1[0], u1[1], u1[2], u1[3], u1[4], u1[5], u1[6], u1[7], // u1
u2[0], u2[1], u2[2], u2[3], u2[4], u2[5], u2[6], u2[7], // u2
u3[0], u3[1], u3[2], u3[3], u3[4], u3[5], u3[6], u3[7], // u3
u4[0], u4[1], u4[2], u4[3], u4[4], u4[5], u4[6], u4[7], // u4
u5[0], u5[1], u5[2], u5[3], u5[4], u5[5], u5[6], u5[7], // u5
u6[0], u6[1], u6[2], u6[3], u6[4], u6[5], u6[6], u6[7], // u6
u7[0], u7[1], u7[2], u7[3], u7[4], u7[5], u7[6], u7[7], // u7
])
}

View file

@ -14,7 +14,7 @@ pub fn decode_signed_direct_node_info(
let sigs_len = rpc_ignore_max_len!(sigs_reader, MAX_CRYPTO_KINDS);
let mut typed_signatures = Vec::with_capacity(sigs_len);
for sig_reader in sigs_reader {
let Some(typed_signature) = decode_typed_signature(&sig_reader).ignore_ok()? else {
let Some(typed_signature) = decode_signature(&sig_reader).ignore_ok()? else {
continue;
};
typed_signatures.push(typed_signature);
@ -47,7 +47,7 @@ pub fn encode_signed_direct_node_info(
.map_err(RPCError::map_invalid_format("out of bound error"))?,
);
for (i, typed_signature) in signed_direct_node_info.signatures().iter().enumerate() {
encode_typed_signature(
encode_signature(
typed_signature,
&mut sigs_builder.reborrow().get(
i.try_into()

View file

@ -12,7 +12,7 @@ pub fn decode_signed_relayed_node_info(
let rid_count = rpc_ignore_max_len!(rids_reader, MAX_CRYPTO_KINDS);
let mut relay_ids = NodeIdGroup::with_capacity(rid_count);
for rid_reader in rids_reader {
let Some(relay_id) = decode_typed_node_id(&rid_reader).ignore_ok()? else {
let Some(relay_id) = decode_node_id(&rid_reader).ignore_ok()? else {
continue;
};
relay_ids.add(relay_id);
@ -31,7 +31,7 @@ pub fn decode_signed_relayed_node_info(
let sig_count = rpc_ignore_max_len!(sigs_reader, MAX_CRYPTO_KINDS);
let mut typed_signatures = Vec::with_capacity(sig_count);
for sig_reader in sigs_reader {
let Some(typed_signature) = decode_typed_signature(&sig_reader).ignore_ok()? else {
let Some(typed_signature) = decode_signature(&sig_reader).ignore_ok()? else {
continue;
};
typed_signatures.push(typed_signature);
@ -61,7 +61,7 @@ pub fn encode_signed_relayed_node_info(
.map_err(RPCError::map_invalid_format("out of bound error"))?,
);
for (i, typed_key) in signed_relayed_node_info.relay_ids().iter().enumerate() {
encode_typed_node_id(
encode_node_id(
typed_key,
&mut rids_builder.reborrow().get(
i.try_into()
@ -85,7 +85,7 @@ pub fn encode_signed_relayed_node_info(
.map_err(RPCError::map_invalid_format("out of bound error"))?,
);
for (i, typed_signature) in signed_relayed_node_info.signatures().iter().enumerate() {
encode_typed_signature(
encode_signature(
typed_signature,
&mut sigs_builder.reborrow().get(
i.try_into()

View file

@ -9,10 +9,10 @@ pub fn decode_signed_value_data(
let data = reader.get_data()?.to_vec();
rpc_ignore_missing_property!(reader, writer);
let wr = reader.get_writer()?;
let writer = decode_key256(&wr);
let writer = decode_public_key(&wr)?;
rpc_ignore_missing_property!(reader, signature);
let sr = reader.get_signature()?;
let signature = decode_signature512(&sr);
let signature = decode_signature(&sr)?;
Ok(SignedValueData::new(
ValueData::new_with_seq(seq, data, writer).map_err(RPCError::protocol)?,
@ -27,8 +27,8 @@ pub fn encode_signed_value_data(
builder.set_seq(signed_value_data.value_data().seq());
builder.set_data(signed_value_data.value_data().data());
let mut wb = builder.reborrow().init_writer();
encode_key256(signed_value_data.value_data().writer(), &mut wb);
encode_public_key(signed_value_data.value_data().ref_writer(), &mut wb);
let mut sb = builder.reborrow().init_signature();
encode_signature512(signed_value_data.signature(), &mut sb);
encode_signature(signed_value_data.signature(), &mut sb);
Ok(())
}

View file

@ -6,12 +6,12 @@ pub fn decode_signed_value_descriptor(
) -> Result<SignedValueDescriptor, RPCError> {
rpc_ignore_missing_property!(reader, owner);
let or = reader.get_owner()?;
let owner = decode_key256(&or);
let owner = decode_public_key(&or)?;
rpc_ignore_missing_property!(reader, schema_data);
let schema_data = reader.get_schema_data()?.to_vec();
rpc_ignore_missing_property!(reader, signature);
let sr = reader.get_signature()?;
let signature = decode_signature512(&sr);
let signature = decode_signature(&sr)?;
Ok(SignedValueDescriptor::new(owner, schema_data, signature))
}
@ -20,9 +20,9 @@ pub fn encode_signed_value_descriptor(
builder: &mut veilid_capnp::signed_value_descriptor::Builder,
) -> Result<(), RPCError> {
let mut ob = builder.reborrow().init_owner();
encode_key256(signed_value_descriptor.owner(), &mut ob);
encode_public_key(signed_value_descriptor.ref_owner(), &mut ob);
builder.set_schema_data(signed_value_descriptor.schema_data());
let mut sb = builder.reborrow().init_signature();
encode_signature512(signed_value_descriptor.signature(), &mut sb);
encode_signature(signed_value_descriptor.ref_signature(), &mut sb);
Ok(())
}

View file

@ -1,62 +0,0 @@
use super::*;
pub fn decode_typed_public_key(
reader: &veilid_capnp::typed_key::Reader,
) -> Result<PublicKey, RPCError> {
rpc_ignore_missing_property!(reader, key);
let key_reader = reader.get_key()?;
let kind = reader.get_kind();
Ok(PublicKey::new(
CryptoKind::from(kind.to_be_bytes()),
decode_key256(&key_reader),
))
}
pub fn encode_typed_public_key(
typed_key: &PublicKey,
builder: &mut veilid_capnp::typed_key::Builder,
) {
builder.set_kind(u32::from_be_bytes(typed_key.kind.0));
let mut key_builder = builder.reborrow().init_key();
encode_key256(&typed_key.value, &mut key_builder);
}
pub fn decode_typed_node_id(reader: &veilid_capnp::typed_key::Reader) -> Result<NodeId, RPCError> {
rpc_ignore_missing_property!(reader, key);
let key_reader = reader.get_key()?;
let kind = reader.get_kind();
Ok(NodeId::new(
CryptoKind::from(kind.to_be_bytes()),
BareNodeId::new(decode_key256(&key_reader).bytes),
))
}
pub fn encode_typed_node_id(typed_key: &NodeId, builder: &mut veilid_capnp::typed_key::Builder) {
builder.set_kind(u32::from_be_bytes(typed_key.kind.0));
let mut key_builder = builder.reborrow().init_key();
encode_key256(&BarePublicKey::new(typed_key.value.bytes), &mut key_builder);
}
pub fn decode_typed_record_key(
reader: &veilid_capnp::typed_key::Reader,
) -> Result<RecordKey, RPCError> {
rpc_ignore_missing_property!(reader, key);
let key_reader = reader.get_key()?;
let kind = reader.get_kind();
Ok(RecordKey::new(
CryptoKind::from(kind.to_be_bytes()),
BareRecordKey::new(decode_key256(&key_reader).bytes),
))
}
pub fn encode_typed_record_key(
typed_key: &RecordKey,
builder: &mut veilid_capnp::typed_key::Builder,
) {
builder.set_kind(u32::from_be_bytes(typed_key.kind.0));
let mut key_builder = builder.reborrow().init_key();
encode_key256(&BarePublicKey::new(typed_key.value.bytes), &mut key_builder);
}

View file

@ -1,23 +0,0 @@
use super::*;
pub fn decode_typed_signature(
reader: &veilid_capnp::typed_signature::Reader,
) -> Result<Signature, RPCError> {
rpc_ignore_missing_property!(reader, signature);
let sig_reader = reader.get_signature()?;
let kind = reader.get_kind();
Ok(Signature::new(
CryptoKind::from(kind.to_be_bytes()),
decode_signature512(&sig_reader),
))
}
pub fn encode_typed_signature(
typed_signature: &Signature,
builder: &mut veilid_capnp::typed_signature::Builder,
) {
builder.set_kind(u32::from_be_bytes(typed_signature.kind.0));
let mut sig_builder = builder.reborrow().init_signature();
encode_signature512(&typed_signature.value, &mut sig_builder);
}

View file

@ -130,18 +130,20 @@ impl Destination {
Destination::Direct {
node,
safety_selection: _,
} => Ok(Target::BareNodeId(
node.best_node_id()
.ok_or_else(|| RPCError::protocol("no supported node id"))?,
)),
} => {
Ok(Target::NodeId(node.best_node_id().ok_or_else(|| {
RPCError::protocol("no supported node id")
})?))
}
Destination::Relay {
relay: _,
node,
safety_selection: _,
} => Ok(Target::BareNodeId(
node.best_node_id()
.ok_or_else(|| RPCError::protocol("no supported node id"))?,
)),
} => {
Ok(Target::NodeId(node.best_node_id().ok_or_else(|| {
RPCError::protocol("no supported node id")
})?))
}
Destination::PrivateRoute {
private_route,
safety_selection: _,
@ -152,7 +154,7 @@ impl Destination {
.add_remote_private_route(private_route.clone())
.map_err(RPCError::protocol)?;
Ok(Target::PrivateRoute(route_id))
Ok(Target::RouteId(route_id))
}
}
}
@ -289,9 +291,9 @@ impl RPCProcessor {
safety_selection: SafetySelection,
) -> Result<rpc_processor::Destination, RPCError> {
match target {
Target::BareNodeId(node_id) => {
Target::NodeId(node_id) => {
// Resolve node
let nr = match self.resolve_node(node_id, safety_selection).await? {
let nr = match self.resolve_node(node_id, safety_selection.clone()).await? {
Some(nr) => nr,
None => {
return Err(RPCError::network("could not resolve node id"));
@ -305,7 +307,7 @@ impl RPCProcessor {
safety_selection,
})
}
Target::PrivateRoute(rsid) => {
Target::RouteId(rsid) => {
// Get remote private route
let Some(private_route) = self
.routing_table()
@ -345,7 +347,7 @@ impl RPCProcessor {
let crypto_kind = target
.best_node_id()
.ok_or_else(|| RPCError::protocol("no supported node id"))?
.kind;
.kind();
let pr_key = network_result_try!(rss
.get_private_route_for_safety_spec(
crypto_kind,
@ -376,7 +378,7 @@ impl RPCProcessor {
let crypto_kind = target
.best_node_id()
.ok_or_else(|| RPCError::protocol("no supported node id"))?
.kind;
.kind();
let mut avoid_nodes = relay.node_ids();
avoid_nodes.add_all(&target.node_ids());
@ -402,7 +404,7 @@ impl RPCProcessor {
));
};
let crypto_kind = private_route.public_key.kind;
let crypto_kind = private_route.public_key.kind();
match safety_selection {
SafetySelection::Unsafe(_) => {
@ -418,10 +420,10 @@ impl RPCProcessor {
// Determine if we can use optimized nodeinfo
let route_node = if rss.has_remote_private_route_seen_our_node_info(
&private_route.public_key.value,
private_route.public_key.ref_value(),
&published_peer_info,
) {
RouteNode::BareNodeId(routing_table.node_id(crypto_kind).value)
RouteNode::NodeId(routing_table.node_id(crypto_kind))
} else {
RouteNode::PeerInfo(published_peer_info)
};
@ -438,12 +440,12 @@ impl RPCProcessor {
// Check for loopback test
let opt_private_route_id =
rss.get_route_id_for_key(&private_route.public_key.value);
rss.get_route_id_for_key(private_route.public_key.ref_value());
let pr_key = if opt_private_route_id.is_some()
&& safety_spec.preferred_route == opt_private_route_id
{
// Private route is also safety route during loopback test
private_route.public_key.value
private_route.public_key.value()
} else {
// Get the private route to respond to that matches the safety route spec we sent the request with
network_result_try!(rss
@ -519,7 +521,7 @@ impl RPCProcessor {
// If this was received over our private route, it's okay to respond to a private route via our safety route
NetworkResult::value(Destination::private_route(
pr.clone(),
SafetySelection::Safe(detail.safety_spec),
SafetySelection::Safe(detail.safety_spec.clone()),
))
}
}

View file

@ -400,7 +400,7 @@ impl<'a> FanoutCall<'a> {
self.routing_table
.find_preferred_closest_nodes(
self.node_count,
self.hash_coordinate,
self.hash_coordinate.clone(),
filters,
transform,
)
@ -415,7 +415,7 @@ impl<'a> FanoutCall<'a> {
pub async fn run(&self, init_fanout_queue: Vec<NodeRef>) -> Result<FanoutResult, RPCError> {
// Create context for this run
let crypto = self.routing_table.crypto();
let Some(vcrypto) = crypto.get(self.hash_coordinate.kind) else {
let Some(vcrypto) = crypto.get(self.hash_coordinate.kind()) else {
return Err(RPCError::internal(
"should not try this on crypto we don't support",
));
@ -425,12 +425,12 @@ impl<'a> FanoutCall<'a> {
b_key: &CryptoTyped<BareNodeId>|
-> core::cmp::Ordering {
let da = vcrypto.distance(
&BareHashDigest::from(a_key.value),
&self.hash_coordinate.value,
&BareHashDigest::from(a_key.value()),
self.hash_coordinate.ref_value(),
);
let db = vcrypto.distance(
&BareHashDigest::from(b_key.value),
&self.hash_coordinate.value,
&BareHashDigest::from(b_key.value()),
self.hash_coordinate.ref_value(),
);
da.cmp(&db)
},
@ -438,7 +438,7 @@ impl<'a> FanoutCall<'a> {
let context = Arc::new(Mutex::new(FanoutContext {
fanout_queue: FanoutQueue::new(
self.routing_table.registry(),
self.hash_coordinate.kind,
self.hash_coordinate.kind(),
node_sort,
self.consensus_count,
),

View file

@ -123,7 +123,7 @@ impl<'a> FanoutQueue<'a> {
}
// Add the new node
self.nodes.insert(
key,
key.clone(),
FanoutNode {
node_ref: node_ref.clone(),
status: FanoutNodeStatus::Queued,

View file

@ -74,9 +74,9 @@ impl MessageHeader {
}
pub fn direct_sender_node_id(&self) -> NodeId {
match &self.detail {
RPCMessageHeaderDetail::Direct(d) => d.envelope.get_sender_typed_id(),
RPCMessageHeaderDetail::SafetyRouted(s) => s.direct.envelope.get_sender_typed_id(),
RPCMessageHeaderDetail::PrivateRouted(p) => p.direct.envelope.get_sender_typed_id(),
RPCMessageHeaderDetail::Direct(d) => d.envelope.get_sender_id(),
RPCMessageHeaderDetail::SafetyRouted(s) => s.direct.envelope.get_sender_id(),
RPCMessageHeaderDetail::PrivateRouted(p) => p.direct.envelope.get_sender_id(),
}
}
}

View file

@ -41,8 +41,8 @@ mod rpc_start_tunnel;
pub(crate) use answer::*;
pub(crate) use coders::{
builder_to_vec, decode_private_route, encode_node_info, encode_private_route, encode_route_hop,
encode_signed_direct_node_info, encode_typed_node_id, RPCDecodeContext,
builder_to_vec, decode_private_route, encode_node_id, encode_node_info, encode_private_route,
encode_route_hop, encode_signed_direct_node_info, RPCDecodeContext,
MAX_INSPECT_VALUE_A_SEQS_LEN,
};
pub(crate) use destination::*;
@ -146,7 +146,6 @@ pub(crate) struct RPCProcessor {
timeout_us: TimestampDuration,
queue_size: u32,
concurrency: u32,
max_route_hop_count: usize,
waiting_rpc_table: OperationWaiter<Message, Option<QuestionContext>>,
waiting_app_call_table: OperationWaiter<Vec<u8>, ()>,
startup_context: RPCProcessorStartupContext,
@ -172,7 +171,7 @@ impl RPCProcessor {
startup_context: RPCProcessorStartupContext,
) -> Self {
// make local copy of node id for easy access
let (concurrency, queue_size, max_route_hop_count, timeout_us) = {
let (concurrency, queue_size, timeout_us) = {
let config = registry.config();
let c = config.get();
@ -180,7 +179,6 @@ impl RPCProcessor {
let mut concurrency = c.network.rpc.concurrency;
let queue_size = c.network.rpc.queue_size;
let timeout_us = TimestampDuration::new(ms_to_us(c.network.rpc.timeout_ms));
let max_route_hop_count = c.network.rpc.max_route_hop_count as usize;
if concurrency == 0 {
concurrency = get_concurrency();
if concurrency == 0 {
@ -190,7 +188,7 @@ impl RPCProcessor {
// Default RPC concurrency is the number of CPUs * 16 rpc workers per core, as a single worker takes about 1% CPU when relaying and 16% is reasonable for baseline plus relay
concurrency *= RPC_WORKERS_PER_CORE;
}
(concurrency, queue_size, max_route_hop_count, timeout_us)
(concurrency, queue_size, timeout_us)
};
Self {
@ -199,7 +197,6 @@ impl RPCProcessor {
timeout_us,
queue_size,
concurrency,
max_route_hop_count,
waiting_rpc_table: OperationWaiter::new(),
waiting_app_call_table: OperationWaiter::new(),
startup_context,
@ -374,7 +371,7 @@ impl RPCProcessor {
let routing_domain = RoutingDomain::PublicInternet;
// Ignore own node
if routing_table.matches_own_node_id(&[node_id]) {
if routing_table.matches_own_node_id(&[node_id.clone()]) {
return TimeoutOr::Value(Err(RPCError::network("can't search for own node id")));
}
@ -382,14 +379,17 @@ impl RPCProcessor {
let result = Arc::new(Mutex::new(Option::<NodeRef>::None));
let registry = self.registry();
let node_id2 = node_id.clone();
let call_routine = Arc::new(move |next_node: NodeRef| {
let registry = registry.clone();
let node_id = node_id2.clone();
let safety_selection = safety_selection.clone();
Box::pin(async move {
let this = registry.rpc_processor();
match this
.rpc_call_find_node(
Destination::direct(next_node.routing_domain_filtered(routing_domain))
.with_safety(safety_selection),
.with_safety(safety_selection.clone()),
node_id,
vec![],
)
@ -416,8 +416,9 @@ impl RPCProcessor {
// Routine to call to check if we're done at each step
let result2 = result.clone();
let node_id2 = node_id.clone();
let check_done = Arc::new(move |_: &FanoutResult| -> bool {
let Ok(Some(nr)) = routing_table.lookup_node_ref(node_id) else {
let Ok(Some(nr)) = routing_table.lookup_node_ref(node_id2.clone()) else {
return false;
};
@ -483,7 +484,7 @@ impl RPCProcessor {
// First see if we have the node in our routing table already
let mut existing_nr = None;
if let Some(nr) = routing_table
.lookup_node_ref(node_id)
.lookup_node_ref(node_id.clone())
.map_err(RPCError::internal)?
{
existing_nr = Some(nr.clone());
@ -575,7 +576,7 @@ impl RPCProcessor {
let node_id = self
.routing_table()
.node_id(sr.direct.envelope.get_crypto_kind());
if node_id.value != reply_private_route.into() {
if node_id.value() != reply_private_route.into() {
return Err(RPCError::protocol(
"should have received reply from safety route to a stub",
));
@ -611,8 +612,7 @@ impl RPCProcessor {
// Get useful private route properties
let pr_is_stub = remote_private_route.is_stub();
let pr_hop_count = remote_private_route.hop_count;
let pr_pubkey = remote_private_route.public_key.value;
let pr_pubkey = remote_private_route.public_key.value();
let crypto_kind = remote_private_route.crypto_kind();
let Some(vcrypto) = crypto.get(crypto_kind) else {
return Err(RPCError::internal(
@ -621,11 +621,12 @@ impl RPCProcessor {
};
// Compile the safety route with the private route
let sequencing = safety_selection.get_sequencing();
let compiled_route: CompiledRoute = network_result_try!(rss
.compile_safety_route(safety_selection, remote_private_route)
.to_rpc_network_result()?);
let sr_is_stub = compiled_route.safety_route.is_stub();
let sr_pubkey = compiled_route.safety_route.public_key.value;
let sr_pubkey = compiled_route.safety_route.public_key.value();
// Encrypt routed operation
// Xmsg + ENC(Xmsg, DH(PKapr, SKbsr))
@ -638,15 +639,9 @@ impl RPCProcessor {
.map_err(RPCError::map_internal("encryption failed"))?;
// Make the routed operation
let operation = RoutedOperation::new(
routing_domain,
safety_selection.get_sequencing(),
nonce,
enc_msg_data,
);
let operation = RoutedOperation::new(routing_domain, sequencing, nonce, enc_msg_data);
// Prepare route operation
let sr_hop_count = compiled_route.safety_route.hop_count;
let route_operation = RPCOperationRoute::new(compiled_route.safety_route, operation);
let ssni_route =
self.get_sender_peer_info(&Destination::direct(compiled_route.first_hop.clone()));
@ -661,14 +656,10 @@ impl RPCProcessor {
operation.encode(&mut route_operation)?;
let out_message = builder_to_vec(route_msg)?;
// Get the first hop this is going to
let out_hop_count = (1 + sr_hop_count + pr_hop_count) as usize;
let out = RenderedOperation {
message: out_message,
destination_node_ref: compiled_route.first_hop.unfiltered(),
node_ref: compiled_route.first_hop,
hop_count: out_hop_count,
safety_route: if sr_is_stub { None } else { Some(sr_pubkey) },
remote_private_route: if pr_is_stub { None } else { Some(pr_pubkey) },
reply_private_route,
@ -701,7 +692,7 @@ impl RPCProcessor {
let reply_private_route = match operation.kind() {
RPCOperationKind::Question(q) => match q.respond_to() {
RespondTo::Sender => None,
RespondTo::PrivateRoute(pr) => Some(pr.public_key.value),
RespondTo::PrivateRoute(pr) => Some(pr.public_key.value()),
},
RPCOperationKind::Statement(_) | RPCOperationKind::Answer(_) => None,
};
@ -710,12 +701,12 @@ impl RPCProcessor {
match dest {
Destination::Direct {
node: ref node_ref,
safety_selection,
ref safety_selection,
}
| Destination::Relay {
relay: ref node_ref,
node: _,
safety_selection,
ref safety_selection,
} => {
// Send to a node without a private route
// --------------------------------------
@ -737,8 +728,8 @@ impl RPCProcessor {
SafetySelection::Unsafe(sequencing) => {
// Apply safety selection sequencing requirement if it is more strict than the node_ref's sequencing requirement
let mut node_ref = node_ref.clone();
if sequencing > node_ref.sequencing() {
node_ref.set_sequencing(sequencing)
if *sequencing > node_ref.sequencing() {
node_ref.set_sequencing(*sequencing)
}
// Reply private route should be None here, even for questions
@ -750,7 +741,6 @@ impl RPCProcessor {
message,
destination_node_ref,
node_ref,
hop_count: 1,
safety_route: None,
remote_private_route: None,
reply_private_route: None,
@ -786,7 +776,7 @@ impl RPCProcessor {
// Wrap with safety route
out = self.wrap_with_route(
routing_domain,
safety_selection,
safety_selection.clone(),
private_route,
reply_private_route,
message,
@ -1142,15 +1132,24 @@ impl RPCProcessor {
message,
destination_node_ref,
node_ref,
hop_count,
safety_route,
remote_private_route,
reply_private_route,
} = network_result_try!(self.render_operation(dest.clone(), &operation)?);
// Calculate answer timeout
// Timeout is number of hops times the timeout per hop
let timeout_us = self.timeout_us * (hop_count as u64);
//
// Maximum timeout is number of hops times the timeout per hop, but the hop
// count is not known for routes.
//
// Practical timeout for routed operations is twice the node-to-node maximum timeout
// or performance suffers. If there are too many timeouts on a route, the route
// should fail to test, and a newer and hopefully faster route will be allocated.
let timeout_us = if safety_route.is_some() || remote_private_route.is_some() {
self.timeout_us * 2u64
} else {
self.timeout_us
};
// Set up op id eventual
let handle = self.waiting_rpc_table.add_op_waiter(op_id, context);
@ -1174,8 +1173,8 @@ impl RPCProcessor {
RPCKind::Question,
send_ts,
node_ref.unfiltered(),
safety_route,
remote_private_route,
safety_route.clone(),
remote_private_route.clone(),
);
RPCError::network(e)
})?;
@ -1195,8 +1194,8 @@ impl RPCProcessor {
send_ts,
bytes,
node_ref.unfiltered(),
safety_route,
remote_private_route,
safety_route.clone(),
remote_private_route.clone(),
send_data_result.is_ordered(),
);
@ -1241,7 +1240,6 @@ impl RPCProcessor {
message,
destination_node_ref,
node_ref,
hop_count: _,
safety_route,
remote_private_route,
reply_private_route: _,
@ -1266,8 +1264,8 @@ impl RPCProcessor {
RPCKind::Statement,
send_ts,
node_ref.unfiltered(),
safety_route,
remote_private_route,
safety_route.clone(),
remote_private_route.clone(),
);
RPCError::network(e)
})?;
@ -1276,7 +1274,7 @@ impl RPCProcessor {
let send_ts = Timestamp::now();
let send_data_result = network_result_value_or_log!(self res => [ format!(": node_ref={}, destination_node_ref={}, message.len={}", node_ref, destination_node_ref, message_len) ] {
// If we couldn't send we're still cleaning up
self.record_send_failure(RPCKind::Statement, send_ts, node_ref.unfiltered(), safety_route, remote_private_route);
self.record_send_failure(RPCKind::Statement, send_ts, node_ref.unfiltered(), safety_route.clone(), remote_private_route.clone());
network_result_raise!(res);
}
);
@ -1315,7 +1313,6 @@ impl RPCProcessor {
message,
destination_node_ref,
node_ref,
hop_count: _,
safety_route,
remote_private_route,
reply_private_route: _,
@ -1340,8 +1337,8 @@ impl RPCProcessor {
RPCKind::Answer,
send_ts,
node_ref.unfiltered(),
safety_route,
remote_private_route,
safety_route.clone(),
remote_private_route.clone(),
);
RPCError::network(e)
})?;
@ -1350,7 +1347,7 @@ impl RPCProcessor {
let send_ts = Timestamp::now();
let send_data_result = network_result_value_or_log!(self res => [ format!(": node_ref={}, destination_node_ref={}, message.len={}", node_ref, destination_node_ref, message_len) ] {
// If we couldn't send we're still cleaning up
self.record_send_failure(RPCKind::Answer, send_ts, node_ref.unfiltered(), safety_route, remote_private_route);
self.record_send_failure(RPCKind::Answer, send_ts, node_ref.unfiltered(), safety_route.clone(), remote_private_route.clone());
network_result_raise!(res);
}
);
@ -1427,7 +1424,7 @@ impl RPCProcessor {
let msg = match &encoded_msg.header.detail {
RPCMessageHeaderDetail::Direct(detail) => {
// Get sender node id
let sender_node_id = detail.envelope.get_sender_typed_id();
let sender_node_id = detail.envelope.get_sender_id();
// Decode and validate the RPC operation
let decode_res = self.decode_rpc_operation(&encoded_msg);
@ -1464,7 +1461,7 @@ impl RPCProcessor {
// Get the sender noderef, incorporating sender's peer info
let sender_peer_info = operation.sender_peer_info();
let mut opt_sender_nr: Option<NodeRef> = network_result_try!(self
.process_sender_peer_info(routing_domain, sender_node_id, &sender_peer_info)? => {
.process_sender_peer_info(routing_domain, sender_node_id.clone(), &sender_peer_info)? => {
veilid_log!(self debug target:"network_result", "Sender PeerInfo: {:?}", sender_peer_info);
veilid_log!(self debug target:"network_result", "From Operation: {:?}", operation.kind());
veilid_log!(self debug target:"network_result", "With Detail: {:?}", detail);

View file

@ -8,8 +8,6 @@ pub struct RenderedOperation {
pub destination_node_ref: NodeRef,
/// Node to send envelope to (may not be destination node in case of relay)
pub node_ref: FilteredNodeRef,
/// Total safety + private route hop count + 1 hop for the initial send
pub hop_count: usize,
/// The safety route used to send the message
pub safety_route: Option<BarePublicKey>,
/// The private route used to send the message
@ -24,7 +22,6 @@ impl fmt::Debug for RenderedOperation {
.field("message(len)", &self.message.len())
.field("destination_node_ref", &self.destination_node_ref)
.field("node_ref", &self.node_ref)
.field("hop_count", &self.hop_count)
.field("safety_route", &self.safety_route)
.field("remote_private_route", &self.remote_private_route)
.field("reply_private_route", &self.reply_private_route)

View file

@ -29,7 +29,7 @@ impl RPCProcessor {
let waitable_reply = network_result_try!(self.question(dest, question, None).await?);
// Keep the reply private route that was used to return with the answer
let reply_private_route = waitable_reply.context.reply_private_route;
let reply_private_route = waitable_reply.context.reply_private_route.clone();
// Wait for reply
let (msg, latency) = match self.wait_for_reply(waitable_reply, debug_string).await? {
@ -84,7 +84,7 @@ impl RPCProcessor {
// Get the private route this came over
let opt_pr_pubkey = match &msg.header.detail {
RPCMessageHeaderDetail::Direct(_) | RPCMessageHeaderDetail::SafetyRouted(_) => None,
RPCMessageHeaderDetail::PrivateRouted(pr) => Some(pr.private_route),
RPCMessageHeaderDetail::PrivateRouted(pr) => Some(pr.private_route.clone()),
};
let route_id = if let Some(pr_pubkey) = opt_pr_pubkey {
let rss = routing_table.route_spec_store();

View file

@ -47,7 +47,7 @@ impl RPCProcessor {
// Get the private route this came over
let opt_pr_pubkey = match &msg.header.detail {
RPCMessageHeaderDetail::Direct(_) | RPCMessageHeaderDetail::SafetyRouted(_) => None,
RPCMessageHeaderDetail::PrivateRouted(pr) => Some(pr.private_route),
RPCMessageHeaderDetail::PrivateRouted(pr) => Some(pr.private_route.clone()),
};
let route_id = if let Some(pr_pubkey) = opt_pr_pubkey {
let rss = routing_table.route_spec_store();

View file

@ -36,7 +36,7 @@ impl RPCProcessor {
}
let find_node_q_detail = RPCQuestionDetail::FindNodeQ(Box::new(
RPCOperationFindNodeQ::new(node_id, capabilities.clone()),
RPCOperationFindNodeQ::new(node_id.clone(), capabilities.clone()),
));
let find_node_q = RPCQuestion::new(
network_result_try!(self.get_destination_respond_to(&dest)?),
@ -49,7 +49,7 @@ impl RPCProcessor {
let waitable_reply = network_result_try!(self.question(dest, find_node_q, None).await?);
// Keep the reply private route that was used to return with the answer
let reply_private_route = waitable_reply.context.reply_private_route;
let reply_private_route = waitable_reply.context.reply_private_route.clone();
// Wait for reply
let (msg, latency) = match self.wait_for_reply(waitable_reply, debug_string).await? {
@ -120,7 +120,7 @@ impl RPCProcessor {
let closest_nodes = network_result_try!(routing_table.find_preferred_closest_peers(
routing_domain,
node_id,
&node_id.clone().into(),
&capabilities
));

View file

@ -28,7 +28,7 @@ impl RPCProcessor {
pub async fn rpc_call_get_value(
&self,
dest: Destination,
key: RecordKey,
record_key: RecordKey,
subkey: ValueSubkey,
last_descriptor: Option<SignedValueDescriptor>,
) -> RPCNetworkResult<Answer<GetValueAnswer>> {
@ -48,16 +48,16 @@ impl RPCProcessor {
// Get the target node id
let crypto = self.crypto();
let Some(vcrypto) = crypto.get(key.kind) else {
let Some(vcrypto) = crypto.get(record_key.kind()) else {
return Err(RPCError::internal("unsupported cryptosystem"));
};
let Some(target_node_id) = target_node_ids.get(key.kind) else {
let Some(target_node_id) = target_node_ids.get(record_key.kind()) else {
return Err(RPCError::internal("No node id for crypto kind"));
};
let debug_string = format!(
"OUT ==> GetValueQ({} #{}{}) => {}",
key,
record_key,
subkey,
if last_descriptor.is_some() {
" +lastdesc"
@ -68,7 +68,8 @@ impl RPCProcessor {
);
// Send the getvalue question
let get_value_q = RPCOperationGetValueQ::new(key, subkey, last_descriptor.is_none());
let get_value_q =
RPCOperationGetValueQ::new(record_key.clone(), subkey, last_descriptor.is_none());
let question = RPCQuestion::new(
network_result_try!(self.get_destination_respond_to(&dest)?),
RPCQuestionDetail::GetValueQ(Box::new(get_value_q)),
@ -88,7 +89,7 @@ impl RPCProcessor {
);
// Keep the reply private route that was used to return with the answer
let reply_private_route = waitable_reply.context.reply_private_route;
let reply_private_route = waitable_reply.context.reply_private_route.clone();
// Wait for reply
let (msg, latency) = match self.wait_for_reply(waitable_reply, debug_string).await? {
@ -122,7 +123,7 @@ impl RPCProcessor {
let debug_string_answer = format!(
"OUT <== GetValueA({} #{}{}{} peers={}) <= {}",
key,
record_key,
subkey,
debug_string_value,
if descriptor.is_some() { " +desc" } else { "" },
@ -134,7 +135,7 @@ impl RPCProcessor {
let peer_ids: Vec<String> = peers
.iter()
.filter_map(|p| p.node_ids().get(key.kind).map(|k| k.to_string()))
.filter_map(|p| p.node_ids().get(record_key.kind()).map(|k| k.to_string()))
.collect();
veilid_log!(self debug target: "dht", "Peers: {:#?}", peer_ids);
}
@ -142,8 +143,8 @@ impl RPCProcessor {
// Validate peers returned are, in fact, closer to the key than the node we sent this to
let valid = match RoutingTable::verify_peers_closer(
&vcrypto,
target_node_id.into(),
key.into(),
&target_node_id.clone().into(),
&record_key.clone().into(),
&peers,
) {
Ok(v) => v,
@ -219,17 +220,20 @@ impl RPCProcessor {
};
// Destructure
let (key, subkey, want_descriptor) = get_value_q.destructure();
let (record_key, subkey, want_descriptor) = get_value_q.destructure();
// Get the nodes that we know about that are closer to the the key than our own node
let closer_to_key_peers = network_result_try!(
routing_table.find_preferred_peers_closer_to_key(routing_domain, key, vec![CAP_DHT])
);
let closer_to_key_peers = network_result_try!(routing_table
.find_preferred_peers_closer_to_key(
routing_domain,
&record_key.clone().into(),
vec![CAP_DHT]
));
if debug_target_enabled!("dht") {
let debug_string = format!(
"IN <=== GetValueQ({} #{}{}) <== {}",
key,
record_key,
subkey,
if want_descriptor { " +wantdesc" } else { "" },
msg.header.direct_sender_node_id()
@ -252,7 +256,7 @@ impl RPCProcessor {
// See if we have this record ourselves
let storage_manager = self.storage_manager();
let get_result = network_result_try!(storage_manager
.inbound_get_value(key, subkey, want_descriptor)
.inbound_get_value(record_key.clone(), subkey, want_descriptor)
.await
.map_err(RPCError::internal)?);
(get_result.opt_value, get_result.opt_descriptor)
@ -273,7 +277,7 @@ impl RPCProcessor {
let debug_string_answer = format!(
"IN ===> GetValueA({} #{}{}{} peers={}) ==> {}",
key,
record_key,
subkey,
debug_string_value,
if get_result_descriptor.is_some() {

View file

@ -30,7 +30,7 @@ impl RPCProcessor {
pub async fn rpc_call_inspect_value(
&self,
dest: Destination,
key: RecordKey,
record_key: RecordKey,
subkeys: ValueSubkeyRangeSet,
last_descriptor: Option<SignedValueDescriptor>,
) -> RPCNetworkResult<Answer<InspectValueAnswer>> {
@ -50,16 +50,16 @@ impl RPCProcessor {
// Get the target node id
let crypto = self.crypto();
let Some(vcrypto) = crypto.get(key.kind) else {
let Some(vcrypto) = crypto.get(record_key.kind()) else {
return Err(RPCError::internal("unsupported cryptosystem"));
};
let Some(target_node_id) = target_node_ids.get(key.kind) else {
let Some(target_node_id) = target_node_ids.get(record_key.kind()) else {
return Err(RPCError::internal("No node id for crypto kind"));
};
let debug_string = format!(
"OUT ==> InspectValueQ({} #{}{}) => {}",
key,
record_key,
&subkeys,
if last_descriptor.is_some() {
" +lastdesc"
@ -70,8 +70,11 @@ impl RPCProcessor {
);
// Send the inspectvalue question
let inspect_value_q =
RPCOperationInspectValueQ::new(key, subkeys.clone(), last_descriptor.is_none())?;
let inspect_value_q = RPCOperationInspectValueQ::new(
record_key.clone(),
subkeys.clone(),
last_descriptor.is_none(),
)?;
let question = RPCQuestion::new(
network_result_try!(self.get_destination_respond_to(&dest)?),
RPCQuestionDetail::InspectValueQ(Box::new(inspect_value_q)),
@ -91,7 +94,7 @@ impl RPCProcessor {
);
// Keep the reply private route that was used to return with the answer
let reply_private_route = waitable_reply.context.reply_private_route;
let reply_private_route = waitable_reply.context.reply_private_route.clone();
// Wait for reply
let (msg, latency) = match self.wait_for_reply(waitable_reply, debug_string).await? {
@ -118,7 +121,7 @@ impl RPCProcessor {
if debug_target_enabled!("dht") {
let debug_string_answer = format!(
"OUT <== InspectValueA({} {} peers={}) <= {} seqs:\n{}",
key,
record_key,
if descriptor.is_some() { " +desc" } else { "" },
peers.len(),
dest,
@ -129,7 +132,7 @@ impl RPCProcessor {
let peer_ids: Vec<String> = peers
.iter()
.filter_map(|p| p.node_ids().get(key.kind).map(|k| k.to_string()))
.filter_map(|p| p.node_ids().get(record_key.kind()).map(|k| k.to_string()))
.collect();
veilid_log!(self debug target: "dht", "Peers: {:#?}", peer_ids);
}
@ -137,8 +140,8 @@ impl RPCProcessor {
// Validate peers returned are, in fact, closer to the key than the node we sent this to
let valid = match RoutingTable::verify_peers_closer(
&vcrypto,
target_node_id.into(),
key.into(),
&target_node_id.clone().into(),
&record_key.clone().into(),
&peers,
) {
Ok(v) => v,
@ -205,17 +208,20 @@ impl RPCProcessor {
};
// Destructure
let (key, subkeys, want_descriptor) = inspect_value_q.destructure();
let (record_key, subkeys, want_descriptor) = inspect_value_q.destructure();
// Get the nodes that we know about that are closer to the the key than our own node
let closer_to_key_peers = network_result_try!(
routing_table.find_preferred_peers_closer_to_key(routing_domain, key, vec![CAP_DHT])
);
let closer_to_key_peers = network_result_try!(routing_table
.find_preferred_peers_closer_to_key(
routing_domain,
&record_key.clone().into(),
vec![CAP_DHT]
));
if debug_target_enabled!("dht") {
let debug_string = format!(
"IN <=== InspectValueQ({} {}{}) <== {}",
key,
record_key,
subkeys,
if want_descriptor { " +wantdesc" } else { "" },
msg.header.direct_sender_node_id()
@ -239,7 +245,7 @@ impl RPCProcessor {
// See if we have this record ourselves
let storage_manager = self.storage_manager();
let inspect_result = network_result_try!(storage_manager
.inbound_inspect_value(key, subkeys, want_descriptor)
.inbound_inspect_value(record_key.clone(), subkeys, want_descriptor)
.await
.map_err(RPCError::internal)?);
(
@ -255,7 +261,7 @@ impl RPCProcessor {
if debug_target_enabled!("dht") {
let debug_string_answer = format!(
"IN ===> InspectValueA({} {:?}{} peers={}) ==> {}",
key,
record_key,
inspect_result_seqs,
if inspect_result_descriptor.is_some() {
" +desc"

View file

@ -10,32 +10,12 @@ impl RPCProcessor {
route_hop: RouteHop,
safety_route: SafetyRoute,
) -> RPCNetworkResult<()> {
// Make sure hop count makes sense
if safety_route.hop_count as usize > self.max_route_hop_count {
return Ok(NetworkResult::invalid_message(
"Safety route hop count too high to process",
));
}
if safety_route.hop_count == 0 {
return Ok(NetworkResult::invalid_message(
"Safety route hop count should not be zero if there are more hops",
));
}
if route_hop.next_hop.is_none() {
return Ok(NetworkResult::invalid_message(
"Safety route hop must have next hop",
));
}
// Get next hop node ref
let routing_table = self.routing_table();
let Some(next_hop_nr) = route_hop
.node
.node_ref(&routing_table, safety_route.public_key.kind)
else {
let Some(next_hop_nr) = route_hop.node.node_ref(&routing_table) else {
return Ok(NetworkResult::invalid_message(format!(
"could not get route node hop ref: {}",
route_hop.node.describe(safety_route.public_key.kind)
route_hop.node.describe()
)));
};
@ -46,7 +26,6 @@ impl RPCProcessor {
let next_hop_route = RPCOperationRoute::new(
SafetyRoute {
public_key: safety_route.public_key,
hop_count: safety_route.hop_count - 1,
hops: SafetyRouteHops::Data(route_hop.next_hop.unwrap()),
},
routed_operation,
@ -67,21 +46,12 @@ impl RPCProcessor {
safety_route_public_key: PublicKey,
next_private_route: PrivateRoute,
) -> RPCNetworkResult<()> {
// Make sure hop count makes sense
if next_private_route.hop_count as usize > self.max_route_hop_count {
return Ok(NetworkResult::invalid_message(
"Private route hop count too high to process",
));
}
// Get next hop node ref
let routing_table = self.routing_table();
let Some(next_hop_nr) =
next_route_node.node_ref(&routing_table, safety_route_public_key.kind)
else {
let Some(next_hop_nr) = next_route_node.node_ref(&routing_table) else {
return Ok(NetworkResult::invalid_message(format!(
"could not get route node hop ref: {}",
next_route_node.describe(safety_route_public_key.kind)
next_route_node.describe()
)));
};
@ -92,7 +62,6 @@ impl RPCProcessor {
let next_hop_route = RPCOperationRoute::new(
SafetyRoute {
public_key: safety_route_public_key,
hop_count: 0,
hops: SafetyRouteHops::Private(next_private_route),
},
routed_operation,
@ -122,8 +91,8 @@ impl RPCProcessor {
// xxx: punish nodes that send messages that fail to decrypt eventually? How to do this for safety routes?
let node_id_secret = self
.routing_table()
.node_id_secret_key(remote_sr_pubkey.kind);
let Ok(dh_secret) = vcrypto.cached_dh(&remote_sr_pubkey.value, &node_id_secret) else {
.node_id_secret_key(remote_sr_pubkey.kind());
let Ok(dh_secret) = vcrypto.cached_dh(remote_sr_pubkey.ref_value(), &node_id_secret) else {
return Ok(NetworkResult::invalid_message(
"dh failed for remote safety route for safety routed operation",
));
@ -146,7 +115,7 @@ impl RPCProcessor {
// Pass message to RPC system
self.enqueue_safety_routed_message(
detail,
remote_sr_pubkey.value,
remote_sr_pubkey.value(),
routed_operation.sequencing(),
body,
)
@ -166,7 +135,7 @@ impl RPCProcessor {
pr_pubkey: PublicKey,
) -> RPCNetworkResult<()> {
// Get sender id of the peer with the crypto kind of the route
let Some(sender_id) = detail.sender_noderef.node_ids().get(pr_pubkey.kind) else {
let Some(sender_id) = detail.sender_noderef.node_ids().get(pr_pubkey.kind()) else {
return Ok(NetworkResult::invalid_message(
"route node doesnt have a required crypto kind for routed operation",
));
@ -176,16 +145,16 @@ impl RPCProcessor {
// Ensure the route is validated, and construct a return safetyspec that matches the inbound preferences
let routing_table = self.routing_table();
let rss = routing_table.route_spec_store();
let preferred_route = rss.get_route_id_for_key(&pr_pubkey.value);
let preferred_route = rss.get_route_id_for_key(pr_pubkey.ref_value());
let Some((secret_key, safety_spec)) = rss.with_signature_validated_route(
&pr_pubkey,
routed_operation.signatures(),
routed_operation.data(),
sender_id.value,
&sender_id,
|rssd, rsd| {
(
rsd.secret_key,
rsd.secret_key.clone(),
SafetySpec {
preferred_route,
hop_count: rssd.hop_count(),
@ -202,7 +171,7 @@ impl RPCProcessor {
// Now that things are valid, decrypt the routed operation with DEC(nonce, DH(the SR's public key, the PR's (or node's) secret)
// xxx: punish nodes that send messages that fail to decrypt eventually. How to do this for private routes?
let Ok(dh_secret) = vcrypto.cached_dh(&remote_sr_pubkey.value, &secret_key) else {
let Ok(dh_secret) = vcrypto.cached_dh(remote_sr_pubkey.ref_value(), &secret_key) else {
return Ok(NetworkResult::invalid_message(
"dh failed for remote safety route for private routed operation",
));
@ -221,8 +190,8 @@ impl RPCProcessor {
// Pass message to RPC system
self.enqueue_private_routed_message(
detail,
remote_sr_pubkey.value,
pr_pubkey.value,
remote_sr_pubkey.value(),
pr_pubkey.value(),
safety_spec,
body,
)
@ -242,7 +211,11 @@ impl RPCProcessor {
) -> RPCNetworkResult<()> {
// If the private route public key is our node id, then this was sent via safety route to our node directly
// so there will be no signatures to validate
if self.routing_table().node_ids().contains(&pr_pubkey.into()) {
if self
.routing_table()
.node_ids()
.contains(&pr_pubkey.clone().into())
{
// The private route was a stub
self.process_safety_routed_operation(
detail,
@ -291,13 +264,6 @@ impl RPCProcessor {
&mut routed_operation
)?);
// Ensure hop count > 0
if private_route.hop_count == 0 {
return Ok(NetworkResult::invalid_message(
"route should not be at the end",
));
}
// Make next PrivateRoute and pass it on
return self
.process_route_private_route_hop(
@ -306,7 +272,6 @@ impl RPCProcessor {
sr_pubkey,
PrivateRoute {
public_key: private_route.public_key,
hop_count: private_route.hop_count - 1,
hops: route_hop
.next_hop
.map(PrivateRouteHops::Data)
@ -336,7 +301,7 @@ impl RPCProcessor {
) -> RPCNetworkResult<RouteHop> {
// Get crypto kind
let crypto = self.crypto();
let crypto_kind = pr_pubkey.kind;
let crypto_kind = pr_pubkey.kind();
let Some(vcrypto) = crypto.get(crypto_kind) else {
return Ok(NetworkResult::invalid_message(
"private route hop data crypto is not supported",
@ -346,7 +311,7 @@ impl RPCProcessor {
// Decrypt the blob with DEC(nonce, DH(the PR's public key, this hop's secret)
let node_id_secret = self.routing_table().node_id_secret_key(crypto_kind);
let dh_secret = vcrypto
.cached_dh(&pr_pubkey.value, &node_id_secret)
.cached_dh(pr_pubkey.ref_value(), &node_id_secret)
.map_err(RPCError::protocol)?;
let dec_blob_data = match vcrypto.decrypt_aead(
&route_hop_data.blob,
@ -381,13 +346,16 @@ impl RPCProcessor {
if route_hop.next_hop.is_some() {
let node_id = self.routing_table().node_id(crypto_kind);
let node_id_secret = self.routing_table().node_id_secret_key(crypto_kind);
let sig = vcrypto
let sig = Signature::new(
vcrypto.kind(),
vcrypto
.sign(
&node_id.value.into(),
&node_id.value().into(),
&node_id_secret,
routed_operation.data(),
)
.map_err(RPCError::internal)?;
.map_err(RPCError::internal)?,
);
routed_operation.add_signature(sig);
}
@ -452,7 +420,7 @@ impl RPCProcessor {
// Decrypt the blob with DEC(nonce, DH(the SR's public key, this hop's secret)
let node_id_secret = self.routing_table().node_id_secret_key(crypto_kind);
let Ok(dh_secret) =
vcrypto.cached_dh(&safety_route.public_key.value, &node_id_secret)
vcrypto.cached_dh(safety_route.public_key.ref_value(), &node_id_secret)
else {
return Ok(NetworkResult::invalid_message(
"dh failed for safety route hop",
@ -583,13 +551,6 @@ impl RPCProcessor {
&mut routed_operation
)?);
// Ensure hop count > 0
if private_route.hop_count == 0 {
return Ok(NetworkResult::invalid_message(
"route should not be at the end",
));
}
// Make next PrivateRoute and pass it on
network_result_try!(
self.process_route_private_route_hop(
@ -598,7 +559,6 @@ impl RPCProcessor {
safety_route.public_key,
PrivateRoute {
public_key: private_route.public_key,
hop_count: private_route.hop_count - 1,
hops: route_hop
.next_hop
.map(PrivateRouteHops::Data)
@ -609,18 +569,6 @@ impl RPCProcessor {
);
}
PrivateRouteHops::Empty => {
// Ensure hop count == 0
if private_route.hop_count != 0 {
return Ok(NetworkResult::invalid_message(
"route should be at the end",
));
}
if safety_route.hop_count != 0 {
return Ok(NetworkResult::invalid_message(
"Safety hop count should be zero if switched to private route",
));
}
// No hops left, time to process the routed operation
network_result_try!(self.process_routed_operation(
detail,

View file

@ -30,7 +30,7 @@ impl RPCProcessor {
pub async fn rpc_call_set_value(
&self,
dest: Destination,
key: RecordKey,
record_key: RecordKey,
subkey: ValueSubkey,
value: SignedValueData,
descriptor: SignedValueDescriptor,
@ -52,16 +52,16 @@ impl RPCProcessor {
// Get the target node id
let crypto = self.crypto();
let Some(vcrypto) = crypto.get(key.kind) else {
let Some(vcrypto) = crypto.get(record_key.kind()) else {
return Err(RPCError::internal("unsupported cryptosystem"));
};
let Some(target_node_id) = target_node_ids.get(key.kind) else {
let Some(target_node_id) = target_node_ids.get(record_key.kind()) else {
return Err(RPCError::internal("No node id for crypto kind"));
};
let debug_string = format!(
"OUT ==> SetValueQ({} #{} len={} seq={} writer={}{}) => {}",
key,
record_key,
subkey,
value.value_data().data().len(),
value.value_data().seq(),
@ -72,7 +72,7 @@ impl RPCProcessor {
// Send the setvalue question
let set_value_q = RPCOperationSetValueQ::new(
key,
record_key.clone(),
subkey,
value,
if send_descriptor {
@ -101,7 +101,7 @@ impl RPCProcessor {
);
// Keep the reply private route that was used to return with the answer
let reply_private_route = waitable_reply.context.reply_private_route;
let reply_private_route = waitable_reply.context.reply_private_route.clone();
// Wait for reply
let (msg, latency) = match self.wait_for_reply(waitable_reply, debug_string).await? {
@ -136,7 +136,7 @@ impl RPCProcessor {
let debug_string_answer = format!(
"OUT <== SetValueA({} #{}{}{} peers={}) <= {}",
key,
record_key,
subkey,
if set { " +set" } else { "" },
debug_string_value,
@ -148,7 +148,7 @@ impl RPCProcessor {
let peer_ids: Vec<String> = peers
.iter()
.filter_map(|p| p.node_ids().get(key.kind).map(|k| k.to_string()))
.filter_map(|p| p.node_ids().get(record_key.kind()).map(|k| k.to_string()))
.collect();
veilid_log!(self debug target: "dht", "Peers: {:#?}", peer_ids);
}
@ -156,8 +156,8 @@ impl RPCProcessor {
// Validate peers returned are, in fact, closer to the key than the node we sent this to
let valid = match RoutingTable::verify_peers_closer(
&vcrypto,
target_node_id.into(),
key.into(),
&target_node_id.clone().into(),
&record_key.clone().into(),
&peers,
) {
Ok(v) => v,
@ -231,20 +231,23 @@ impl RPCProcessor {
};
// Destructure
let (key, subkey, value, descriptor) = set_value_q.destructure();
let (record_key, subkey, value, descriptor) = set_value_q.destructure();
// Get target for ValueChanged notifications
let dest = network_result_try!(self.get_respond_to_destination(&msg));
let target = dest.get_target(&routing_table)?;
// Get the nodes that we know about that are closer to the the key than our own node
let closer_to_key_peers = network_result_try!(
routing_table.find_preferred_peers_closer_to_key(routing_domain, key, vec![CAP_DHT])
);
let closer_to_key_peers = network_result_try!(routing_table
.find_preferred_peers_closer_to_key(
routing_domain,
&record_key.clone().into(),
vec![CAP_DHT]
));
let debug_string = format!(
"IN <=== SetValueQ({} #{} len={} seq={} writer={}{}) <== {}",
key,
record_key,
subkey,
value.value_data().data().len(),
value.value_data().seq(),
@ -270,7 +273,7 @@ impl RPCProcessor {
let storage_manager = self.storage_manager();
let new_value = network_result_try!(storage_manager
.inbound_set_value(
key,
record_key.clone(),
subkey,
Arc::new(value),
descriptor.map(Arc::new),
@ -297,7 +300,7 @@ impl RPCProcessor {
let debug_string_answer = format!(
"IN ===> SetValueA({} #{}{}{} peers={}) ==> {}",
key,
record_key,
subkey,
if set { " +set" } else { "" },
debug_string_value,

View file

@ -78,7 +78,7 @@ impl RPCProcessor {
let send_data_method = waitable_reply.context.send_data_result.clone();
// Keep the reply private route that was used to return with the answer
let reply_private_route = waitable_reply.context.reply_private_route;
let reply_private_route = waitable_reply.context.reply_private_route.clone();
// Wait for reply
let (msg, latency) = match self.wait_for_reply(waitable_reply, debug_string).await? {

View file

@ -131,7 +131,7 @@ impl RPCProcessor {
// We filter on the -outgoing- protocol capability status not the node's dial info
// Use the address type though, to ensure we reach an ipv6 capable node if this is
// an ipv6 address
let sender_node_id = detail.envelope.get_sender_typed_id();
let sender_node_id = detail.envelope.get_sender_id();
let routing_domain = detail.routing_domain;
let node_count = self
.config()

View file

@ -56,7 +56,7 @@ impl RPCProcessor {
// Try it as the node if, and the storage manager will reject the
// value change if it doesn't match the active watch's node id
let inbound_node_id = match &msg.header.detail {
RPCMessageHeaderDetail::Direct(d) => d.envelope.get_sender_typed_id(),
RPCMessageHeaderDetail::Direct(d) => d.envelope.get_sender_id(),
RPCMessageHeaderDetail::SafetyRouted(_) => {
return Ok(NetworkResult::invalid_message(
"not processing value change over safety route",
@ -64,7 +64,7 @@ impl RPCProcessor {
}
RPCMessageHeaderDetail::PrivateRouted(p) => NodeId::new(
p.direct.envelope.get_crypto_kind(),
p.remote_safety_route.into(),
p.remote_safety_route.clone().into(),
),
};

View file

@ -27,11 +27,11 @@ impl RPCProcessor {
pub async fn rpc_call_watch_value(
&self,
dest: Destination,
key: RecordKey,
record_key: RecordKey,
subkeys: ValueSubkeyRangeSet,
expiration: Timestamp,
count: u32,
watcher: BareKeyPair,
watcher: KeyPair,
watch_id: Option<u64>,
) -> RPCNetworkResult<Answer<WatchValueAnswer>> {
let _guard = self
@ -50,10 +50,10 @@ impl RPCProcessor {
// Get the target node id
let crypto = self.crypto();
let Some(vcrypto) = crypto.get(key.kind) else {
let Some(vcrypto) = crypto.get(record_key.kind()) else {
return Err(RPCError::internal("unsupported cryptosystem"));
};
let Some(target_node_id) = target_node_ids.get(key.kind) else {
let Some(target_node_id) = target_node_ids.get(record_key.kind()) else {
return Err(RPCError::internal("No node id for crypto kind"));
};
@ -64,17 +64,17 @@ impl RPCProcessor {
} else {
"".to_owned()
},
key,
record_key,
subkeys,
expiration,
count,
dest,
watcher.key
watcher
);
// Send the watchvalue question
let watch_value_q = RPCOperationWatchValueQ::new(
key,
record_key.clone(),
subkeys.clone(),
expiration.as_u64(),
count,
@ -93,7 +93,7 @@ impl RPCProcessor {
network_result_try!(self.question(dest.clone(), question, None).await?);
// Keep the reply private route that was used to return with the answer
let reply_private_route = waitable_reply.context.reply_private_route;
let reply_private_route = waitable_reply.context.reply_private_route.clone();
// Wait for reply
let (msg, latency) = match self.wait_for_reply(waitable_reply, debug_string).await? {
@ -117,7 +117,7 @@ impl RPCProcessor {
"OUT <== WatchValueA({}id={} {} #{:?}@{} peers={}) <= {}",
if accepted { "+accept " } else { "" },
watch_id,
key,
record_key,
subkeys,
expiration,
peers.len(),
@ -128,7 +128,7 @@ impl RPCProcessor {
let peer_ids: Vec<String> = peers
.iter()
.filter_map(|p| p.node_ids().get(key.kind).map(|k| k.to_string()))
.filter_map(|p| p.node_ids().get(record_key.kind()).map(|k| k.to_string()))
.collect();
veilid_log!(self debug target: "dht", "Peers: {:#?}", peer_ids);
}
@ -155,8 +155,8 @@ impl RPCProcessor {
// Validate peers returned are, in fact, closer to the key than the node we sent this to
let valid = match RoutingTable::verify_peers_closer(
&vcrypto,
target_node_id.into(),
key.into(),
&target_node_id.clone().into(),
&record_key.clone().into(),
&peers,
) {
Ok(v) => v,
@ -233,9 +233,16 @@ impl RPCProcessor {
};
// Destructure
let (key, subkeys, expiration, count, watch_id, watcher, _signature) =
let (record_key, subkeys, expiration, count, watch_id, watcher, _signature) =
watch_value_q.destructure();
// Extract member id for watcher
let Ok(watcher_member_id) = self.storage_manager().generate_member_id(&watcher) else {
return Ok(NetworkResult::invalid_message(
"could not generate member id for watcher public key",
));
};
// Get target for ValueChanged notifications
let dest = network_result_try!(self.get_respond_to_destination(&msg));
let target = dest.get_target(&routing_table)?;
@ -248,12 +255,12 @@ impl RPCProcessor {
} else {
"".to_owned()
},
key,
record_key,
subkeys,
expiration,
count,
msg.header.direct_sender_node_id(),
watcher
watcher_member_id
);
veilid_log!(self debug target: "dht", "{}", debug_string);
@ -261,7 +268,11 @@ impl RPCProcessor {
// Get the nodes that we know about that are closer to the the key than our own node
let closer_to_key_peers = network_result_try!(routing_table
.find_preferred_peers_closer_to_key(routing_domain, key, vec![CAP_DHT, CAP_DHT_WATCH]));
.find_preferred_peers_closer_to_key(
routing_domain,
&record_key.clone().into(),
vec![CAP_DHT, CAP_DHT_WATCH]
));
// See if we would have accepted this as a set, same set_value_count for watches
let set_value_count = self
@ -279,14 +290,14 @@ impl RPCProcessor {
subkeys: subkeys.clone(),
expiration: Timestamp::new(expiration),
count,
watcher,
watcher_member_id,
target,
};
// See if we have this record ourselves, if so, accept the watch
let storage_manager = self.storage_manager();
let watch_result = network_result_try!(storage_manager
.inbound_watch_value(key, params, watch_id)
.inbound_watch_value(record_key.clone(), params, watch_id)
.await
.map_err(RPCError::internal)?);
@ -308,7 +319,7 @@ impl RPCProcessor {
"IN ===> WatchValueA({}id={} {} #{} expiration={} peers={}) ==> {}",
if ret_accepted { "+accept " } else { "" },
ret_watch_id,
key,
record_key,
subkeys,
ret_expiration,
closer_to_key_peers.len(),

View file

@ -34,7 +34,10 @@ impl StorageManager {
record_key: RecordKey,
subkey: ValueSubkey,
) -> Option<ActiveSubkeyWriteGuard> {
let asw = inner.active_subkey_writes.entry(record_key).or_default();
let asw = inner
.active_subkey_writes
.entry(record_key.clone())
.or_default();
if asw.contains(subkey) {
veilid_log!(self debug "already writing to this subkey: {}:{}", record_key, subkey);
None

View file

@ -41,7 +41,7 @@ impl StorageManager {
let mut out = "[\n".to_owned();
for (k, v) in &inner.offline_subkey_writes {
let record_info = local_record_store
.peek_record(*k, |r| format!("{} nodes", r.detail().nodes.len()))
.peek_record(k, |r| format!("{} nodes", r.detail().nodes.len()))
.unwrap_or("Not found".to_owned());
out += &format!(" {}:{:?}, {}\n", k, v, record_info);
@ -108,7 +108,7 @@ impl StorageManager {
let Some(local_record_store) = &inner.local_record_store else {
return "not initialized".to_owned();
};
let local_debug = local_record_store.debug_record_info(record_key);
let local_debug = local_record_store.debug_record_info(record_key.clone());
let opened_debug = if let Some(o) = inner.opened_records.get(&record_key) {
format!("Opened Record: {:#?}\n", o)

View file

@ -47,7 +47,7 @@ impl StorageManager {
// Get the nodes we know are caching this value to seed the fanout
let init_fanout_queue = {
self.get_value_nodes(record_key)
self.get_value_nodes(record_key.clone())
.await?
.unwrap_or_default()
.into_iter()
@ -81,11 +81,15 @@ impl StorageManager {
let call_routine = {
let context = context.clone();
let registry = self.registry();
let record_key = record_key.clone();
let safety_selection = safety_selection.clone();
Arc::new(
move |next_node: NodeRef| -> PinBoxFutureStatic<FanoutCallResult> {
let context = context.clone();
let registry = registry.clone();
let record_key = record_key.clone();
let last_descriptor = last_get_result.opt_descriptor.clone();
let safety_selection = safety_selection.clone();
Box::pin(async move {
let rpc_processor = registry.rpc_processor();
let gva = match
@ -93,7 +97,7 @@ impl StorageManager {
.rpc_call_get_value(
Destination::direct(next_node.routing_domain_filtered(routing_domain))
.with_safety(safety_selection),
record_key,
record_key.clone(),
subkey,
last_descriptor.map(|x| (*x).clone()),
)
@ -145,8 +149,8 @@ impl StorageManager {
};
// Validate with schema
if schema.check_subkey_value_data(
descriptor.owner(),
if registry.storage_manager().check_subkey_value_data(schema,
descriptor.ref_owner(),
subkey,
value.value_data(),
).is_err() {
@ -255,7 +259,7 @@ impl StorageManager {
let routing_table = registry.routing_table();
let fanout_call = FanoutCall::new(
&routing_table,
record_key.into(),
record_key.clone().into(),
key_count,
fanout,
consensus_count,
@ -316,6 +320,7 @@ impl StorageManager {
Box::new(
move |result: VeilidAPIResult<get_value::OutboundGetValueResult>| -> PinBoxFutureStatic<bool> {
let registry=registry.clone();
let key = key.clone();
Box::pin(async move {
let this = registry.storage_manager();
let result = match result {
@ -326,7 +331,7 @@ impl StorageManager {
}
};
let is_incomplete = result.fanout_result.kind.is_incomplete();
let value_data = match this.process_outbound_get_value_result(key, subkey, Some(last_seq), result).await {
let value_data = match this.process_outbound_get_value_result(key.clone(), subkey, Some(last_seq), result).await {
Ok(Some(v)) => v,
Ok(None) => {
return is_incomplete;
@ -344,7 +349,7 @@ impl StorageManager {
// if the sequence number changed since our first partial update
// Send with a max count as this is not attached to any watch
if last_seq != value_data.seq() {
this.update_callback_value_change(key,ValueSubkeyRangeSet::single(subkey), u32::MAX, Some(value_data));
this.update_callback_value_change(key.clone(),ValueSubkeyRangeSet::single(subkey), u32::MAX, Some(value_data));
}
// Return done
@ -371,7 +376,7 @@ impl StorageManager {
// Get cryptosystem
let crypto = self.crypto();
let Some(vcrypto) = crypto.get(record_key.kind) else {
let Some(vcrypto) = crypto.get(record_key.kind()) else {
apibail_generic!("unsupported cryptosystem");
};
@ -381,7 +386,7 @@ impl StorageManager {
Self::process_fanout_results_inner(
&mut inner,
&vcrypto,
record_key,
record_key.clone(),
core::iter::once((ValueSubkeyRangeSet::single(subkey), result.fanout_result)),
false,
self.config()
@ -416,7 +421,7 @@ impl StorageManager {
let (_is_local, last_get_result) = {
// See if the subkey we are getting has a last known local value
let mut last_get_result = self
.handle_get_local_value_inner(&mut inner, key, subkey, true)
.handle_get_local_value_inner(&mut inner, key.clone(), subkey, true)
.await?;
// If this is local, it must have a descriptor already
if last_get_result.opt_descriptor.is_some() {

View file

@ -88,7 +88,7 @@ impl StorageManager {
// Get the nodes we know are caching this value to seed the fanout
let init_fanout_queue = {
self.get_value_nodes(record_key)
self.get_value_nodes(record_key.clone())
.await?
.unwrap_or_default()
.into_iter()
@ -125,12 +125,16 @@ impl StorageManager {
let call_routine = {
let context = context.clone();
let registry = self.registry();
let record_key = record_key.clone();
let safety_selection = safety_selection.clone();
Arc::new(
move |next_node: NodeRef| -> PinBoxFutureStatic<FanoutCallResult> {
let context = context.clone();
let registry = registry.clone();
let opt_descriptor = local_inspect_result.opt_descriptor();
let subkeys = subkeys.clone();
let record_key = record_key.clone();
let safety_selection = safety_selection.clone();
Box::pin(async move {
let rpc_processor = registry.rpc_processor();
@ -138,7 +142,7 @@ impl StorageManager {
rpc_processor
.rpc_call_inspect_value(
Destination::direct(next_node.routing_domain_filtered(routing_domain)).with_safety(safety_selection),
record_key,
record_key.clone(),
subkeys.clone(),
opt_descriptor.map(|x| (*x).clone()),
)
@ -368,7 +372,12 @@ impl StorageManager {
let (_is_local, inspect_result) = {
// See if the subkey we are getting has a last known local value
let mut local_inspect_result = self
.handle_inspect_local_value_inner(&mut inner, record_key, subkeys.clone(), true)
.handle_inspect_local_value_inner(
&mut inner,
record_key.clone(),
subkeys.clone(),
true,
)
.await?;
// If this is local, it must have a descriptor already
if local_inspect_result.opt_descriptor().is_some() {

View file

@ -6,6 +6,7 @@ mod offline_subkey_writes;
mod outbound_watch_manager;
mod record_store;
mod rehydrate;
mod schema;
mod set_value;
mod tasks;
mod types;
@ -28,6 +29,8 @@ pub use types::*;
impl_veilid_log_facility!("stor");
/// Fixed length of MemberId (DHT Schema member id) in bytes
pub const MEMBER_ID_LENGTH: usize = 32;
/// The maximum size of a single subkey
pub(crate) const MAX_SUBKEY_SIZE: usize = ValueData::MAX_LEN;
/// The maximum total size of all subkeys of a record
@ -488,7 +491,7 @@ impl StorageManager {
Destination::direct(
node_ref.routing_domain_filtered(RoutingDomain::PublicInternet),
)
.with_safety(current.params().safety_selection),
.with_safety(current.params().safety_selection.clone()),
)
}
}
@ -501,13 +504,12 @@ impl StorageManager {
#[instrument(level = "trace", target = "stor", skip_all)]
pub fn get_record_key(
&self,
kind: CryptoKind,
schema: DHTSchema,
owner_key: &BarePublicKey,
owner_key: &PublicKey,
) -> VeilidAPIResult<RecordKey> {
// Get cryptosystem
let crypto = self.crypto();
let Some(vcrypto) = crypto.get(kind) else {
let Some(vcrypto) = crypto.get(owner_key.kind()) else {
apibail_generic!("unsupported cryptosystem");
};
@ -515,7 +517,24 @@ impl StorageManager {
schema.validate()?;
let schema_data = schema.compile();
Ok(Self::get_key(&vcrypto, owner_key, &schema_data))
Ok(Self::make_record_key(
&vcrypto,
owner_key.ref_value(),
&schema_data,
))
}
fn make_record_key(
vcrypto: &CryptoSystemGuard<'_>,
owner_key: &BarePublicKey,
schema_data: &[u8],
) -> RecordKey {
let mut hash_data = Vec::<u8>::with_capacity(owner_key.len() + 4 + schema_data.len());
hash_data.extend_from_slice(&vcrypto.kind().0);
hash_data.extend_from_slice(owner_key);
hash_data.extend_from_slice(schema_data);
let hash = vcrypto.generate_hash(&hash_data);
RecordKey::new(vcrypto.kind(), BareRecordKey::from(hash))
}
/// Create a local record from scratch with a new owner key, open it, and return the opened descriptor
@ -523,7 +542,7 @@ impl StorageManager {
&self,
kind: CryptoKind,
schema: DHTSchema,
owner: Option<BareKeyPair>,
owner: Option<KeyPair>,
safety_selection: SafetySelection,
) -> VeilidAPIResult<DHTRecordDescriptor> {
let Ok(_guard) = self.startup_lock.enter() else {
@ -538,7 +557,13 @@ impl StorageManager {
// Create a new owned local record from scratch
let (key, owner) = self
.create_new_owned_local_record_inner(&mut inner, kind, schema, owner, safety_selection)
.create_new_owned_local_record_inner(
&mut inner,
kind,
schema,
owner,
safety_selection.clone(),
)
.await?;
// Now that the record is made we should always succeed to open the existing record
@ -553,7 +578,7 @@ impl StorageManager {
pub async fn open_record(
&self,
record_key: RecordKey,
writer: Option<BareKeyPair>,
writer: Option<KeyPair>,
safety_selection: SafetySelection,
) -> VeilidAPIResult<DHTRecordDescriptor> {
let Ok(_guard) = self.startup_lock.enter() else {
@ -564,7 +589,12 @@ impl StorageManager {
// See if we have a local record already or not
if let Some(res) = self
.open_existing_record_inner(&mut inner, record_key, writer, safety_selection)
.open_existing_record_inner(
&mut inner,
record_key.clone(),
writer.clone(),
safety_selection.clone(),
)
.await?
{
drop(inner);
@ -593,9 +623,9 @@ impl StorageManager {
// Use the safety selection we opened the record with
let result = self
.outbound_inspect_value(
record_key,
record_key.clone(),
ValueSubkeyRangeSet::single(0),
safety_selection,
safety_selection.clone(),
InspectResult::default(),
false,
)
@ -613,7 +643,12 @@ impl StorageManager {
let mut inner = self.inner.lock().await;
if let Some(res) = self
.open_existing_record_inner(&mut inner, record_key, writer, safety_selection)
.open_existing_record_inner(
&mut inner,
record_key.clone(),
writer.clone(),
safety_selection.clone(),
)
.await?
{
// Don't bother to rehydrate in this edge case
@ -655,7 +690,7 @@ impl StorageManager {
// Attempt to close the record, returning the opened record if it wasn't already closed
let mut inner = self.inner.lock().await;
let keys = inner.opened_records.keys().copied().collect::<Vec<_>>();
let keys = inner.opened_records.keys().cloned().collect::<Vec<_>>();
for key in keys {
Self::close_record_inner(&mut inner, key)?;
}
@ -672,7 +707,7 @@ impl StorageManager {
// Ensure the record is closed
let mut inner = self.inner.lock().await;
Self::close_record_inner(&mut inner, record_key)?;
Self::close_record_inner(&mut inner, record_key.clone())?;
// Get record from the local store
let Some(local_record_store) = inner.local_record_store.as_mut() else {
@ -705,7 +740,7 @@ impl StorageManager {
// See if the requested subkey is our local record store
let last_get_result = self
.handle_get_local_value_inner(&mut inner, record_key, subkey, true)
.handle_get_local_value_inner(&mut inner, record_key.clone(), subkey, true)
.await?;
// Return the existing value if we have one unless we are forcing a refresh
@ -734,7 +769,12 @@ impl StorageManager {
.as_ref()
.map(|v| v.value_data().seq());
let res_rx = self
.outbound_get_value(record_key, subkey, safety_selection, last_get_result)
.outbound_get_value(
record_key.clone(),
subkey,
safety_selection,
last_get_result,
)
.await?;
// Wait for the first result
@ -746,7 +786,7 @@ impl StorageManager {
// Process the returned result
let out = self
.process_outbound_get_value_result(record_key, subkey, opt_last_seq, result)
.process_outbound_get_value_result(record_key.clone(), subkey, opt_last_seq, result)
.await?;
if let Some(out) = &out {
@ -754,7 +794,7 @@ impl StorageManager {
if partial {
self.process_deferred_outbound_get_value_result(
res_rx,
record_key,
record_key.clone(),
subkey,
out.seq(),
);
@ -781,7 +821,7 @@ impl StorageManager {
// Get cryptosystem
let crypto = self.crypto();
let Some(vcrypto) = crypto.get(record_key.kind) else {
let Some(vcrypto) = crypto.get(record_key.kind()) else {
apibail_generic!("unsupported cryptosystem");
};
@ -796,7 +836,10 @@ impl StorageManager {
};
// Use the specified writer, or if not specified, the default writer when the record was opened
let opt_writer = options.as_ref().and_then(|o| o.writer).or(opt_writer);
let opt_writer = options
.as_ref()
.and_then(|o| o.writer.clone())
.or(opt_writer);
let allow_offline = options
.unwrap_or_default()
.allow_offline
@ -809,7 +852,7 @@ impl StorageManager {
// See if the subkey we are modifying has a last known local value
let last_get_result = self
.handle_get_local_value_inner(&mut inner, record_key, subkey, true)
.handle_get_local_value_inner(&mut inner, record_key.clone(), subkey, true)
.await?;
// Get the descriptor and schema for the key
@ -821,20 +864,22 @@ impl StorageManager {
// Make new subkey data
let value_data = if let Some(last_signed_value_data) = last_get_result.opt_value {
if last_signed_value_data.value_data().data() == data
&& last_signed_value_data.value_data().writer() == &writer.key
&& last_signed_value_data.value_data().ref_writer() == &writer.key()
{
// Data and writer is the same, nothing is changing,
// just return that we set it, but no network activity needs to happen
return Ok(None);
}
let seq = last_signed_value_data.value_data().seq();
ValueData::new_with_seq(seq + 1, data, writer.key)?
ValueData::new_with_seq(seq + 1, data, writer.key())?
} else {
ValueData::new(data, writer.key)?
ValueData::new(data, writer.key())?
};
// Validate with schema
if let Err(e) = schema.check_subkey_value_data(descriptor.owner(), subkey, &value_data) {
if let Err(e) =
self.check_subkey_value_data(&schema, descriptor.ref_owner(), subkey, &value_data)
{
veilid_log!(self debug "schema validation error: {}", e);
// Validation failed, ignore this value
apibail_generic!(format!(
@ -846,10 +891,10 @@ impl StorageManager {
// Sign the new value data with the writer
let signed_value_data = Arc::new(SignedValueData::make_signature(
value_data,
descriptor.owner(),
&descriptor.owner(),
subkey,
&vcrypto,
writer.secret,
&writer.bare_secret(),
)?);
// Check if we are offline
@ -872,7 +917,7 @@ impl StorageManager {
// Note that we are writing this subkey in the foreground
// If it appears we are already doing this, then put it to the background/offline queue
let opt_guard = self.mark_active_subkey_write_inner(&mut inner, record_key, subkey);
let opt_guard = self.mark_active_subkey_write_inner(&mut inner, record_key.clone(), subkey);
if opt_guard.is_none() {
if allow_offline == AllowOffline(false) {
apibail_try_again!("offline, try again later");
@ -898,9 +943,9 @@ impl StorageManager {
// Use the safety selection we opened the record with
let res_rx = match self
.outbound_set_value(
record_key,
record_key.clone(),
subkey,
safety_selection,
safety_selection.clone(),
signed_value_data.clone(),
descriptor,
)
@ -917,7 +962,7 @@ impl StorageManager {
if allow_offline == AllowOffline(true) {
self.add_offline_subkey_write_inner(
&mut inner,
record_key,
record_key.clone(),
subkey,
safety_selection,
signed_value_data.clone(),
@ -990,10 +1035,10 @@ impl StorageManager {
// Process the returned result
let out = self
.process_outbound_set_value_result(
record_key,
record_key.clone(),
subkey,
signed_value_data.value_data().clone(),
safety_selection,
safety_selection.clone(),
result,
)
.await?;
@ -1038,10 +1083,10 @@ impl StorageManager {
let opt_value_data = self
.process_outbound_set_value_result(
record_key,
record_key.clone(),
subkey,
signed_value_data.value_data().clone(),
safety_selection,
safety_selection.clone(),
result,
)
.await?;
@ -1080,14 +1125,14 @@ impl StorageManager {
expiration: Timestamp,
count: u32,
) -> VeilidAPIResult<bool> {
let key = watch_lock.tag();
let record_key = watch_lock.tag();
// Obtain the inner state lock
let mut inner = self.inner.lock().await;
// Get the safety selection and the writer we opened this record
let (safety_selection, opt_watcher) = {
let Some(opened_record) = inner.opened_records.get(&key) else {
let Some(opened_record) = inner.opened_records.get(&record_key) else {
// Record must be opened already to change watch
apibail_generic!("record not open");
};
@ -1106,7 +1151,7 @@ impl StorageManager {
// Get the schema so we can truncate the watch to the number of subkeys
let schema = if let Some(lrs) = inner.local_record_store.as_ref() {
let Some(schema) = lrs.peek_record(key, |r| r.schema()) else {
let Some(schema) = lrs.peek_record(&record_key, |r| r.schema()) else {
apibail_generic!("no local record found");
};
schema
@ -1148,7 +1193,7 @@ impl StorageManager {
let active = desired_params.is_some();
inner
.outbound_watch_manager
.set_desired_watch(key, desired_params);
.set_desired_watch(record_key, desired_params);
// Drop the lock for network access
drop(inner);
@ -1168,7 +1213,10 @@ impl StorageManager {
// Obtain the watch change lock
// (may need to wait for background operations to complete on the watch)
let watch_lock = self.outbound_watch_lock_table.lock_tag(record_key).await;
let watch_lock = self
.outbound_watch_lock_table
.lock_tag(record_key.clone())
.await;
// Calculate change to existing watch
let (subkeys, count, expiration_ts) = {
@ -1247,7 +1295,7 @@ impl StorageManager {
// Get cryptosystem
let crypto = self.crypto();
let Some(vcrypto) = crypto.get(record_key.kind) else {
let Some(vcrypto) = crypto.get(record_key.kind()) else {
apibail_generic!("unsupported cryptosystem");
};
@ -1261,7 +1309,7 @@ impl StorageManager {
// See if the requested record is our local record store
let mut local_inspect_result = self
.handle_inspect_local_value_inner(&mut inner, record_key, subkeys.clone(), true)
.handle_inspect_local_value_inner(&mut inner, record_key.clone(), subkeys.clone(), true)
.await?;
// Get the offline subkeys for this record still only returning the ones we're inspecting
@ -1319,7 +1367,7 @@ impl StorageManager {
// Get the inspect record report from the network
let result = self
.outbound_inspect_value(
record_key,
record_key.clone(),
subkeys,
safety_selection,
if matches!(scope, DHTReportScope::SyncGet | DHTReportScope::SyncSet) {
@ -1343,7 +1391,7 @@ impl StorageManager {
Self::process_fanout_results_inner(
&mut inner,
&vcrypto,
record_key,
record_key.clone(),
results_iter,
false,
self.config()
@ -1378,14 +1426,14 @@ impl StorageManager {
let dest = rpc_processor
.resolve_target_to_destination(
vc.target,
vc.target.clone(),
SafetySelection::Unsafe(Sequencing::PreferOrdered),
)
.await
.map_err(VeilidAPIError::from)?;
network_result_value_or_log!(self rpc_processor
.rpc_call_value_changed(dest, vc.record_key, vc.subkeys.clone(), vc.count, vc.watch_id, vc.value.map(|v| (*v).clone()) )
.rpc_call_value_changed(dest, vc.record_key.clone(), vc.subkeys.clone(), vc.count, vc.watch_id, vc.value.map(|v| (*v).clone()) )
.await
.map_err(VeilidAPIError::from)? => [format!(": dest={:?} vc={:?}", dest, vc)] {});
@ -1464,9 +1512,9 @@ impl StorageManager {
inner: &mut StorageManagerInner,
kind: CryptoKind,
schema: DHTSchema,
owner: Option<BareKeyPair>,
owner: Option<KeyPair>,
safety_selection: SafetySelection,
) -> VeilidAPIResult<(RecordKey, BareKeyPair)> {
) -> VeilidAPIResult<(RecordKey, KeyPair)> {
// Get cryptosystem
let crypto = self.crypto();
let Some(vcrypto) = crypto.get(kind) else {
@ -1483,11 +1531,12 @@ impl StorageManager {
let config = self.config();
let cfg = config.get();
if let Some(node_id) = cfg.network.routing_table.node_id.get(kind) {
if schema.is_member(&node_id.value.into()) {
let node_member_id = BareMemberId::new(node_id.ref_value());
if schema.is_member(&node_member_id) {
apibail_invalid_argument!(
"node id can not be schema member",
"schema",
node_id.value
node_id.value()
);
}
}
@ -1497,17 +1546,24 @@ impl StorageManager {
let schema_data = schema.compile();
// New values require a new owner key if not given
let owner = owner.unwrap_or_else(|| vcrypto.generate_keypair());
let owner = if let Some(owner) = owner {
if owner.kind() != vcrypto.kind() {
apibail_invalid_argument!("owner is wrong crypto kind", "owner", owner);
}
owner
} else {
KeyPair::new(vcrypto.kind(), vcrypto.generate_keypair())
};
// Calculate dht key
let dht_key = Self::get_key(&vcrypto, &owner.key, &schema_data);
let record_key = Self::make_record_key(&vcrypto, owner.ref_value().ref_key(), &schema_data);
// Make a signed value descriptor for this dht value
let signed_value_descriptor = Arc::new(SignedValueDescriptor::make_signature(
owner.key,
owner.key(),
schema_data,
&vcrypto,
owner.secret,
owner.bare_secret(),
)?);
// Add new local value record
@ -1516,9 +1572,11 @@ impl StorageManager {
let record =
Record::<LocalRecordDetail>::new(cur_ts, signed_value_descriptor, local_record_detail)?;
local_record_store.new_record(dht_key, record).await?;
local_record_store
.new_record(record_key.clone(), record)
.await?;
Ok((dht_key, owner))
Ok((record_key, owner))
}
#[instrument(level = "trace", target = "stor", skip_all, err)]
@ -1527,7 +1585,7 @@ impl StorageManager {
inner: &mut StorageManagerInner,
record_key: RecordKey,
safety_selection: SafetySelection,
) -> VeilidAPIResult<Option<(BarePublicKey, DHTSchema)>> {
) -> VeilidAPIResult<Option<(PublicKey, DHTSchema)>> {
// Get local record store
let Some(local_record_store) = inner.local_record_store.as_mut() else {
apibail_not_initialized!();
@ -1542,7 +1600,7 @@ impl StorageManager {
// Return record details
r.clone()
};
let Some(remote_record) = remote_record_store.with_record(record_key, rcb) else {
let Some(remote_record) = remote_record_store.with_record(&record_key, rcb) else {
// No local or remote record found, return None
return Ok(None);
};
@ -1555,13 +1613,13 @@ impl StorageManager {
LocalRecordDetail::new(safety_selection),
)?;
local_record_store
.new_record(record_key, local_record)
.new_record(record_key.clone(), local_record)
.await?;
// Move copy subkey data from remote to local store
for subkey in remote_record.stored_subkeys().iter() {
let Some(get_result) = remote_record_store
.get_subkey(record_key, subkey, false)
.get_subkey(record_key.clone(), subkey, false)
.await?
else {
// Subkey was missing
@ -1575,7 +1633,7 @@ impl StorageManager {
};
local_record_store
.set_subkey(
record_key,
record_key.clone(),
subkey,
subkey_data,
InboundWatchUpdateMode::NoUpdate,
@ -1585,15 +1643,17 @@ impl StorageManager {
// Move watches
local_record_store.move_watches(
record_key,
remote_record_store.move_watches(record_key, None),
record_key.clone(),
remote_record_store.move_watches(record_key.clone(), None),
);
// Delete remote record from store
remote_record_store.delete_record(record_key).await?;
remote_record_store
.delete_record(record_key.clone())
.await?;
// Return record information as transferred to local record
Ok(Some((*remote_record.owner(), remote_record.schema())))
Ok(Some((remote_record.owner(), remote_record.schema())))
}
#[instrument(level = "trace", target = "stor", skip_all, err)]
@ -1601,7 +1661,7 @@ impl StorageManager {
&self,
inner: &mut StorageManagerInner,
record_key: RecordKey,
writer: Option<BareKeyPair>,
writer: Option<KeyPair>,
safety_selection: SafetySelection,
) -> VeilidAPIResult<Option<DHTRecordDescriptor>> {
// Get local record store
@ -1614,18 +1674,22 @@ impl StorageManager {
// Process local record
// Keep the safety selection we opened the record with
r.detail_mut().safety_selection = safety_selection;
r.detail_mut().safety_selection = safety_selection.clone();
// Return record details
(*r.owner(), r.schema())
(r.owner(), r.schema())
};
let (owner, schema) = match local_record_store.with_record_mut(record_key, cb) {
let (owner, schema) = match local_record_store.with_record_mut(&record_key, cb) {
Some(v) => v,
None => {
// If we don't have a local record yet, check to see if we have a remote record
// if so, migrate it to a local record
let Some(v) = self
.move_remote_record_to_local_inner(&mut *inner, record_key, safety_selection)
.move_remote_record_to_local_inner(
&mut *inner,
record_key.clone(),
safety_selection.clone(),
)
.await?
else {
// No remote record either
@ -1638,9 +1702,9 @@ impl StorageManager {
// If the writer we chose is also the owner, we have the owner secret
// Otherwise this is just another subkey writer
let owner_secret = if let Some(writer) = writer {
if writer.key == owner {
Some(writer.secret)
let owner_secret = if let Some(writer) = writer.clone() {
if writer.key() == owner {
Some(writer.bare_secret())
} else {
None
}
@ -1651,12 +1715,12 @@ impl StorageManager {
// Write open record
inner
.opened_records
.entry(record_key)
.entry(record_key.clone())
.and_modify(|e| {
e.set_writer(writer);
e.set_safety_selection(safety_selection);
e.set_writer(writer.clone());
e.set_safety_selection(safety_selection.clone());
})
.or_insert_with(|| OpenedRecord::new(writer, safety_selection));
.or_insert_with(|| OpenedRecord::new(writer.clone(), safety_selection.clone()));
// Make DHT Record Descriptor to return
let descriptor = DHTRecordDescriptor::new(record_key, owner, owner_secret, schema);
@ -1668,7 +1732,7 @@ impl StorageManager {
&self,
inner: &mut StorageManagerInner,
record_key: RecordKey,
writer: Option<BareKeyPair>,
writer: Option<KeyPair>,
inspect_result: InspectResult,
safety_selection: SafetySelection,
) -> VeilidAPIResult<DHTRecordDescriptor> {
@ -1683,13 +1747,13 @@ impl StorageManager {
apibail_generic!("no descriptor");
};
// Get owner
let owner = *signed_value_descriptor.owner();
let owner = signed_value_descriptor.owner();
// If the writer we chose is also the owner, we have the owner secret
// Otherwise this is just another subkey writer
let owner_secret = if let Some(writer) = writer {
if writer.key == owner {
Some(writer.secret)
let owner_secret = if let Some(writer) = &writer {
if writer.key() == owner {
Some(writer.bare_secret())
} else {
None
}
@ -1707,14 +1771,17 @@ impl StorageManager {
let record = Record::<LocalRecordDetail>::new(
Timestamp::now(),
signed_value_descriptor,
LocalRecordDetail::new(safety_selection),
LocalRecordDetail::new(safety_selection.clone()),
)?;
local_record_store.new_record(record_key, record).await?;
local_record_store
.new_record(record_key.clone(), record)
.await?;
// Write open record
inner
.opened_records
.insert(record_key, OpenedRecord::new(writer, safety_selection));
inner.opened_records.insert(
record_key.clone(),
OpenedRecord::new(writer, safety_selection),
);
// Make DHT Record Descriptor to return
let descriptor = DHTRecordDescriptor::new(record_key, owner, owner_secret, schema);
@ -1735,14 +1802,14 @@ impl StorageManager {
// Get routing table to see if we still know about these nodes
let routing_table = self.routing_table();
let opt_value_nodes = local_record_store.peek_record(record_key, |r| {
let opt_value_nodes = local_record_store.peek_record(&record_key, |r| {
let d = r.detail();
d.nodes
.keys()
.copied()
.cloned()
.filter_map(|x| {
routing_table
.lookup_node_ref(NodeId::new(record_key.kind, x))
.lookup_node_ref(NodeId::new(record_key.kind(), x))
.ok()
.flatten()
})
@ -1765,14 +1832,14 @@ impl StorageManager {
let local_record_store = inner.local_record_store.as_mut().unwrap();
let cur_ts = Timestamp::now();
local_record_store.with_record_mut(record_key, |r| {
local_record_store.with_record_mut(&record_key, |r| {
let d = r.detail_mut();
for (subkeys, fanout_result) in subkey_results_iter {
for node_id in fanout_result
.value_nodes
.iter()
.filter_map(|x| x.node_ids().get(record_key.kind).map(|k| k.value))
.filter_map(|x| x.node_ids().get(record_key.kind()).map(|k| k.value()))
{
let pnd = d.nodes.entry(node_id).or_default();
if is_set || pnd.last_set == Timestamp::default() {
@ -1787,7 +1854,7 @@ impl StorageManager {
let mut nodes_ts = d
.nodes
.iter()
.map(|kv| (*kv.0, kv.1.last_seen))
.map(|kv| (kv.0.clone(), kv.1.last_seen))
.collect::<Vec<_>>();
nodes_ts.sort_by(|a, b| {
// Timestamp is first metric
@ -1797,12 +1864,12 @@ impl StorageManager {
}
// Distance is the next metric, closer nodes first
let da = vcrypto.distance(
&BareHashDigest::from(a.0),
&BareHashDigest::from(record_key.value),
&BareHashDigest::from(a.0.clone()),
&BareHashDigest::from(record_key.value()),
);
let db = vcrypto.distance(
&BareHashDigest::from(b.0),
&BareHashDigest::from(record_key.value),
&BareHashDigest::from(b.0.clone()),
&BareHashDigest::from(record_key.value()),
);
da.cmp(&db)
});
@ -1820,7 +1887,10 @@ impl StorageManager {
let Some(local_record_store) = inner.local_record_store.as_mut() else {
apibail_not_initialized!();
};
if local_record_store.peek_record(record_key, |_| {}).is_none() {
if local_record_store
.peek_record(&record_key, |_| {})
.is_none()
{
apibail_key_not_found!(record_key);
}
@ -1846,7 +1916,7 @@ impl StorageManager {
// See if the value is in the offline subkey writes first,
// since it may not have been committed yet to the local record store
if let Some(get_result) =
self.get_offline_subkey_writes_subkey(inner, record_key, subkey, want_descriptor)?
self.get_offline_subkey_writes_subkey(inner, &record_key, subkey, want_descriptor)?
{
return Ok(get_result);
}
@ -1880,7 +1950,7 @@ impl StorageManager {
// See if this new data supercedes any offline subkey writes
self.remove_old_offline_subkey_writes_inner(
inner,
record_key,
record_key.clone(),
subkey,
signed_value_data.clone(),
);
@ -1967,7 +2037,7 @@ impl StorageManager {
// See if we have a remote record already or not
if remote_record_store
.with_record(record_key, |_| {})
.with_record(&record_key, |_| {})
.is_none()
{
// record didn't exist, make it
@ -1978,7 +2048,9 @@ impl StorageManager {
signed_value_descriptor,
remote_record_detail,
)?;
remote_record_store.new_record(record_key, record).await?
remote_record_store
.new_record(record_key.clone(), record)
.await?
};
// Write subkey to remote store
@ -2018,19 +2090,6 @@ impl StorageManager {
)
}
fn get_key(
vcrypto: &CryptoSystemGuard<'_>,
owner_key: &BarePublicKey,
schema_data: &[u8],
) -> RecordKey {
let mut hash_data = Vec::<u8>::with_capacity(PUBLIC_KEY_LENGTH + 4 + schema_data.len());
hash_data.extend_from_slice(&vcrypto.kind().0);
hash_data.extend_from_slice(&owner_key.bytes);
hash_data.extend_from_slice(schema_data);
let hash = vcrypto.generate_hash(&hash_data);
RecordKey::new(vcrypto.kind(), BareRecordKey::from(hash))
}
#[instrument(level = "trace", target = "stor", skip_all)]
fn process_deferred_results<T: Send + 'static>(
&self,

View file

@ -49,14 +49,14 @@ impl StorageManager {
pub(super) fn get_offline_subkey_writes_subkey(
&self,
inner: &mut StorageManagerInner,
record_key: RecordKey,
record_key: &RecordKey,
subkey: ValueSubkey,
want_descriptor: bool,
) -> VeilidAPIResult<Option<GetResult>> {
let Some(local_record_store) = inner.local_record_store.as_mut() else {
apibail_not_initialized!();
};
let Some(osw) = inner.offline_subkey_writes.get(&record_key) else {
let Some(osw) = inner.offline_subkey_writes.get(record_key) else {
return Ok(None);
};
let Some(signed_value_data) = osw.subkey_value_data.get(&subkey).cloned() else {
@ -93,7 +93,7 @@ impl StorageManager {
signed_value_data: Arc<SignedValueData>,
) {
// Get the offline subkey write record
match inner.offline_subkey_writes.entry(record_key) {
match inner.offline_subkey_writes.entry(record_key.clone()) {
hashlink::linked_hash_map::Entry::Occupied(mut o) => {
let finished = {
let osw = o.get_mut();
@ -154,7 +154,7 @@ impl StorageManager {
);
// Get the offline subkey write record
match inner.offline_subkey_writes.entry(record_key) {
match inner.offline_subkey_writes.entry(record_key.clone()) {
hashlink::linked_hash_map::Entry::Occupied(mut o) => {
let finished = {
let osw = o.get_mut();

View file

@ -32,7 +32,7 @@ impl fmt::Display for OutboundWatchManager {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let mut out = format!("outbound_watches({}): [\n", self.outbound_watches.len());
{
let mut keys = self.outbound_watches.keys().copied().collect::<Vec<_>>();
let mut keys = self.outbound_watches.keys().cloned().collect::<Vec<_>>();
keys.sort();
for k in keys {
@ -43,7 +43,7 @@ impl fmt::Display for OutboundWatchManager {
out += "]\n";
out += &format!("per_node_states({}): [\n", self.per_node_states.len());
{
let mut keys = self.per_node_states.keys().copied().collect::<Vec<_>>();
let mut keys = self.per_node_states.keys().cloned().collect::<Vec<_>>();
keys.sort();
for k in keys {
@ -60,7 +60,7 @@ impl fmt::Display for OutboundWatchManager {
let mut keys = self
.needs_change_inspection
.keys()
.copied()
.cloned()
.collect::<Vec<_>>();
keys.sort();
@ -92,7 +92,7 @@ impl OutboundWatchManager {
pub fn prepare(&mut self, routing_table: VeilidComponentGuard<'_, RoutingTable>) {
for (pnk, pns) in &mut self.per_node_states {
pns.watch_node_ref = match routing_table.lookup_node_ref(pnk.node_id) {
pns.watch_node_ref = match routing_table.lookup_node_ref(pnk.node_id.clone()) {
Ok(v) => v,
Err(e) => {
veilid_log!(routing_table debug "Error looking up outbound watch node ref: {}", e);
@ -103,7 +103,7 @@ impl OutboundWatchManager {
self.per_node_states
.retain(|_, v| v.watch_node_ref.is_some());
let keys = self.per_node_states.keys().copied().collect::<HashSet<_>>();
let keys = self.per_node_states.keys().cloned().collect::<HashSet<_>>();
for v in self.outbound_watches.values_mut() {
if let Some(state) = v.state_mut() {
@ -133,7 +133,7 @@ impl OutboundWatchManager {
// Watch does not exist, add one if that's what is desired
if let Some(desired) = desired_watch {
self.outbound_watches
.insert(record_key, OutboundWatch::new(record_key, desired));
.insert(record_key.clone(), OutboundWatch::new(record_key, desired));
}
}
}
@ -163,7 +163,7 @@ impl OutboundWatchManager {
for (pnk, pns) in &self.per_node_states {
if pns.count == 0 {
// If per-node watch is done, add to finished list
finished_pnks.insert(*pnk);
finished_pnks.insert(pnk.clone());
} else if !pns
.watch_node_ref
.as_ref()
@ -172,10 +172,10 @@ impl OutboundWatchManager {
.is_alive()
{
// If node is unreachable add to dead list
dead_pnks.insert(*pnk);
dead_pnks.insert(pnk.clone());
} else if cur_ts >= pns.expiration_ts {
// If per-node watch has expired add to expired list
expired_pnks.insert(*pnk);
expired_pnks.insert(pnk.clone());
}
}

View file

@ -13,7 +13,7 @@ pub struct OutboundWatchParameters {
/// Subkeys requested for this watch
pub subkeys: ValueSubkeyRangeSet,
/// What key to use to perform the watch
pub opt_watcher: Option<BareKeyPair>,
pub opt_watcher: Option<KeyPair>,
/// What safety selection to use on the network
pub safety_selection: SafetySelection,
}

View file

@ -163,7 +163,13 @@ impl OutboundWatchState {
self.value_changed_routes = self
.nodes
.iter()
.filter_map(|x| per_node_state.get(x).unwrap().opt_value_changed_route)
.filter_map(|x| {
per_node_state
.get(x)
.cloned()
.unwrap()
.opt_value_changed_route
})
.collect();
res

View file

@ -2,7 +2,7 @@ use super::*;
impl_veilid_log_facility!("stor");
#[derive(Copy, Clone, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)]
#[derive(Clone, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)]
pub(in crate::storage_manager) struct PerNodeKey {
/// Watched record key
pub record_key: RecordKey,
@ -36,7 +36,7 @@ pub(in crate::storage_manager) struct PerNodeState {
/// SafetySelection used to contact the node
pub safety_selection: SafetySelection,
/// What key was used to perform the watch
pub opt_watcher: Option<BareKeyPair>,
pub opt_watcher: Option<KeyPair>,
/// The expiration of a successful watch
pub expiration_ts: Timestamp,
/// How many value change notifications are left

View file

@ -10,7 +10,7 @@ pub struct InboundWatchParameters {
/// How many updates are left before forced expiration
pub count: u32,
/// The watching schema member key, or an anonymous key
pub watcher: BarePublicKey,
pub watcher_member_id: MemberId,
/// The place where updates are sent
pub target: Target,
}

View file

@ -1,63 +1,48 @@
use super::*;
#[derive(Debug, Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash)]
#[derive(Debug, Clone, Eq, PartialEq, Ord, PartialOrd, Hash)]
pub struct RecordTableKey {
pub key: RecordKey,
pub record_key: RecordKey,
}
impl RecordTableKey {
pub fn bytes(&self) -> [u8; HASH_DIGEST_LENGTH + 4] {
let mut bytes = [0u8; HASH_DIGEST_LENGTH + 4];
bytes[0..4].copy_from_slice(&self.key.kind.0);
bytes[4..HASH_DIGEST_LENGTH + 4].copy_from_slice(&self.key.value.bytes);
bytes
pub fn bytes(&self) -> Vec<u8> {
Vec::from(self.record_key.clone())
}
}
impl TryFrom<&[u8]> for RecordTableKey {
type Error = EyreReport;
fn try_from(bytes: &[u8]) -> Result<Self, Self::Error> {
if bytes.len() != HASH_DIGEST_LENGTH + 4 {
bail!("invalid bytes length");
}
let kind = CryptoKind::try_from(&bytes[0..4]).wrap_err("invalid kind")?;
let value =
BareRecordKey::try_from(&bytes[4..HASH_DIGEST_LENGTH + 4]).wrap_err("invalid value")?;
let key = RecordKey::new(kind, value);
Ok(RecordTableKey { key })
let key = RecordKey::try_from(bytes)?;
Ok(RecordTableKey { record_key: key })
}
}
#[derive(Debug, Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash)]
#[derive(Debug, Clone, Eq, PartialEq, Ord, PartialOrd, Hash)]
pub struct SubkeyTableKey {
pub key: RecordKey,
pub record_key: RecordKey,
pub subkey: ValueSubkey,
}
impl SubkeyTableKey {
pub fn bytes(&self) -> [u8; HASH_DIGEST_LENGTH + 4 + 4] {
let mut bytes = [0u8; HASH_DIGEST_LENGTH + 4 + 4];
bytes[0..4].copy_from_slice(&self.key.kind.0);
bytes[4..HASH_DIGEST_LENGTH + 4].copy_from_slice(&self.key.value.bytes);
bytes[HASH_DIGEST_LENGTH + 4..HASH_DIGEST_LENGTH + 4 + 4]
.copy_from_slice(&self.subkey.to_le_bytes());
pub fn bytes(&self) -> Vec<u8> {
let mut bytes = Vec::<_>::from(self.record_key.clone());
bytes.extend_from_slice(&self.subkey.to_le_bytes());
bytes
}
}
impl TryFrom<&[u8]> for SubkeyTableKey {
type Error = EyreReport;
fn try_from(bytes: &[u8]) -> Result<Self, Self::Error> {
if bytes.len() != HASH_DIGEST_LENGTH + 4 {
bail!("invalid bytes length");
}
let kind = CryptoKind::try_from(&bytes[0..4]).wrap_err("invalid kind")?;
let value =
BareRecordKey::try_from(&bytes[4..HASH_DIGEST_LENGTH + 4]).wrap_err("invalid value")?;
let key = RecordKey::try_from(&bytes[0..bytes.len() - 4])?;
let subkey = ValueSubkey::from_le_bytes(
bytes[HASH_DIGEST_LENGTH + 4..HASH_DIGEST_LENGTH + 4 + 4]
bytes[(bytes.len() - 4)..]
.try_into()
.wrap_err("invalid subkey")?,
);
let key = RecordKey::new(kind, value);
Ok(SubkeyTableKey { key, subkey })
Ok(SubkeyTableKey {
record_key: key,
subkey,
})
}
}

View file

@ -284,14 +284,17 @@ where
}
// add to index and ensure we deduplicate in the case of an error
if let Some(v) = self.record_index.insert_with_callback(ri.0, ri.1, |k, v| {
if let Some(v) = self
.record_index
.insert_with_callback(ri.0.clone(), ri.1, |k, v| {
// If the configuration change, we only want to keep the 'limits.max_records' records
dead_records.push(DeadRecord {
key: k,
record: v,
in_total_storage: true,
});
}) {
})
{
// This shouldn't happen, but deduplicate anyway
veilid_log!(self warn "duplicate record in table: {:?}", ri.0);
dead_records.push(DeadRecord {
@ -412,7 +415,7 @@ where
for sk in stored_subkeys.iter() {
// From table
let stk = SubkeyTableKey {
key: dr.key.key,
record_key: dr.key.record_key.clone(),
subkey: sk,
};
let stkb = stk.bytes();
@ -469,8 +472,12 @@ where
}
#[instrument(level = "trace", target = "stor", skip_all, err)]
pub async fn new_record(&mut self, key: RecordKey, record: Record<D>) -> VeilidAPIResult<()> {
let rtk = RecordTableKey { key };
pub async fn new_record(
&mut self,
record_key: RecordKey,
record: Record<D>,
) -> VeilidAPIResult<()> {
let rtk = RecordTableKey { record_key };
if self.record_index.contains_key(&rtk) {
apibail_internal!("record already exists");
}
@ -495,9 +502,12 @@ where
// Save to record index
let mut dead_records = Vec::new();
if let Some(v) = self.record_index.insert_with_callback(rtk, record, |k, v| {
if let Some(v) = self
.record_index
.insert_with_callback(rtk.clone(), record, |k, v| {
dead_records.push((k, v));
}) {
})
{
// Shouldn't happen but log it
veilid_log!(self warn "new duplicate record in table: {:?}", rtk);
self.add_dead_record(rtk, v);
@ -510,13 +520,13 @@ where
}
#[instrument(level = "trace", target = "stor", skip_all, err)]
pub async fn delete_record(&mut self, key: RecordKey) -> VeilidAPIResult<()> {
pub async fn delete_record(&mut self, record_key: RecordKey) -> VeilidAPIResult<()> {
// Get the record table key
let rtk = RecordTableKey { key };
let rtk = RecordTableKey { record_key };
// Remove record from the index
let Some(record) = self.record_index.remove(&rtk) else {
apibail_key_not_found!(key);
apibail_key_not_found!(rtk.record_key.clone());
};
// Remove watches
@ -526,7 +536,7 @@ where
self.changed_watched_values.remove(&rtk);
// Invalidate inspect cache for this key
self.inspect_cache.invalidate(&rtk.key);
self.inspect_cache.invalidate(&rtk.record_key);
// Remove from table store immediately
self.add_dead_record(rtk, record);
@ -536,19 +546,23 @@ where
}
#[instrument(level = "trace", target = "stor", skip_all)]
pub(super) fn contains_record(&mut self, key: RecordKey) -> bool {
let rtk = RecordTableKey { key };
pub(super) fn contains_record(&mut self, record_key: &RecordKey) -> bool {
let rtk = RecordTableKey {
record_key: record_key.clone(),
};
self.record_index.contains_key(&rtk)
}
#[instrument(level = "trace", target = "stor", skip_all)]
pub(super) fn with_record<R, F>(&mut self, key: RecordKey, f: F) -> Option<R>
pub(super) fn with_record<R, F>(&mut self, record_key: &RecordKey, f: F) -> Option<R>
where
F: FnOnce(&Record<D>) -> R,
{
// Get record from index
let mut out = None;
let rtk = RecordTableKey { key };
let rtk = RecordTableKey {
record_key: record_key.clone(),
};
if let Some(record) = self.record_index.get_mut(&rtk) {
// Callback
out = Some(f(record));
@ -566,13 +580,15 @@ where
}
#[instrument(level = "trace", target = "stor", skip_all)]
pub(super) fn peek_record<R, F>(&self, key: RecordKey, f: F) -> Option<R>
pub(super) fn peek_record<R, F>(&self, record_key: &RecordKey, f: F) -> Option<R>
where
F: FnOnce(&Record<D>) -> R,
{
// Get record from index
let mut out = None;
let rtk = RecordTableKey { key };
let rtk = RecordTableKey {
record_key: record_key.clone(),
};
if let Some(record) = self.record_index.peek(&rtk) {
// Callback
out = Some(f(record));
@ -581,13 +597,15 @@ where
}
#[instrument(level = "trace", target = "stor", skip_all)]
pub(super) fn with_record_mut<R, F>(&mut self, key: RecordKey, f: F) -> Option<R>
pub(super) fn with_record_mut<R, F>(&mut self, record_key: &RecordKey, f: F) -> Option<R>
where
F: FnOnce(&mut Record<D>) -> R,
{
// Get record from index
let mut out = None;
let rtk = RecordTableKey { key };
let rtk = RecordTableKey {
record_key: record_key.clone(),
};
if let Some(record) = self.record_index.get_mut(&rtk) {
// Callback
out = Some(f(record));
@ -607,12 +625,13 @@ where
#[instrument(level = "trace", target = "stor", skip_all, err)]
pub async fn get_subkey(
&mut self,
key: RecordKey,
record_key: RecordKey,
subkey: ValueSubkey,
want_descriptor: bool,
) -> VeilidAPIResult<Option<GetResult>> {
// Get record from index
let Some((subkey_count, has_subkey, opt_descriptor)) = self.with_record(key, |record| {
let Some((subkey_count, has_subkey, opt_descriptor)) =
self.with_record(&record_key, |record| {
(
record.subkey_count(),
record.stored_subkeys().contains(subkey),
@ -622,7 +641,8 @@ where
None
},
)
}) else {
})
else {
// Record not available
return Ok(None);
};
@ -642,7 +662,7 @@ where
}
// If subkey exists in subkey cache, use that
let stk = SubkeyTableKey { key, subkey };
let stk = SubkeyTableKey { record_key, subkey };
if let Some(record_data) = self.subkey_cache.get(&stk) {
let out = record_data.signed_value_data().clone();
@ -675,12 +695,13 @@ where
#[instrument(level = "trace", target = "stor", skip_all, err)]
pub async fn peek_subkey(
&self,
key: RecordKey,
record_key: RecordKey,
subkey: ValueSubkey,
want_descriptor: bool,
) -> VeilidAPIResult<Option<GetResult>> {
// record from index
let Some((subkey_count, has_subkey, opt_descriptor)) = self.peek_record(key, |record| {
let Some((subkey_count, has_subkey, opt_descriptor)) =
self.peek_record(&record_key, |record| {
(
record.subkey_count(),
record.stored_subkeys().contains(subkey),
@ -690,7 +711,8 @@ where
None
},
)
}) else {
})
else {
// Record not available
return Ok(None);
};
@ -710,7 +732,7 @@ where
}
// If subkey exists in subkey cache, use that
let stk = SubkeyTableKey { key, subkey };
let stk = SubkeyTableKey { record_key, subkey };
if let Some(record_data) = self.subkey_cache.peek(&stk) {
let out = record_data.signed_value_data().clone();
@ -740,7 +762,7 @@ where
#[instrument(level = "trace", target = "stor", skip_all)]
async fn update_watched_value(
&mut self,
key: RecordKey,
record_key: RecordKey,
subkey: ValueSubkey,
watch_update_mode: InboundWatchUpdateMode,
) {
@ -753,7 +775,7 @@ where
return;
}
let rtk = RecordTableKey { key };
let rtk = RecordTableKey { record_key };
let Some(wr) = self.watched_records.get_mut(&rtk) else {
return;
};
@ -779,7 +801,7 @@ where
#[instrument(level = "trace", target = "stor", skip_all, err)]
pub async fn set_subkey(
&mut self,
key: RecordKey,
record_key: RecordKey,
subkey: ValueSubkey,
signed_value_data: Arc<SignedValueData>,
watch_update_mode: InboundWatchUpdateMode,
@ -794,10 +816,12 @@ where
}
// Get record subkey count and total size of all record subkey data exclusive of structures
let Some((subkey_count, prior_record_data_size)) = self.with_record(key, |record| {
let Some((subkey_count, prior_record_data_size)) = self
.with_record(&record_key, |record| {
(record.subkey_count(), record.record_data_size())
}) else {
apibail_invalid_argument!("no record at this key", "key", key);
})
else {
apibail_invalid_argument!("no record at this key", "key", record_key);
};
// Check if the subkey is in range
@ -809,7 +833,10 @@ where
let mut prior_subkey_size = 0usize;
// If subkey exists in subkey cache, use that
let stk = SubkeyTableKey { key, subkey };
let stk = SubkeyTableKey {
record_key: record_key.clone(),
subkey,
};
let stk_bytes = stk.bytes();
if let Some(record_data) = self.subkey_cache.peek(&stk) {
@ -855,7 +882,7 @@ where
// Write to inspect cache
self.inspect_cache.replace_subkey_seq(
&stk.key,
&stk.record_key,
subkey,
subkey_record_data.signed_value_data().value_data().seq(),
);
@ -864,7 +891,7 @@ where
self.add_to_subkey_cache(stk, subkey_record_data);
// Update record
self.with_record_mut(key, |record| {
self.with_record_mut(&record_key, |record| {
record.store_subkey(subkey);
record.set_record_data_size(new_record_data_size);
})
@ -874,7 +901,7 @@ where
self.total_storage_space.commit().unwrap();
// Send updates to
self.update_watched_value(key, subkey, watch_update_mode)
self.update_watched_value(record_key, subkey, watch_update_mode)
.await;
Ok(())
@ -883,12 +910,12 @@ where
#[instrument(level = "trace", target = "stor", skip_all, err)]
pub async fn inspect_record(
&mut self,
key: RecordKey,
record_key: RecordKey,
subkeys: &ValueSubkeyRangeSet,
want_descriptor: bool,
) -> VeilidAPIResult<Option<InspectResult>> {
// Get record from index
let Some((schema_subkeys, opt_descriptor)) = self.with_record(key, |record| {
let Some((schema_subkeys, opt_descriptor)) = self.with_record(&record_key, |record| {
// Get number of subkeys from schema and ensure we are getting the
// right number of sequence numbers betwen that and what we asked for
let schema_subkeys = record
@ -917,7 +944,7 @@ where
}
// See if we have this inspection cached
if let Some(icv) = self.inspect_cache.get(&key, &schema_subkeys) {
if let Some(icv) = self.inspect_cache.get(&record_key, &schema_subkeys) {
return Ok(Some(InspectResult::new(
self,
subkeys.clone(),
@ -932,7 +959,10 @@ where
#[allow(clippy::unnecessary_cast)]
let mut seqs = Vec::with_capacity(schema_subkeys.len() as usize);
for subkey in schema_subkeys.iter() {
let stk = SubkeyTableKey { key, subkey };
let stk = SubkeyTableKey {
record_key: record_key.clone(),
subkey,
};
let opt_seq = if let Some(record_data) = self.subkey_cache.peek(&stk) {
Some(record_data.signed_value_data().value_data().seq())
} else {
@ -949,7 +979,7 @@ where
// Save seqs cache
self.inspect_cache.put(
key,
record_key,
schema_subkeys.clone(),
InspectCacheL2Value { seqs: seqs.clone() },
);
@ -967,7 +997,7 @@ where
#[instrument(level = "trace", target = "stor", skip_all, err)]
pub async fn _change_existing_watch(
&mut self,
key: RecordKey,
record_key: RecordKey,
params: InboundWatchParameters,
watch_id: u64,
) -> VeilidAPIResult<InboundWatchResult> {
@ -978,7 +1008,7 @@ where
apibail_internal!("zero expiration should have been resolved to max by now");
}
// Get the watch list for this record
let rtk = RecordTableKey { key };
let rtk = RecordTableKey { record_key };
let Some(watch_list) = self.watched_records.get_mut(&rtk) else {
// No watches, nothing to change
return Ok(InboundWatchResult::Rejected);
@ -988,7 +1018,7 @@ where
for w in &mut watch_list.watches {
// If the watch id doesn't match, then we're not updating
// Also do not allow the watcher key to change
if w.id == watch_id && w.params.watcher == params.watcher {
if w.id == watch_id && w.params.watcher_member_id == params.watcher_member_id {
// Updating an existing watch
w.params = params;
return Ok(InboundWatchResult::Changed {
@ -1004,12 +1034,14 @@ where
#[instrument(level = "trace", target = "stor", skip_all, err)]
pub async fn _create_new_watch(
&mut self,
key: RecordKey,
record_key: RecordKey,
params: InboundWatchParameters,
member_check: Box<dyn Fn(BarePublicKey) -> bool + Send>,
member_check: Box<dyn Fn(&MemberId) -> bool + Send>,
) -> VeilidAPIResult<InboundWatchResult> {
// Generate a record-unique watch id > 0
let rtk = RecordTableKey { key };
let rtk = RecordTableKey {
record_key: record_key.clone(),
};
let mut id = 0;
while id == 0 {
id = get_random_u64();
@ -1036,19 +1068,18 @@ where
let mut watch_count = 0;
let mut target_watch_count = 0;
let is_member = member_check(params.watcher);
let is_member = member_check(&params.watcher_member_id);
let rtk = RecordTableKey { key };
if let Some(watched_record) = self.watched_records.get_mut(&rtk) {
// Total up the number of watches for this key
for w in &mut watched_record.watches {
// See if this watch should be counted toward any limits
let count_watch = if is_member {
// If the watcher is a member of the schema, then consider the total per-watcher key
w.params.watcher == params.watcher
w.params.watcher_member_id == params.watcher_member_id
} else {
// If the watcher is not a member of the schema, the check if this watch is an anonymous watch and contributes to per-record key total
!member_check(w.params.watcher)
!member_check(&w.params.watcher_member_id)
};
// For any watch, if the target matches our also tally that separately
@ -1095,14 +1126,16 @@ where
#[instrument(level = "trace", target = "stor", skip_all, err)]
pub async fn watch_record(
&mut self,
key: RecordKey,
record_key: RecordKey,
mut params: InboundWatchParameters,
opt_watch_id: Option<u64>,
) -> VeilidAPIResult<InboundWatchResult> {
// If count is zero then we're cancelling a watch completely
if params.count == 0 {
if let Some(watch_id) = opt_watch_id {
let cancelled = self.cancel_watch(key, watch_id, params.watcher).await?;
let cancelled = self
.cancel_watch(record_key.clone(), watch_id, params.watcher_member_id)
.await?;
if cancelled {
return Ok(InboundWatchResult::Cancelled);
}
@ -1122,7 +1155,9 @@ where
} else if params.expiration.as_u64() < min_ts {
// Don't add watches with too low of an expiration time
if let Some(watch_id) = opt_watch_id {
let cancelled = self.cancel_watch(key, watch_id, params.watcher).await?;
let cancelled = self
.cancel_watch(record_key, watch_id, params.watcher_member_id)
.await?;
if cancelled {
return Ok(InboundWatchResult::Cancelled);
}
@ -1131,20 +1166,26 @@ where
}
// Make a closure to check for member vs anonymous
let Some(member_check) = self.with_record(key, |record| {
let Some((schema, owner)) = self.with_record(&record_key, |record| {
let schema = record.schema();
let owner = *record.owner();
Box::new(move |watcher| owner == watcher || schema.is_member(&watcher))
let owner = record.owner();
(schema, owner)
}) else {
// Record not found
return Ok(InboundWatchResult::Rejected);
};
let owner_member_id = self.storage_manager().generate_member_id(&owner)?;
let member_check = Box::new(move |watcher: &MemberId| {
owner_member_id == *watcher || schema.is_member(watcher.ref_value())
});
// Create or update depending on if a watch id is specified or not
if let Some(watch_id) = opt_watch_id {
self._change_existing_watch(key, params, watch_id).await
self._change_existing_watch(record_key, params, watch_id)
.await
} else {
self._create_new_watch(key, params, member_check).await
self._create_new_watch(record_key, params, member_check)
.await
}
}
@ -1153,22 +1194,22 @@ where
#[instrument(level = "trace", target = "stor", skip_all, err)]
async fn cancel_watch(
&mut self,
key: RecordKey,
record_key: RecordKey,
watch_id: u64,
watcher: BarePublicKey,
watcher_member_id: MemberId,
) -> VeilidAPIResult<bool> {
if watch_id == 0 {
apibail_internal!("should not have let a zero watch id get here");
}
// See if we are cancelling an existing watch
let rtk = RecordTableKey { key };
let rtk = RecordTableKey { record_key };
let mut is_empty = false;
let mut ret = false;
if let Some(watch_list) = self.watched_records.get_mut(&rtk) {
let mut dead_watcher = None;
for (wn, w) in watch_list.watches.iter_mut().enumerate() {
// Must match the watch id and the watcher key to cancel
if w.id == watch_id && w.params.watcher == watcher {
if w.id == watch_id && w.params.watcher_member_id == watcher_member_id {
// Canceling an existing watch
dead_watcher = Some(wn);
ret = true;
@ -1193,15 +1234,15 @@ where
#[instrument(level = "trace", target = "stor", skip_all)]
pub fn move_watches(
&mut self,
key: RecordKey,
record_key: RecordKey,
in_watch: Option<(InboundWatchList, bool)>,
) -> Option<(InboundWatchList, bool)> {
let rtk = RecordTableKey { key };
let rtk = RecordTableKey { record_key };
let out = self.watched_records.remove(&rtk);
if let Some(in_watch) = in_watch {
self.watched_records.insert(rtk, in_watch.0);
self.watched_records.insert(rtk.clone(), in_watch.0);
if in_watch.1 {
self.changed_watched_values.insert(rtk);
self.changed_watched_values.insert(rtk.clone());
}
}
let is_watched = self.changed_watched_values.remove(&rtk);
@ -1263,8 +1304,8 @@ where
}
evcis.push(EarlyValueChangedInfo {
target: w.params.target,
key: rtk.key,
target: w.params.target.clone(),
key: rtk.record_key.clone(),
subkeys,
count,
watch_id: w.id,
@ -1276,6 +1317,7 @@ where
watch.watches.remove(dw);
if watch.watches.is_empty() {
empty_watched_records.push(rtk);
break;
}
}
}
@ -1290,7 +1332,7 @@ where
veilid_log!(self error "first subkey should exist for value change notification");
continue;
};
let get_result = match self.get_subkey(evci.key, first_subkey, false).await {
let get_result = match self.get_subkey(evci.key.clone(), first_subkey, false).await {
Ok(Some(skr)) => skr,
Ok(None) => {
veilid_log!(self error "subkey should have data for value change notification");
@ -1345,7 +1387,7 @@ where
for (rik, rec) in &self.record_index {
out += &format!(
" {} age={} len={} subkeys={}\n",
rik.key,
rik.record_key,
display_duration(get_timestamp() - rec.last_touched().as_u64()),
rec.record_data_size(),
rec.stored_subkeys(),
@ -1359,21 +1401,21 @@ where
out += &format!("Total Storage Space: {}\n", self.total_storage_space.get());
out += &format!("Dead Records: {}\n", self.dead_records.len());
for dr in &self.dead_records {
out += &format!(" {}\n", dr.key.key);
out += &format!(" {}\n", dr.key.record_key);
}
out += &format!("Changed Records: {}\n", self.changed_records.len());
for cr in &self.changed_records {
out += &format!(" {}\n", cr.key);
out += &format!(" {}\n", cr.record_key);
}
out
}
pub fn debug_record_info(&self, key: RecordKey) -> String {
pub fn debug_record_info(&self, record_key: RecordKey) -> String {
let record_info = self
.peek_record(key, |r| format!("{:#?}", r))
.peek_record(&record_key, |r| format!("{:#?}", r))
.unwrap_or("Not found".to_owned());
let watched_record = match self.watched_records.get(&RecordTableKey { key }) {
let watched_record = match self.watched_records.get(&RecordTableKey { record_key }) {
Some(w) => {
format!("Remote Watches: {:#?}", w)
}
@ -1382,8 +1424,12 @@ where
format!("{}\n{}\n", record_info, watched_record)
}
pub async fn debug_record_subkey_info(&self, key: RecordKey, subkey: ValueSubkey) -> String {
match self.peek_subkey(key, subkey, true).await {
pub async fn debug_record_subkey_info(
&self,
record_key: RecordKey,
subkey: ValueSubkey,
) -> String {
match self.peek_subkey(record_key, subkey, true).await {
Ok(Some(v)) => {
format!("{:#?}", v)
}

View file

@ -7,29 +7,29 @@ pub(in crate::storage_manager) struct OpenedRecord {
/// The key pair used to perform writes to subkey on this opened record
/// Without this, set_value() will fail regardless of which key or subkey is being written to
/// as all writes are signed
writer: Option<BareKeyPair>,
writer: Option<KeyPair>,
/// The safety selection in current use
safety_selection: SafetySelection,
}
impl OpenedRecord {
pub fn new(writer: Option<BareKeyPair>, safety_selection: SafetySelection) -> Self {
pub fn new(writer: Option<KeyPair>, safety_selection: SafetySelection) -> Self {
Self {
writer,
safety_selection,
}
}
pub fn writer(&self) -> Option<&BareKeyPair> {
pub fn writer(&self) -> Option<&KeyPair> {
self.writer.as_ref()
}
pub fn set_writer(&mut self, writer: Option<BareKeyPair>) {
pub fn set_writer(&mut self, writer: Option<KeyPair>) {
self.writer = writer;
}
pub fn safety_selection(&self) -> SafetySelection {
self.safety_selection
self.safety_selection.clone()
}
pub fn set_safety_selection(&mut self, safety_selection: SafetySelection) {
self.safety_selection = safety_selection;

View file

@ -37,7 +37,7 @@ where
pub fn descriptor(&self) -> Arc<SignedValueDescriptor> {
self.descriptor.clone()
}
pub fn owner(&self) -> &BarePublicKey {
pub fn owner(&self) -> PublicKey {
self.descriptor.owner()
}

Some files were not shown because too many files have changed in this diff Show more