Switch crypto to typed keys everywhere

Christien Rioux 2025-08-22 16:53:37 -04:00
parent 88f69ce237
commit 848da0ae4e
180 changed files with 8532 additions and 8488 deletions


@@ -20,6 +20,8 @@
 - This will only be a breaking change for anyone utilizing the previous `writer` argument.
 - `writer` is now a member of `SetDHTValueOptions`, alongside the new `allow_offline` property.
 - Eliminated DHTW capability, merged into DHTV capability, now there is only one DHT enabling/disabling capability and all operations are part of it
+- Crypto / CryptoSystem functions now use typed keys everywhere (#483)
+- Eliminated 'best' CryptoKind concept, crypto kinds must now be explicitly stated, otherwise upgrades of veilid-core that change the 'best' CryptoKind could break functionality silently.
 - veilid-core:
 - Add private route example
@@ -29,6 +31,7 @@
 - Improved `TypedXXX` conversion traits, including to and from `Vec<u8>`
 - Ensure utf8 replacement characters are never emitted in logs
 - Export `CRYPTO_KIND_VLD0` constant
+- Added SequenceOrdering enum to represent ordering mode for protocols rather than a bool
 - veilid-python:
 - Correction of type hints
@@ -38,6 +41,10 @@
 - veilid-server:
 - Use `detect_address_changes: auto` by default
+- veilid-wasm:
+- Reorganize crate and add `js` and `dart` features to enable different target bindings
+- Revamp bindings for `js` to eliminate excessive strings and improve marshaling
 **Changed in Veilid 0.4.7**
 - _BREAKING API CHANGES_:
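As a rough illustration of the two crypto-related entries above, this sketch shows the caller-side shape of the typed-key API. It is assembled from signatures that appear later in this diff (crypto.get, generate_keypair, random_shared_secret, check_shared_secret); the function name and error message are illustrative only, not part of the commit.

// Illustrative sketch only: crypto kinds are now named explicitly, and key
// material carries its CryptoKind instead of being bare bytes.
fn typed_crypto_example(crypto: &Crypto) -> VeilidAPIResult<KeyPair> {
    // There is no implicit 'best' kind anymore; ask for VLD0 by name.
    let Some(vcrypto) = crypto.get(CRYPTO_KIND_VLD0) else {
        apibail_generic!("VLD0 crypto system is not enabled");
    };
    // generate_keypair() now returns a typed KeyPair tagged with CRYPTO_KIND_VLD0
    // rather than a bare, kind-less keypair.
    let keypair = vcrypto.generate_keypair();
    // Typed values can be validated against the system that will consume them.
    let secret = vcrypto.random_shared_secret();
    vcrypto.check_shared_secret(&secret)?;
    Ok(keypair)
}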

Cargo.lock (generated): 963 changed lines; diff suppressed because it is too large.


@@ -281,7 +281,7 @@ unit-tests-wasm-linux:
 FROM +code-linux
 # Just run build now because actual unit tests require network access
 # which should be moved to a separate integration test
-RUN veilid-wasm/wasm_build.sh release
+RUN veilid-wasm/wasm_build_dart.sh release
 unit-tests-linux:
 WAIT


@@ -2,10 +2,10 @@
 SCRIPTDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
 pushd $SCRIPTDIR/.. >/dev/null
-cargo-zigbuild clippy --target x86_64-unknown-linux-gnu
-cargo-zigbuild clippy --target x86_64-unknown-linux-gnu --manifest-path=veilid-server/Cargo.toml --no-default-features --features=default-async-std
-cargo-zigbuild clippy --target x86_64-pc-windows-gnu
-cargo-zigbuild clippy --target aarch64-apple-darwin
-cargo clippy --manifest-path=veilid-wasm/Cargo.toml --target wasm32-unknown-unknown
+cargo-zigbuild clippy --target x86_64-unknown-linux-gnu $@
+cargo-zigbuild clippy --target x86_64-unknown-linux-gnu --manifest-path=veilid-server/Cargo.toml --no-default-features --features=default-async-std $@
+cargo-zigbuild clippy --target x86_64-pc-windows-gnu $@
+cargo-zigbuild clippy --target aarch64-apple-darwin $@
+cargo clippy --manifest-path=veilid-wasm/Cargo.toml --target wasm32-unknown-unknown $@
 popd >/dev/null


@@ -233,6 +233,7 @@ serde_bytes = { version = "0.11", default-features = false, features = [
 ] }
 tsify = { version = "0.5.5", features = ["js"] }
 serde-wasm-bindgen = "0.6.5"
+wasm-bindgen-derive = "0.3.0"
 # Network
 ws_stream_wasm = "0.7.4"


@@ -184,13 +184,13 @@ async fn create_route(
     veilid_api_scope(update_callback, config, |veilid_api| async move {
         // Create a new private route endpoint
-        let (route_id, route_blob) =
+        let RouteBlob { route_id, blob } =
             try_again_loop(|| async { veilid_api.new_private_route().await }).await?;
         // Print the blob
         println!(
             "Route id created: {route_id}\nConnect with this private route blob:\ncargo run --example private-route-example -- --connect {}",
-            data_encoding::BASE64.encode(&route_blob)
+            data_encoding::BASE64.encode(&blob)
         );
         // Wait for enter key to exit the application


@ -1,203 +0,0 @@
use super::*;
pub(crate) const VEILID_DOMAIN_API: &[u8] = b"VEILID_API";
pub trait CryptoSystem {
// Accessors
fn kind(&self) -> CryptoKind;
fn crypto(&self) -> VeilidComponentGuard<'_, Crypto>;
// Cached Operations
fn cached_dh(
&self,
key: &BarePublicKey,
secret: &BareSecretKey,
) -> VeilidAPIResult<BareSharedSecret>;
// Generation
fn random_bytes(&self, len: u32) -> Vec<u8>;
fn hash_password(&self, password: &[u8], salt: &[u8]) -> VeilidAPIResult<String>;
fn verify_password(&self, password: &[u8], password_hash: &str) -> VeilidAPIResult<bool>;
fn derive_shared_secret(
&self,
password: &[u8],
salt: &[u8],
) -> VeilidAPIResult<BareSharedSecret>;
fn random_nonce(&self) -> BareNonce;
fn random_shared_secret(&self) -> BareSharedSecret;
fn compute_dh(
&self,
key: &BarePublicKey,
secret: &BareSecretKey,
) -> VeilidAPIResult<BareSharedSecret>;
fn generate_shared_secret(
&self,
key: &BarePublicKey,
secret: &BareSecretKey,
domain: &[u8],
) -> VeilidAPIResult<BareSharedSecret> {
let dh = self.compute_dh(key, secret)?;
Ok(BareSharedSecret::from(
self.generate_hash(&[&dh, domain, VEILID_DOMAIN_API].concat()),
))
}
fn generate_keypair(&self) -> BareKeyPair;
fn generate_hash(&self, data: &[u8]) -> BareHashDigest;
fn generate_hash_reader(
&self,
reader: &mut dyn std::io::Read,
) -> VeilidAPIResult<BarePublicKey>;
// Validation
fn shared_secret_length(&self) -> usize;
fn nonce_length(&self) -> usize;
fn hash_digest_length(&self) -> usize;
fn public_key_length(&self) -> usize;
fn secret_key_length(&self) -> usize;
fn signature_length(&self) -> usize;
fn default_salt_length(&self) -> usize;
fn aead_overhead(&self) -> usize;
fn check_shared_secret(&self, secret: &BareSharedSecret) -> VeilidAPIResult<()> {
if secret.len() != self.shared_secret_length() {
apibail_generic!(format!(
"invalid shared secret length: {} != {}",
secret.len(),
self.shared_secret_length()
));
}
Ok(())
}
fn check_nonce(&self, nonce: &BareNonce) -> VeilidAPIResult<()> {
if nonce.len() != self.nonce_length() {
apibail_generic!(format!(
"invalid nonce length: {} != {}",
nonce.len(),
self.nonce_length()
));
}
Ok(())
}
fn check_hash_digest(&self, hash: &BareHashDigest) -> VeilidAPIResult<()> {
if hash.len() != self.hash_digest_length() {
apibail_generic!(format!(
"invalid hash digest length: {} != {}",
hash.len(),
self.hash_digest_length()
));
}
Ok(())
}
fn check_public_key(&self, key: &BarePublicKey) -> VeilidAPIResult<()> {
if key.len() != self.public_key_length() {
apibail_generic!(format!(
"invalid public key length: {} != {}",
key.len(),
self.public_key_length()
));
}
Ok(())
}
fn check_secret_key(&self, key: &BareSecretKey) -> VeilidAPIResult<()> {
if key.len() != self.secret_key_length() {
apibail_generic!(format!(
"invalid secret key length: {} != {}",
key.len(),
self.secret_key_length()
));
}
Ok(())
}
fn check_signature(&self, signature: &BareSignature) -> VeilidAPIResult<()> {
if signature.len() != self.signature_length() {
apibail_generic!(format!(
"invalid signature length: {} != {}",
signature.len(),
self.signature_length()
));
}
Ok(())
}
fn validate_keypair(&self, key: &BarePublicKey, secret: &BareSecretKey) -> bool;
fn validate_hash(&self, data: &[u8], hash: &BareHashDigest) -> bool;
fn validate_hash_reader(
&self,
reader: &mut dyn std::io::Read,
hash: &BareHashDigest,
) -> VeilidAPIResult<bool>;
// Distance Metric
fn distance(&self, hash1: &BareHashDigest, hash2: &BareHashDigest) -> BareHashDistance;
// Authentication
fn sign(
&self,
key: &BarePublicKey,
secret: &BareSecretKey,
data: &[u8],
) -> VeilidAPIResult<BareSignature>;
fn verify(
&self,
key: &BarePublicKey,
data: &[u8],
signature: &BareSignature,
) -> VeilidAPIResult<bool>;
// AEAD Encrypt/Decrypt
fn decrypt_in_place_aead(
&self,
body: &mut Vec<u8>,
nonce: &BareNonce,
shared_secret: &BareSharedSecret,
associated_data: Option<&[u8]>,
) -> VeilidAPIResult<()>;
fn decrypt_aead(
&self,
body: &[u8],
nonce: &BareNonce,
shared_secret: &BareSharedSecret,
associated_data: Option<&[u8]>,
) -> VeilidAPIResult<Vec<u8>>;
fn encrypt_in_place_aead(
&self,
body: &mut Vec<u8>,
nonce: &BareNonce,
shared_secret: &BareSharedSecret,
associated_data: Option<&[u8]>,
) -> VeilidAPIResult<()>;
fn encrypt_aead(
&self,
body: &[u8],
nonce: &BareNonce,
shared_secret: &BareSharedSecret,
associated_data: Option<&[u8]>,
) -> VeilidAPIResult<Vec<u8>>;
// NoAuth Encrypt/Decrypt
fn crypt_in_place_no_auth(
&self,
body: &mut [u8],
nonce: &BareNonce,
shared_secret: &BareSharedSecret,
) -> VeilidAPIResult<()>;
fn crypt_b2b_no_auth(
&self,
in_buf: &[u8],
out_buf: &mut [u8],
nonce: &BareNonce,
shared_secret: &BareSharedSecret,
) -> VeilidAPIResult<()>;
fn crypt_no_auth_aligned_8(
&self,
body: &[u8],
nonce: &BareNonce,
shared_secret: &BareSharedSecret,
) -> VeilidAPIResult<Vec<u8>>;
fn crypt_no_auth_unaligned(
&self,
body: &[u8],
nonce: &BareNonce,
shared_secret: &BareSharedSecret,
) -> VeilidAPIResult<Vec<u8>>;
}


@ -0,0 +1,213 @@
use super::*;
mod blake3digest512;
#[cfg(feature = "enable-crypto-none")]
pub(crate) mod none;
#[cfg(feature = "enable-crypto-vld0")]
pub(crate) mod vld0;
// #[cfg(feature = "enable-crypto-vld1")]
// pub(crate) mod vld1;
pub(crate) const VEILID_DOMAIN_API: &[u8] = b"VEILID_API";
#[cfg(feature = "enable-crypto-none")]
pub use none::sizes::*;
#[cfg(feature = "enable-crypto-none")]
pub use none::*;
#[cfg(feature = "enable-crypto-vld0")]
pub use vld0::sizes::*;
#[cfg(feature = "enable-crypto-vld0")]
pub use vld0::*;
// #[cfg(feature = "enable-crypto-vld1")]
// pub use vld1::*;
pub use blake3digest512::*;
pub trait CryptoSystem {
// Accessors
fn kind(&self) -> CryptoKind;
fn crypto(&self) -> VeilidComponentGuard<'_, Crypto>;
// Cached Operations
fn cached_dh(&self, key: &PublicKey, secret: &SecretKey) -> VeilidAPIResult<SharedSecret>;
// Generation
fn random_bytes(&self, len: u32) -> Vec<u8>;
fn hash_password(&self, password: &[u8], salt: &[u8]) -> VeilidAPIResult<String>;
fn verify_password(&self, password: &[u8], password_hash: &str) -> VeilidAPIResult<bool>;
fn derive_shared_secret(&self, password: &[u8], salt: &[u8]) -> VeilidAPIResult<SharedSecret>;
fn random_nonce(&self) -> Nonce;
fn random_shared_secret(&self) -> SharedSecret;
fn compute_dh(&self, key: &PublicKey, secret: &SecretKey) -> VeilidAPIResult<SharedSecret>;
fn generate_shared_secret(
&self,
key: &PublicKey,
secret: &SecretKey,
domain: &[u8],
) -> VeilidAPIResult<SharedSecret> {
let dh = self.compute_dh(key, secret)?;
let hash = self.generate_hash(&[&dh.into_value(), domain, VEILID_DOMAIN_API].concat());
Ok(SharedSecret::new(
hash.kind(),
BareSharedSecret::new(&hash.into_value()),
))
}
fn generate_keypair(&self) -> KeyPair;
fn generate_hash(&self, data: &[u8]) -> HashDigest;
fn generate_hash_reader(&self, reader: &mut dyn std::io::Read) -> VeilidAPIResult<PublicKey>;
// Validation
fn shared_secret_length(&self) -> usize;
fn nonce_length(&self) -> usize;
fn hash_digest_length(&self) -> usize;
fn public_key_length(&self) -> usize;
fn secret_key_length(&self) -> usize;
fn signature_length(&self) -> usize;
fn default_salt_length(&self) -> usize;
fn aead_overhead(&self) -> usize;
fn check_shared_secret(&self, secret: &SharedSecret) -> VeilidAPIResult<()> {
if secret.kind() != self.kind() {
apibail_generic!("incorrect shared secret kind");
}
if secret.value().len() != self.shared_secret_length() {
apibail_generic!(format!(
"invalid shared secret length: {} != {}",
secret.value().len(),
self.shared_secret_length()
));
}
Ok(())
}
fn check_nonce(&self, nonce: &Nonce) -> VeilidAPIResult<()> {
if nonce.len() != self.nonce_length() {
apibail_generic!(format!(
"invalid nonce length: {} != {}",
nonce.len(),
self.nonce_length()
));
}
Ok(())
}
fn check_hash_digest(&self, hash: &HashDigest) -> VeilidAPIResult<()> {
if hash.kind() != self.kind() {
apibail_generic!("incorrect hash digest kind");
}
if hash.value().len() != self.hash_digest_length() {
apibail_generic!(format!(
"invalid hash digest length: {} != {}",
hash.value().len(),
self.hash_digest_length()
));
}
Ok(())
}
fn check_public_key(&self, key: &PublicKey) -> VeilidAPIResult<()> {
if key.kind() != self.kind() {
apibail_generic!("incorrect public key kind");
}
if key.value().len() != self.public_key_length() {
apibail_generic!(format!(
"invalid public key length: {} != {}",
key.value().len(),
self.public_key_length()
));
}
Ok(())
}
fn check_secret_key(&self, key: &SecretKey) -> VeilidAPIResult<()> {
if key.kind() != self.kind() {
apibail_generic!("incorrect secret key kind");
}
if key.value().len() != self.secret_key_length() {
apibail_generic!(format!(
"invalid secret key length: {} != {}",
key.value().len(),
self.secret_key_length()
));
}
Ok(())
}
fn check_signature(&self, signature: &Signature) -> VeilidAPIResult<()> {
if signature.kind() != self.kind() {
apibail_generic!("incorrect signature kind");
}
if signature.value().len() != self.signature_length() {
apibail_generic!(format!(
"invalid signature length: {} != {}",
signature.value().len(),
self.signature_length()
));
}
Ok(())
}
fn validate_keypair(&self, key: &PublicKey, secret: &SecretKey) -> VeilidAPIResult<bool>;
fn validate_hash(&self, data: &[u8], hash: &HashDigest) -> VeilidAPIResult<bool>;
fn validate_hash_reader(
&self,
reader: &mut dyn std::io::Read,
hash: &HashDigest,
) -> VeilidAPIResult<bool>;
// Authentication
fn sign(&self, key: &PublicKey, secret: &SecretKey, data: &[u8]) -> VeilidAPIResult<Signature>;
fn verify(&self, key: &PublicKey, data: &[u8], signature: &Signature) -> VeilidAPIResult<bool>;
// AEAD Encrypt/Decrypt
fn decrypt_in_place_aead(
&self,
body: &mut Vec<u8>,
nonce: &Nonce,
shared_secret: &SharedSecret,
associated_data: Option<&[u8]>,
) -> VeilidAPIResult<()>;
fn decrypt_aead(
&self,
body: &[u8],
nonce: &Nonce,
shared_secret: &SharedSecret,
associated_data: Option<&[u8]>,
) -> VeilidAPIResult<Vec<u8>>;
fn encrypt_in_place_aead(
&self,
body: &mut Vec<u8>,
nonce: &Nonce,
shared_secret: &SharedSecret,
associated_data: Option<&[u8]>,
) -> VeilidAPIResult<()>;
fn encrypt_aead(
&self,
body: &[u8],
nonce: &Nonce,
shared_secret: &SharedSecret,
associated_data: Option<&[u8]>,
) -> VeilidAPIResult<Vec<u8>>;
// NoAuth Encrypt/Decrypt
fn crypt_in_place_no_auth(
&self,
body: &mut [u8],
nonce: &Nonce,
shared_secret: &SharedSecret,
) -> VeilidAPIResult<()>;
fn crypt_b2b_no_auth(
&self,
in_buf: &[u8],
out_buf: &mut [u8],
nonce: &Nonce,
shared_secret: &SharedSecret,
) -> VeilidAPIResult<()>;
fn crypt_no_auth_aligned_8(
&self,
body: &[u8],
nonce: &Nonce,
shared_secret: &SharedSecret,
) -> VeilidAPIResult<Vec<u8>>;
fn crypt_no_auth_unaligned(
&self,
body: &[u8],
nonce: &Nonce,
shared_secret: &SharedSecret,
) -> VeilidAPIResult<Vec<u8>>;
}
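A hedged usage sketch of the trait above: it builds a typed SharedSecret with the same `new(kind, bare)` construction used by the VLD0 and NONE implementations later in this commit, then runs it through check_shared_secret. The function name is illustrative and not part of the commit.

// Illustrative sketch only: typed values carry a CryptoKind alongside their
// bare bytes, and the check_* helpers verify both the kind and the length.
fn typed_secret_example<C: CryptoSystem>(vcrypto: &C) -> VeilidAPIResult<SharedSecret> {
    // Build a typed SharedSecret tagged with this system's kind.
    let bytes = vcrypto.random_bytes(vcrypto.shared_secret_length() as u32);
    let secret = SharedSecret::new(vcrypto.kind(), BareSharedSecret::new(&bytes));
    // Passes: the kind matches this system and the length is correct. A secret
    // tagged with another system's kind would now fail with "incorrect shared
    // secret kind" before any length comparison happens.
    vcrypto.check_shared_secret(&secret)?;
    Ok(secret)
}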


@@ -6,11 +6,11 @@ use data_encoding::BASE64URL_NOPAD;
 use digest::rand_core::RngCore;
 use digest::Digest;
 const NONE_AEAD_OVERHEAD: usize = NONE_PUBLIC_KEY_LENGTH;
-pub const CRYPTO_KIND_NONE: CryptoKind = CryptoKind(*b"NONE");
+pub const CRYPTO_KIND_NONE: CryptoKind = CryptoKind::new(*b"NONE");
 pub const CRYPTO_KIND_NONE_FOURCC: u32 = u32::from_be_bytes(*b"NONE");
 pub use sizes::*;
-pub fn none_generate_keypair() -> BareKeyPair {
+pub fn none_generate_keypair() -> KeyPair {
     let mut csprng = VeilidRng {};
     let mut pub_bytes = [0u8; NONE_PUBLIC_KEY_LENGTH];
     let mut sec_bytes = [0u8; NONE_SECRET_KEY_LENGTH];
@@ -20,7 +20,7 @@ pub fn none_generate_keypair() -> BareKeyPair {
     }
     let dht_key = BarePublicKey::new(&pub_bytes);
     let dht_key_secret = BareSecretKey::new(&sec_bytes);
-    BareKeyPair::new(dht_key, dht_key_secret)
+    KeyPair::new(CRYPTO_KIND_NONE, BareKeyPair::new(dht_key, dht_key_secret))
 }
 fn do_xor_32(a: &[u8], b: &[u8]) -> VeilidAPIResult<[u8; 32]> {
@ -93,11 +93,7 @@ impl CryptoSystem for CryptoSystemNONE {
} }
// Cached Operations // Cached Operations
fn cached_dh( fn cached_dh(&self, key: &PublicKey, secret: &SecretKey) -> VeilidAPIResult<SharedSecret> {
&self,
key: &BarePublicKey,
secret: &BareSecretKey,
) -> VeilidAPIResult<BareSharedSecret> {
self.crypto() self.crypto()
.cached_dh_internal::<CryptoSystemNONE>(self, key, secret) .cached_dh_internal::<CryptoSystemNONE>(self, key, secret)
} }
@ -128,50 +124,51 @@ impl CryptoSystem for CryptoSystemNONE {
Ok(self.hash_password(password, &salt)? == password_hash) Ok(self.hash_password(password, &salt)? == password_hash)
} }
fn derive_shared_secret( fn derive_shared_secret(&self, password: &[u8], salt: &[u8]) -> VeilidAPIResult<SharedSecret> {
&self,
password: &[u8],
salt: &[u8],
) -> VeilidAPIResult<BareSharedSecret> {
if salt.len() < Salt::MIN_LENGTH || salt.len() > Salt::MAX_LENGTH { if salt.len() < Salt::MIN_LENGTH || salt.len() > Salt::MAX_LENGTH {
apibail_generic!("invalid salt length"); apibail_generic!("invalid salt length");
} }
Ok(BareSharedSecret::new( Ok(SharedSecret::new(
blake3::hash(self.hash_password(password, salt)?.as_bytes()).as_bytes(), CRYPTO_KIND_NONE,
BareSharedSecret::new(
blake3::hash(self.hash_password(password, salt)?.as_bytes()).as_bytes(),
),
)) ))
} }
fn random_nonce(&self) -> BareNonce { fn random_nonce(&self) -> Nonce {
let mut nonce = [0u8; NONE_NONCE_LENGTH]; let mut nonce = [0u8; NONE_NONCE_LENGTH];
random_bytes(&mut nonce); random_bytes(&mut nonce);
BareNonce::new(&nonce) Nonce::new(&nonce)
} }
fn random_shared_secret(&self) -> BareSharedSecret { fn random_shared_secret(&self) -> SharedSecret {
let mut s = [0u8; NONE_SHARED_SECRET_LENGTH]; let mut s = [0u8; NONE_SHARED_SECRET_LENGTH];
random_bytes(&mut s); random_bytes(&mut s);
BareSharedSecret::new(&s) SharedSecret::new(CRYPTO_KIND_NONE, BareSharedSecret::new(&s))
} }
fn compute_dh( fn compute_dh(&self, key: &PublicKey, secret: &SecretKey) -> VeilidAPIResult<SharedSecret> {
&self, let s = do_xor_32(key.ref_value(), secret.ref_value())?;
key: &BarePublicKey, Ok(SharedSecret::new(
secret: &BareSecretKey, CRYPTO_KIND_NONE,
) -> VeilidAPIResult<BareSharedSecret> { BareSharedSecret::new(&s),
let s = do_xor_32(key, secret)?; ))
Ok(BareSharedSecret::new(&s))
} }
fn generate_keypair(&self) -> BareKeyPair { fn generate_keypair(&self) -> KeyPair {
none_generate_keypair() none_generate_keypair()
} }
fn generate_hash(&self, data: &[u8]) -> BareHashDigest { fn generate_hash(&self, data: &[u8]) -> HashDigest {
BareHashDigest::new(blake3::hash(data).as_bytes()) HashDigest::new(
CRYPTO_KIND_NONE,
BareHashDigest::new(blake3::hash(data).as_bytes()),
)
} }
fn generate_hash_reader( fn generate_hash_reader(&self, reader: &mut dyn std::io::Read) -> VeilidAPIResult<PublicKey> {
&self,
reader: &mut dyn std::io::Read,
) -> VeilidAPIResult<BarePublicKey> {
let mut hasher = blake3::Hasher::new(); let mut hasher = blake3::Hasher::new();
std::io::copy(reader, &mut hasher).map_err(VeilidAPIError::generic)?; std::io::copy(reader, &mut hasher).map_err(VeilidAPIError::generic)?;
Ok(BarePublicKey::new(hasher.finalize().as_bytes())) Ok(PublicKey::new(
CRYPTO_KIND_NONE,
BarePublicKey::new(hasher.finalize().as_bytes()),
))
} }
// Validation // Validation
@ -200,49 +197,56 @@ impl CryptoSystem for CryptoSystemNONE {
NONE_SIGNATURE_LENGTH NONE_SIGNATURE_LENGTH
} }
fn validate_keypair(&self, dht_key: &BarePublicKey, dht_key_secret: &BareSecretKey) -> bool { fn validate_keypair(
&self,
public_key: &PublicKey,
secret_key: &SecretKey,
) -> VeilidAPIResult<bool> {
self.check_public_key(public_key)?;
self.check_secret_key(secret_key)?;
let data = vec![0u8; 512]; let data = vec![0u8; 512];
let Ok(sig) = self.sign(dht_key, dht_key_secret, &data) else { let Ok(sig) = self.sign(public_key, secret_key, &data) else {
return false; return Ok(false);
}; };
let Ok(v) = self.verify(dht_key, &data, &sig) else { let Ok(v) = self.verify(public_key, &data, &sig) else {
return false; return Ok(false);
}; };
v Ok(v)
} }
fn validate_hash(&self, data: &[u8], dht_key: &BareHashDigest) -> bool { fn validate_hash(&self, data: &[u8], hash_digest: &HashDigest) -> VeilidAPIResult<bool> {
let bytes = *blake3::hash(data).as_bytes(); self.check_hash_digest(hash_digest)?;
bytes == dht_key.bytes() let out_hash = blake3::hash(data);
let bytes = out_hash.as_bytes();
Ok(*bytes == **hash_digest.ref_value())
} }
fn validate_hash_reader( fn validate_hash_reader(
&self, &self,
reader: &mut dyn std::io::Read, reader: &mut dyn std::io::Read,
dht_key: &BareHashDigest, hash_digest: &HashDigest,
) -> VeilidAPIResult<bool> { ) -> VeilidAPIResult<bool> {
self.check_hash_digest(hash_digest)?;
let mut hasher = blake3::Hasher::new(); let mut hasher = blake3::Hasher::new();
std::io::copy(reader, &mut hasher).map_err(VeilidAPIError::generic)?; std::io::copy(reader, &mut hasher).map_err(VeilidAPIError::generic)?;
let bytes = *hasher.finalize().as_bytes(); let out_hash = hasher.finalize();
Ok(bytes == dht_key.bytes()) let bytes = out_hash.as_bytes();
} Ok(*bytes == **hash_digest.ref_value())
// Distance Metric
fn distance(&self, key1: &BareHashDigest, key2: &BareHashDigest) -> BareHashDistance {
let mut bytes = [0u8; NONE_HASH_DIGEST_LENGTH];
for (n, byte) in bytes.iter_mut().enumerate() {
*byte = key1[n] ^ key2[n];
}
BareHashDistance::new(&bytes)
} }
// Authentication // Authentication
fn sign( fn sign(
&self, &self,
dht_key: &BarePublicKey, public_key: &PublicKey,
dht_key_secret: &BareSecretKey, secret_key: &SecretKey,
data: &[u8], data: &[u8],
) -> VeilidAPIResult<BareSignature> { ) -> VeilidAPIResult<Signature> {
if !is_bytes_eq_32(&do_xor_32(dht_key, dht_key_secret)?, 0xFFu8)? { self.check_public_key(public_key)?;
self.check_secret_key(secret_key)?;
if !is_bytes_eq_32(
&do_xor_32(public_key.ref_value(), secret_key.ref_value())?,
0xFFu8,
)? {
return Err(VeilidAPIError::parse_error( return Err(VeilidAPIError::parse_error(
"Keypair is invalid", "Keypair is invalid",
"invalid keys", "invalid keys",
@ -255,31 +259,43 @@ impl CryptoSystem for CryptoSystemNONE {
let in_sig_bytes: [u8; NONE_SIGNATURE_LENGTH] = sig.into(); let in_sig_bytes: [u8; NONE_SIGNATURE_LENGTH] = sig.into();
let mut sig_bytes = [0u8; NONE_SIGNATURE_LENGTH]; let mut sig_bytes = [0u8; NONE_SIGNATURE_LENGTH];
sig_bytes[0..32].copy_from_slice(&in_sig_bytes[0..32]); sig_bytes[0..32].copy_from_slice(&in_sig_bytes[0..32]);
sig_bytes[32..64].copy_from_slice(&do_xor_32(&in_sig_bytes[32..64], dht_key_secret)?); sig_bytes[32..64]
let dht_sig = BareSignature::new(&sig_bytes); .copy_from_slice(&do_xor_32(&in_sig_bytes[32..64], secret_key.ref_value())?);
let dht_sig = Signature::new(CRYPTO_KIND_NONE, BareSignature::new(&sig_bytes));
println!("DEBUG dht_sig: {:?}", dht_sig); println!("DEBUG dht_sig: {:?}", dht_sig);
Ok(dht_sig) Ok(dht_sig)
} }
fn verify( fn verify(
&self, &self,
dht_key: &BarePublicKey, public_key: &PublicKey,
data: &[u8], data: &[u8],
signature: &BareSignature, signature: &Signature,
) -> VeilidAPIResult<bool> { ) -> VeilidAPIResult<bool> {
self.check_public_key(public_key)?;
self.check_signature(signature)?;
let mut dig = Blake3Digest512::new(); let mut dig = Blake3Digest512::new();
dig.update(data); dig.update(data);
let sig = dig.finalize(); let sig = dig.finalize();
let in_sig_bytes: [u8; NONE_SIGNATURE_LENGTH] = sig.into(); let in_sig_bytes: [u8; NONE_SIGNATURE_LENGTH] = sig.into();
let mut verify_bytes = [0u8; NONE_SIGNATURE_LENGTH]; let mut verify_bytes = [0u8; NONE_SIGNATURE_LENGTH];
verify_bytes[0..32].copy_from_slice(&do_xor_32(&in_sig_bytes[0..32], &signature[0..32])?); verify_bytes[0..32].copy_from_slice(&do_xor_32(
verify_bytes[32..64] &in_sig_bytes[0..32],
.copy_from_slice(&do_xor_32(&in_sig_bytes[32..64], &signature[32..64])?); &signature.ref_value()[0..32],
)?);
verify_bytes[32..64].copy_from_slice(&do_xor_32(
&in_sig_bytes[32..64],
&signature.ref_value()[32..64],
)?);
if !is_bytes_eq_32(&verify_bytes[0..32], 0u8)? { if !is_bytes_eq_32(&verify_bytes[0..32], 0u8)? {
return Ok(false); return Ok(false);
} }
if !is_bytes_eq_32(&do_xor_32(&verify_bytes[32..64], dht_key)?, 0xFFu8)? { if !is_bytes_eq_32(
&do_xor_32(&verify_bytes[32..64], public_key.ref_value())?,
0xFFu8,
)? {
return Ok(false); return Ok(false);
} }
@ -290,13 +306,16 @@ impl CryptoSystem for CryptoSystemNONE {
fn decrypt_in_place_aead( fn decrypt_in_place_aead(
&self, &self,
body: &mut Vec<u8>, body: &mut Vec<u8>,
nonce: &BareNonce, nonce: &Nonce,
shared_secret: &BareSharedSecret, shared_secret: &SharedSecret,
_associated_data: Option<&[u8]>, _associated_data: Option<&[u8]>,
) -> VeilidAPIResult<()> { ) -> VeilidAPIResult<()> {
self.check_nonce(nonce)?;
self.check_shared_secret(shared_secret)?;
let mut blob = nonce.to_vec(); let mut blob = nonce.to_vec();
blob.extend_from_slice(&[0u8; 8]); blob.extend_from_slice(&[0u8; 8]);
let blob = do_xor_32(&blob, shared_secret)?; let blob = do_xor_32(&blob, shared_secret.ref_value())?;
if body.len() < NONE_AEAD_OVERHEAD { if body.len() < NONE_AEAD_OVERHEAD {
return Err(VeilidAPIError::generic("invalid length")); return Err(VeilidAPIError::generic("invalid length"));
@ -311,10 +330,13 @@ impl CryptoSystem for CryptoSystemNONE {
fn decrypt_aead( fn decrypt_aead(
&self, &self,
body: &[u8], body: &[u8],
nonce: &BareNonce, nonce: &Nonce,
shared_secret: &BareSharedSecret, shared_secret: &SharedSecret,
associated_data: Option<&[u8]>, associated_data: Option<&[u8]>,
) -> VeilidAPIResult<Vec<u8>> { ) -> VeilidAPIResult<Vec<u8>> {
self.check_nonce(nonce)?;
self.check_shared_secret(shared_secret)?;
let mut out = body.to_vec(); let mut out = body.to_vec();
self.decrypt_in_place_aead(&mut out, nonce, shared_secret, associated_data) self.decrypt_in_place_aead(&mut out, nonce, shared_secret, associated_data)
.map_err(map_to_string) .map_err(map_to_string)
@ -325,13 +347,16 @@ impl CryptoSystem for CryptoSystemNONE {
fn encrypt_in_place_aead( fn encrypt_in_place_aead(
&self, &self,
body: &mut Vec<u8>, body: &mut Vec<u8>,
nonce: &BareNonce, nonce: &Nonce,
shared_secret: &BareSharedSecret, shared_secret: &SharedSecret,
_associated_data: Option<&[u8]>, _associated_data: Option<&[u8]>,
) -> VeilidAPIResult<()> { ) -> VeilidAPIResult<()> {
self.check_nonce(nonce)?;
self.check_shared_secret(shared_secret)?;
let mut blob = nonce.to_vec(); let mut blob = nonce.to_vec();
blob.extend_from_slice(&[0u8; 8]); blob.extend_from_slice(&[0u8; 8]);
let blob = do_xor_32(&blob, shared_secret)?; let blob = do_xor_32(&blob, shared_secret.ref_value())?;
do_xor_inplace(body, &blob)?; do_xor_inplace(body, &blob)?;
body.append(&mut blob.to_vec()); body.append(&mut blob.to_vec());
Ok(()) Ok(())
@ -340,10 +365,13 @@ impl CryptoSystem for CryptoSystemNONE {
fn encrypt_aead( fn encrypt_aead(
&self, &self,
body: &[u8], body: &[u8],
nonce: &BareNonce, nonce: &Nonce,
shared_secret: &BareSharedSecret, shared_secret: &SharedSecret,
associated_data: Option<&[u8]>, associated_data: Option<&[u8]>,
) -> VeilidAPIResult<Vec<u8>> { ) -> VeilidAPIResult<Vec<u8>> {
self.check_nonce(nonce)?;
self.check_shared_secret(shared_secret)?;
let mut out = body.to_vec(); let mut out = body.to_vec();
self.encrypt_in_place_aead(&mut out, nonce, shared_secret, associated_data) self.encrypt_in_place_aead(&mut out, nonce, shared_secret, associated_data)
.map_err(map_to_string) .map_err(map_to_string)
@ -355,12 +383,15 @@ impl CryptoSystem for CryptoSystemNONE {
fn crypt_in_place_no_auth( fn crypt_in_place_no_auth(
&self, &self,
body: &mut [u8], body: &mut [u8],
nonce: &BareNonce, nonce: &Nonce,
shared_secret: &BareSharedSecret, shared_secret: &SharedSecret,
) -> VeilidAPIResult<()> { ) -> VeilidAPIResult<()> {
self.check_nonce(nonce)?;
self.check_shared_secret(shared_secret)?;
let mut blob = nonce.to_vec(); let mut blob = nonce.to_vec();
blob.extend_from_slice(&[0u8; 8]); blob.extend_from_slice(&[0u8; 8]);
let blob = do_xor_32(&blob, shared_secret)?; let blob = do_xor_32(&blob, shared_secret.ref_value())?;
do_xor_inplace(body, &blob) do_xor_inplace(body, &blob)
} }
@ -368,21 +399,27 @@ impl CryptoSystem for CryptoSystemNONE {
&self, &self,
in_buf: &[u8], in_buf: &[u8],
out_buf: &mut [u8], out_buf: &mut [u8],
nonce: &BareNonce, nonce: &Nonce,
shared_secret: &BareSharedSecret, shared_secret: &SharedSecret,
) -> VeilidAPIResult<()> { ) -> VeilidAPIResult<()> {
self.check_nonce(nonce)?;
self.check_shared_secret(shared_secret)?;
let mut blob = nonce.to_vec(); let mut blob = nonce.to_vec();
blob.extend_from_slice(&[0u8; 8]); blob.extend_from_slice(&[0u8; 8]);
let blob = do_xor_32(&blob, shared_secret)?; let blob = do_xor_32(&blob, shared_secret.ref_value())?;
do_xor_b2b(in_buf, out_buf, &blob) do_xor_b2b(in_buf, out_buf, &blob)
} }
fn crypt_no_auth_aligned_8( fn crypt_no_auth_aligned_8(
&self, &self,
in_buf: &[u8], in_buf: &[u8],
nonce: &BareNonce, nonce: &Nonce,
shared_secret: &BareSharedSecret, shared_secret: &SharedSecret,
) -> VeilidAPIResult<Vec<u8>> { ) -> VeilidAPIResult<Vec<u8>> {
self.check_nonce(nonce)?;
self.check_shared_secret(shared_secret)?;
let mut out_buf = unsafe { aligned_8_u8_vec_uninit(in_buf.len()) }; let mut out_buf = unsafe { aligned_8_u8_vec_uninit(in_buf.len()) };
self.crypt_b2b_no_auth(in_buf, &mut out_buf, nonce, shared_secret)?; self.crypt_b2b_no_auth(in_buf, &mut out_buf, nonce, shared_secret)?;
Ok(out_buf) Ok(out_buf)
@ -391,9 +428,12 @@ impl CryptoSystem for CryptoSystemNONE {
fn crypt_no_auth_unaligned( fn crypt_no_auth_unaligned(
&self, &self,
in_buf: &[u8], in_buf: &[u8],
nonce: &BareNonce, nonce: &Nonce,
shared_secret: &BareSharedSecret, shared_secret: &SharedSecret,
) -> VeilidAPIResult<Vec<u8>> { ) -> VeilidAPIResult<Vec<u8>> {
self.check_nonce(nonce)?;
self.check_shared_secret(shared_secret)?;
let mut out_buf = unsafe { unaligned_u8_vec_uninit(in_buf.len()) }; let mut out_buf = unsafe { unaligned_u8_vec_uninit(in_buf.len()) };
self.crypt_b2b_no_auth(in_buf, &mut out_buf, nonce, shared_secret)?; self.crypt_b2b_no_auth(in_buf, &mut out_buf, nonce, shared_secret)?;
Ok(out_buf) Ok(out_buf)


@@ -19,13 +19,14 @@ const VLD0_DOMAIN_SIGN: &[u8] = b"VLD0_SIGN";
 const VLD0_DOMAIN_CRYPT: &[u8] = b"VLD0_CRYPT";
 const VLD0_AEAD_OVERHEAD: usize = 16;
-pub const CRYPTO_KIND_VLD0: CryptoKind = CryptoKind(*b"VLD0");
+pub const CRYPTO_KIND_VLD0: CryptoKind = CryptoKind::new(*b"VLD0");
 pub const CRYPTO_KIND_VLD0_FOURCC: u32 = u32::from_be_bytes(*b"VLD0");
 pub use sizes::*;
-fn public_to_x25519_pk(public: &BarePublicKey) -> VeilidAPIResult<xd::PublicKey> {
+fn public_to_x25519_pk(public: &PublicKey) -> VeilidAPIResult<xd::PublicKey> {
     let pk_ed = ed::VerifyingKey::from_bytes(
         public
+            .ref_value()
             .bytes()
             .try_into()
             .map_err(VeilidAPIError::internal)?,
@@ -33,11 +34,11 @@ fn public_to_x25519_pk(public: &BarePublicKey) -> VeilidAPIResult<xd::PublicKey>
     .map_err(VeilidAPIError::internal)?;
     Ok(xd::PublicKey::from(*pk_ed.to_montgomery().as_bytes()))
 }
-fn secret_to_x25519_sk(secret: &BareSecretKey) -> VeilidAPIResult<xd::StaticSecret> {
+fn secret_to_x25519_sk(secret: &SecretKey) -> VeilidAPIResult<xd::StaticSecret> {
     // NOTE: ed::SigningKey.to_scalar() does not produce an unreduced scalar, we want the raw bytes here
     // See https://github.com/dalek-cryptography/curve25519-dalek/issues/565
     let hash: [u8; VLD0_SIGNATURE_LENGTH] = ed::Sha512::default()
-        .chain_update(secret.bytes())
+        .chain_update(secret.ref_value().bytes())
         .finalize()
         .into();
     let mut output = [0u8; VLD0_SECRET_KEY_LENGTH];
@@ -46,14 +47,14 @@ fn secret_to_x25519_sk(secret: &BareSecretKey) -> VeilidAPIResult<xd::StaticSecr
     Ok(xd::StaticSecret::from(output))
 }
-pub(crate) fn vld0_generate_keypair() -> BareKeyPair {
+pub(crate) fn vld0_generate_keypair() -> KeyPair {
     let mut csprng = VeilidRng {};
     let signing_key = ed::SigningKey::generate(&mut csprng);
     let verifying_key = signing_key.verifying_key();
     let public_key = BarePublicKey::new(&verifying_key.to_bytes());
     let secret_key = BareSecretKey::new(&signing_key.to_bytes());
-    BareKeyPair::new(public_key, secret_key)
+    KeyPair::new(CRYPTO_KIND_VLD0, BareKeyPair::new(public_key, secret_key))
 }
 /// V0 CryptoSystem
@ -80,11 +81,7 @@ impl CryptoSystem for CryptoSystemVLD0 {
// Cached Operations // Cached Operations
#[instrument(level = "trace", skip_all)] #[instrument(level = "trace", skip_all)]
fn cached_dh( fn cached_dh(&self, key: &PublicKey, secret: &SecretKey) -> VeilidAPIResult<SharedSecret> {
&self,
key: &BarePublicKey,
secret: &BareSecretKey,
) -> VeilidAPIResult<BareSharedSecret> {
self.crypto() self.crypto()
.cached_dh_internal::<CryptoSystemVLD0>(self, key, secret) .cached_dh_internal::<CryptoSystemVLD0>(self, key, secret)
} }
@ -125,11 +122,7 @@ impl CryptoSystem for CryptoSystemVLD0 {
} }
#[instrument(level = "trace", target = "crypto", skip_all)] #[instrument(level = "trace", target = "crypto", skip_all)]
fn derive_shared_secret( fn derive_shared_secret(&self, password: &[u8], salt: &[u8]) -> VeilidAPIResult<SharedSecret> {
&self,
password: &[u8],
salt: &[u8],
) -> VeilidAPIResult<BareSharedSecret> {
if salt.len() < Salt::MIN_LENGTH || salt.len() > Salt::MAX_LENGTH { if salt.len() < Salt::MIN_LENGTH || salt.len() > Salt::MAX_LENGTH {
apibail_generic!("invalid salt length"); apibail_generic!("invalid salt length");
} }
@ -141,29 +134,28 @@ impl CryptoSystem for CryptoSystemVLD0 {
argon2 argon2
.hash_password_into(password, salt, &mut output_key_material) .hash_password_into(password, salt, &mut output_key_material)
.map_err(VeilidAPIError::generic)?; .map_err(VeilidAPIError::generic)?;
Ok(BareSharedSecret::new(&output_key_material)) Ok(SharedSecret::new(
CRYPTO_KIND_VLD0,
BareSharedSecret::new(&output_key_material),
))
} }
#[instrument(level = "trace", target = "crypto", skip_all)] #[instrument(level = "trace", target = "crypto", skip_all)]
fn random_nonce(&self) -> BareNonce { fn random_nonce(&self) -> Nonce {
let mut nonce = [0u8; VLD0_NONCE_LENGTH]; let mut nonce = [0u8; VLD0_NONCE_LENGTH];
random_bytes(&mut nonce); random_bytes(&mut nonce);
BareNonce::new(&nonce) Nonce::new(&nonce)
} }
#[instrument(level = "trace", target = "crypto", skip_all)] #[instrument(level = "trace", target = "crypto", skip_all)]
fn random_shared_secret(&self) -> BareSharedSecret { fn random_shared_secret(&self) -> SharedSecret {
let mut s = [0u8; VLD0_SHARED_SECRET_LENGTH]; let mut s = [0u8; VLD0_SHARED_SECRET_LENGTH];
random_bytes(&mut s); random_bytes(&mut s);
BareSharedSecret::new(&s) SharedSecret::new(CRYPTO_KIND_VLD0, BareSharedSecret::new(&s))
} }
#[instrument(level = "trace", target = "crypto", skip_all)] #[instrument(level = "trace", target = "crypto", skip_all)]
fn compute_dh( fn compute_dh(&self, key: &PublicKey, secret: &SecretKey) -> VeilidAPIResult<SharedSecret> {
&self,
key: &BarePublicKey,
secret: &BareSecretKey,
) -> VeilidAPIResult<BareSharedSecret> {
let pk_xd = public_to_x25519_pk(key)?; let pk_xd = public_to_x25519_pk(key)?;
let sk_xd = secret_to_x25519_sk(secret)?; let sk_xd = secret_to_x25519_sk(secret)?;
@ -174,27 +166,33 @@ impl CryptoSystem for CryptoSystemVLD0 {
hasher.update(&dh_bytes); hasher.update(&dh_bytes);
let output = hasher.finalize(); let output = hasher.finalize();
Ok(BareSharedSecret::new(output.as_bytes())) Ok(SharedSecret::new(
CRYPTO_KIND_VLD0,
BareSharedSecret::new(output.as_bytes()),
))
} }
#[instrument(level = "trace", target = "crypto", skip_all)] #[instrument(level = "trace", target = "crypto", skip_all)]
fn generate_keypair(&self) -> BareKeyPair { fn generate_keypair(&self) -> KeyPair {
vld0_generate_keypair() vld0_generate_keypair()
} }
#[instrument(level = "trace", target = "crypto", skip_all)] #[instrument(level = "trace", target = "crypto", skip_all)]
fn generate_hash(&self, data: &[u8]) -> BareHashDigest { fn generate_hash(&self, data: &[u8]) -> HashDigest {
BareHashDigest::new(blake3::hash(data).as_bytes()) HashDigest::new(
CRYPTO_KIND_VLD0,
BareHashDigest::new(blake3::hash(data).as_bytes()),
)
} }
#[instrument(level = "trace", target = "crypto", skip_all)] #[instrument(level = "trace", target = "crypto", skip_all)]
fn generate_hash_reader( fn generate_hash_reader(&self, reader: &mut dyn std::io::Read) -> VeilidAPIResult<PublicKey> {
&self,
reader: &mut dyn std::io::Read,
) -> VeilidAPIResult<BarePublicKey> {
let mut hasher = blake3::Hasher::new(); let mut hasher = blake3::Hasher::new();
std::io::copy(reader, &mut hasher).map_err(VeilidAPIError::generic)?; std::io::copy(reader, &mut hasher).map_err(VeilidAPIError::generic)?;
Ok(BarePublicKey::new(hasher.finalize().as_bytes())) Ok(PublicKey::new(
CRYPTO_KIND_VLD0,
BarePublicKey::new(hasher.finalize().as_bytes()),
))
} }
// Validation // Validation
@ -224,61 +222,63 @@ impl CryptoSystem for CryptoSystemVLD0 {
} }
#[instrument(level = "trace", target = "crypto", skip_all)] #[instrument(level = "trace", target = "crypto", skip_all)]
fn validate_keypair(&self, public_key: &BarePublicKey, secret_key: &BareSecretKey) -> bool { fn validate_keypair(
&self,
public_key: &PublicKey,
secret_key: &SecretKey,
) -> VeilidAPIResult<bool> {
self.check_public_key(public_key)?;
self.check_secret_key(secret_key)?;
let data = vec![0u8; 512]; let data = vec![0u8; 512];
let Ok(sig) = self.sign(public_key, secret_key, &data) else { let Ok(sig) = self.sign(public_key, secret_key, &data) else {
return false; return Ok(false);
}; };
let Ok(v) = self.verify(public_key, &data, &sig) else { let Ok(v) = self.verify(public_key, &data, &sig) else {
return false; return Ok(false);
}; };
v Ok(v)
} }
#[instrument(level = "trace", target = "crypto", skip_all)] #[instrument(level = "trace", target = "crypto", skip_all)]
fn validate_hash(&self, data: &[u8], hash_digest: &BareHashDigest) -> bool { fn validate_hash(&self, data: &[u8], hash_digest: &HashDigest) -> VeilidAPIResult<bool> {
self.check_hash_digest(hash_digest)?;
let bytes = *blake3::hash(data).as_bytes(); let bytes = *blake3::hash(data).as_bytes();
bytes == hash_digest.bytes() Ok(bytes == hash_digest.ref_value().bytes())
} }
#[instrument(level = "trace", target = "crypto", skip_all)] #[instrument(level = "trace", target = "crypto", skip_all)]
fn validate_hash_reader( fn validate_hash_reader(
&self, &self,
reader: &mut dyn std::io::Read, reader: &mut dyn std::io::Read,
hash_digest: &BareHashDigest, hash_digest: &HashDigest,
) -> VeilidAPIResult<bool> { ) -> VeilidAPIResult<bool> {
self.check_hash_digest(hash_digest)?;
let mut hasher = blake3::Hasher::new(); let mut hasher = blake3::Hasher::new();
std::io::copy(reader, &mut hasher).map_err(VeilidAPIError::generic)?; std::io::copy(reader, &mut hasher).map_err(VeilidAPIError::generic)?;
let bytes = *hasher.finalize().as_bytes(); let bytes = *hasher.finalize().as_bytes();
Ok(bytes == hash_digest.bytes()) Ok(bytes == hash_digest.ref_value().bytes())
}
// Distance Metric
#[instrument(level = "trace", target = "crypto", skip_all)]
fn distance(&self, hash1: &BareHashDigest, hash2: &BareHashDigest) -> BareHashDistance {
let mut bytes = [0u8; VLD0_HASH_DIGEST_LENGTH];
(0..VLD0_HASH_DIGEST_LENGTH).for_each(|n| {
bytes[n] = hash1[n] ^ hash2[n];
});
BareHashDistance::new(&bytes)
} }
// Authentication // Authentication
#[instrument(level = "trace", target = "crypto", skip_all)] #[instrument(level = "trace", target = "crypto", skip_all)]
fn sign( fn sign(
&self, &self,
public_key: &BarePublicKey, public_key: &PublicKey,
secret_key: &BareSecretKey, secret_key: &SecretKey,
data: &[u8], data: &[u8],
) -> VeilidAPIResult<BareSignature> { ) -> VeilidAPIResult<Signature> {
self.check_public_key(public_key)?;
self.check_secret_key(secret_key)?;
let mut kpb: [u8; VLD0_SECRET_KEY_LENGTH + VLD0_PUBLIC_KEY_LENGTH] = let mut kpb: [u8; VLD0_SECRET_KEY_LENGTH + VLD0_PUBLIC_KEY_LENGTH] =
[0u8; VLD0_SECRET_KEY_LENGTH + VLD0_PUBLIC_KEY_LENGTH]; [0u8; VLD0_SECRET_KEY_LENGTH + VLD0_PUBLIC_KEY_LENGTH];
kpb[..VLD0_SECRET_KEY_LENGTH].copy_from_slice(secret_key); kpb[..VLD0_SECRET_KEY_LENGTH].copy_from_slice(secret_key.ref_value().bytes());
kpb[VLD0_SECRET_KEY_LENGTH..].copy_from_slice(public_key); kpb[VLD0_SECRET_KEY_LENGTH..].copy_from_slice(public_key.ref_value().bytes());
let keypair = ed::SigningKey::from_keypair_bytes(&kpb) let keypair = ed::SigningKey::from_keypair_bytes(&kpb)
.map_err(|e| VeilidAPIError::parse_error("Keypair is invalid", e))?; .map_err(|e| VeilidAPIError::parse_error("Keypair is invalid", e))?;
@ -289,7 +289,7 @@ impl CryptoSystem for CryptoSystemVLD0 {
.sign_prehashed(dig, Some(VLD0_DOMAIN_SIGN)) .sign_prehashed(dig, Some(VLD0_DOMAIN_SIGN))
.map_err(VeilidAPIError::internal)?; .map_err(VeilidAPIError::internal)?;
let sig = BareSignature::new(&sig_bytes.to_bytes()); let sig = Signature::new(CRYPTO_KIND_VLD0, BareSignature::new(&sig_bytes.to_bytes()));
if !self.verify(public_key, data, &sig)? { if !self.verify(public_key, data, &sig)? {
apibail_internal!("newly created signature does not verify"); apibail_internal!("newly created signature does not verify");
@ -300,12 +300,16 @@ impl CryptoSystem for CryptoSystemVLD0 {
#[instrument(level = "trace", target = "crypto", skip_all)] #[instrument(level = "trace", target = "crypto", skip_all)]
fn verify( fn verify(
&self, &self,
public_key: &BarePublicKey, public_key: &PublicKey,
data: &[u8], data: &[u8],
signature: &BareSignature, signature: &Signature,
) -> VeilidAPIResult<bool> { ) -> VeilidAPIResult<bool> {
self.check_public_key(public_key)?;
self.check_signature(signature)?;
let pk = ed::VerifyingKey::from_bytes( let pk = ed::VerifyingKey::from_bytes(
public_key public_key
.ref_value()
.bytes() .bytes()
.try_into() .try_into()
.map_err(VeilidAPIError::internal)?, .map_err(VeilidAPIError::internal)?,
@ -313,6 +317,7 @@ impl CryptoSystem for CryptoSystemVLD0 {
.map_err(|e| VeilidAPIError::parse_error("Public key is invalid", e))?; .map_err(|e| VeilidAPIError::parse_error("Public key is invalid", e))?;
let sig = ed::Signature::from_bytes( let sig = ed::Signature::from_bytes(
signature signature
.ref_value()
.bytes() .bytes()
.try_into() .try_into()
.map_err(VeilidAPIError::internal)?, .map_err(VeilidAPIError::internal)?,
@ -335,11 +340,14 @@ impl CryptoSystem for CryptoSystemVLD0 {
fn decrypt_in_place_aead( fn decrypt_in_place_aead(
&self, &self,
body: &mut Vec<u8>, body: &mut Vec<u8>,
nonce: &BareNonce, nonce: &Nonce,
shared_secret: &BareSharedSecret, shared_secret: &SharedSecret,
associated_data: Option<&[u8]>, associated_data: Option<&[u8]>,
) -> VeilidAPIResult<()> { ) -> VeilidAPIResult<()> {
self.check_shared_secret(shared_secret)?;
let shared_secret_bytes: [u8; VLD0_SHARED_SECRET_LENGTH] = shared_secret let shared_secret_bytes: [u8; VLD0_SHARED_SECRET_LENGTH] = shared_secret
.ref_value()
.bytes() .bytes()
.try_into() .try_into()
.map_err(VeilidAPIError::internal)?; .map_err(VeilidAPIError::internal)?;
@ -358,10 +366,13 @@ impl CryptoSystem for CryptoSystemVLD0 {
fn decrypt_aead( fn decrypt_aead(
&self, &self,
body: &[u8], body: &[u8],
nonce: &BareNonce, nonce: &Nonce,
shared_secret: &BareSharedSecret, shared_secret: &SharedSecret,
associated_data: Option<&[u8]>, associated_data: Option<&[u8]>,
) -> VeilidAPIResult<Vec<u8>> { ) -> VeilidAPIResult<Vec<u8>> {
self.check_nonce(nonce)?;
self.check_shared_secret(shared_secret)?;
let mut out = body.to_vec(); let mut out = body.to_vec();
self.decrypt_in_place_aead(&mut out, nonce, shared_secret, associated_data) self.decrypt_in_place_aead(&mut out, nonce, shared_secret, associated_data)
.map_err(map_to_string) .map_err(map_to_string)
@ -373,11 +384,15 @@ impl CryptoSystem for CryptoSystemVLD0 {
fn encrypt_in_place_aead( fn encrypt_in_place_aead(
&self, &self,
body: &mut Vec<u8>, body: &mut Vec<u8>,
nonce: &BareNonce, nonce: &Nonce,
shared_secret: &BareSharedSecret, shared_secret: &SharedSecret,
associated_data: Option<&[u8]>, associated_data: Option<&[u8]>,
) -> VeilidAPIResult<()> { ) -> VeilidAPIResult<()> {
self.check_nonce(nonce)?;
self.check_shared_secret(shared_secret)?;
let shared_secret_bytes: [u8; VLD0_SHARED_SECRET_LENGTH] = shared_secret let shared_secret_bytes: [u8; VLD0_SHARED_SECRET_LENGTH] = shared_secret
.ref_value()
.bytes() .bytes()
.try_into() .try_into()
.map_err(VeilidAPIError::internal)?; .map_err(VeilidAPIError::internal)?;
@ -397,10 +412,13 @@ impl CryptoSystem for CryptoSystemVLD0 {
fn encrypt_aead( fn encrypt_aead(
&self, &self,
body: &[u8], body: &[u8],
nonce: &BareNonce, nonce: &Nonce,
shared_secret: &BareSharedSecret, shared_secret: &SharedSecret,
associated_data: Option<&[u8]>, associated_data: Option<&[u8]>,
) -> VeilidAPIResult<Vec<u8>> { ) -> VeilidAPIResult<Vec<u8>> {
self.check_nonce(nonce)?;
self.check_shared_secret(shared_secret)?;
let mut out = body.to_vec(); let mut out = body.to_vec();
self.encrypt_in_place_aead(&mut out, nonce, shared_secret, associated_data) self.encrypt_in_place_aead(&mut out, nonce, shared_secret, associated_data)
.map_err(map_to_string) .map_err(map_to_string)
@ -413,10 +431,14 @@ impl CryptoSystem for CryptoSystemVLD0 {
fn crypt_in_place_no_auth( fn crypt_in_place_no_auth(
&self, &self,
body: &mut [u8], body: &mut [u8],
nonce: &BareNonce, nonce: &Nonce,
shared_secret: &BareSharedSecret, shared_secret: &SharedSecret,
) -> VeilidAPIResult<()> { ) -> VeilidAPIResult<()> {
self.check_nonce(nonce)?;
self.check_shared_secret(shared_secret)?;
let shared_secret_bytes: [u8; VLD0_SHARED_SECRET_LENGTH] = shared_secret let shared_secret_bytes: [u8; VLD0_SHARED_SECRET_LENGTH] = shared_secret
.ref_value()
.bytes() .bytes()
.try_into() .try_into()
.map_err(VeilidAPIError::internal)?; .map_err(VeilidAPIError::internal)?;
@ -435,10 +457,14 @@ impl CryptoSystem for CryptoSystemVLD0 {
&self, &self,
in_buf: &[u8], in_buf: &[u8],
out_buf: &mut [u8], out_buf: &mut [u8],
nonce: &BareNonce, nonce: &Nonce,
shared_secret: &BareSharedSecret, shared_secret: &SharedSecret,
) -> VeilidAPIResult<()> { ) -> VeilidAPIResult<()> {
self.check_nonce(nonce)?;
self.check_shared_secret(shared_secret)?;
let shared_secret_bytes: [u8; VLD0_SHARED_SECRET_LENGTH] = shared_secret let shared_secret_bytes: [u8; VLD0_SHARED_SECRET_LENGTH] = shared_secret
.ref_value()
.bytes() .bytes()
.try_into() .try_into()
.map_err(VeilidAPIError::internal)?; .map_err(VeilidAPIError::internal)?;
@ -458,9 +484,12 @@ impl CryptoSystem for CryptoSystemVLD0 {
fn crypt_no_auth_aligned_8( fn crypt_no_auth_aligned_8(
&self, &self,
in_buf: &[u8], in_buf: &[u8],
nonce: &BareNonce, nonce: &Nonce,
shared_secret: &BareSharedSecret, shared_secret: &SharedSecret,
) -> VeilidAPIResult<Vec<u8>> { ) -> VeilidAPIResult<Vec<u8>> {
self.check_nonce(nonce)?;
self.check_shared_secret(shared_secret)?;
let mut out_buf = unsafe { aligned_8_u8_vec_uninit(in_buf.len()) }; let mut out_buf = unsafe { aligned_8_u8_vec_uninit(in_buf.len()) };
self.crypt_b2b_no_auth(in_buf, &mut out_buf, nonce, shared_secret)?; self.crypt_b2b_no_auth(in_buf, &mut out_buf, nonce, shared_secret)?;
Ok(out_buf) Ok(out_buf)
@ -470,9 +499,12 @@ impl CryptoSystem for CryptoSystemVLD0 {
fn crypt_no_auth_unaligned( fn crypt_no_auth_unaligned(
&self, &self,
in_buf: &[u8], in_buf: &[u8],
nonce: &BareNonce, nonce: &Nonce,
shared_secret: &BareSharedSecret, shared_secret: &SharedSecret,
) -> VeilidAPIResult<Vec<u8>> { ) -> VeilidAPIResult<Vec<u8>> {
self.check_nonce(nonce)?;
self.check_shared_secret(shared_secret)?;
let mut out_buf = unsafe { unaligned_u8_vec_uninit(in_buf.len()) }; let mut out_buf = unsafe { unaligned_u8_vec_uninit(in_buf.len()) };
self.crypt_b2b_no_auth(in_buf, &mut out_buf, nonce, shared_secret)?; self.crypt_b2b_no_auth(in_buf, &mut out_buf, nonce, shared_secret)?;
Ok(out_buf) Ok(out_buf)


@@ -1,4 +1,4 @@
 use super::*;
-pub const CRYPTO_KIND_VLD1: CryptoKind = CryptoKind(*b"VLD1");
+pub const CRYPTO_KIND_VLD1: CryptoKind = CryptoKind::new(*b"VLD1");
 pub const CRYPTO_KIND_VLD1_FOURCC: u32 = u32::from_be_bytes(*b"VLD1");


@@ -1,45 +1,62 @@
 use super::*;
 use crate::*;
-// Diffie-Hellman key exchange cache
+// Diffie-Hellman key agreement cache
-#[derive(Serialize, Deserialize, PartialEq, Eq, Hash)]
+#[derive(Debug, PartialEq, Eq, Hash)]
 pub struct DHCacheKey {
-    pub key: BarePublicKey,
-    pub secret: BareSecretKey,
+    pub key: PublicKey,
+    pub secret: SecretKey,
 }
-#[derive(Serialize, Deserialize)]
+impl fmt::Display for DHCacheKey {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        write!(f, "{}.{}", self.key, self.secret)
+    }
+}
+impl FromStr for DHCacheKey {
+    type Err = VeilidAPIError;
+    fn from_str(s: &str) -> Result<Self, Self::Err> {
+        let Some((pks, sks)) = s.split_once('.') else {
+            apibail_parse_error!("s", s);
+        };
+        let key = PublicKey::from_str(pks)?;
+        let secret = SecretKey::from_str(sks)?;
+        Ok(DHCacheKey { key, secret })
+    }
+}
+impl<'de> Deserialize<'de> for DHCacheKey {
+    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
+    where
+        D: Deserializer<'de>,
+    {
+        let s = String::deserialize(deserializer)?;
+        FromStr::from_str(&s).map_err(de::Error::custom)
+    }
+}
+impl Serialize for DHCacheKey {
+    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
+    where
+        S: Serializer,
+    {
+        serializer.serialize_str(&self.to_string())
+    }
+}
+#[derive(Debug, Serialize, Deserialize)]
 pub struct DHCacheValue {
-    pub shared_secret: BareSharedSecret,
+    pub shared_secret: SharedSecret,
 }
 pub type DHCache = LruCache<DHCacheKey, DHCacheValue>;
 pub const DH_CACHE_SIZE: usize = 4096;
 pub fn cache_to_bytes(cache: &DHCache) -> Vec<u8> {
-    let cnt: usize = cache.len();
-    let mut out: Vec<u8> = Vec::with_capacity(cnt * (32 + 32 + 32));
-    for e in cache.iter() {
-        out.extend_from_slice(&e.0.key);
-        out.extend_from_slice(&e.0.secret);
-        out.extend_from_slice(&e.1.shared_secret);
-    }
-    let mut rev: Vec<u8> = Vec::with_capacity(out.len());
-    for d in out.chunks(32 + 32 + 32).rev() {
-        rev.extend(d);
-    }
-    rev
+    serialize_json_bytes(cache)
 }
-pub fn bytes_to_cache(bytes: &[u8], cache: &mut DHCache) {
-    for d in bytes.chunks(32 + 32 + 32) {
-        let k = DHCacheKey {
-            key: BarePublicKey::new(d[0..32].try_into().expect("asdf")),
-            secret: BareSecretKey::new(d[32..64].try_into().expect("asdf")),
-        };
-        let v = DHCacheValue {
-            shared_secret: BareSharedSecret::new(d[64..96].try_into().expect("asdf")),
-        };
-        cache.insert(k, v);
-    }
-}
+pub fn bytes_to_cache(bytes: &[u8]) -> EyreResult<DHCache> {
+    deserialize_json_bytes(bytes).wrap_err("cache format invalid")
+}
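A small, hypothetical round-trip over the two functions above, illustrating the switch from the old fixed 96-byte-per-entry binary layout to JSON serialization; only cache_to_bytes and bytes_to_cache come from this file, the wrapper function name is illustrative.

// Hypothetical persistence round-trip: the DH cache is now written as JSON bytes.
fn roundtrip_dh_cache(cache: &DHCache) -> EyreResult<DHCache> {
    // Serialize the whole LruCache; keys serialize as "public.secret" strings
    // via the Display/Serialize impls above.
    let bytes = cache_to_bytes(cache);
    // Deserialize, wrapping any format error; bytes written by the old binary
    // layout fail here instead of silently mis-parsing.
    bytes_to_cache(&bytes)
}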


@ -1,364 +0,0 @@
#![allow(clippy::absurd_extreme_comparisons)]
use super::*;
use crate::*;
use core::convert::TryInto;
pub const MAX_ENVELOPE_SIZE: usize = 65507;
pub const MIN_ENVELOPE_SIZE: usize = 0x6A + 0x40; // Header + BareSignature
// Version number of envelope format
fourcc_type!(EnvelopeVersion);
pub const ENVELOPE_VERSION_VLD0: EnvelopeVersion = EnvelopeVersion(*b"ENV0");
// pub const ENVELOPE_VERSION_VLD0_FOURCC: u32 = u32::from_be_bytes(*b"ENV0");
/// Envelope versions in order of preference, best envelope version is the first one, worst is the last one
pub const VALID_ENVELOPE_VERSIONS: [EnvelopeVersion; 1] = [ENVELOPE_VERSION_VLD0];
/// Number of envelope versions to keep on structures if many are present beyond the ones we consider valid
pub const MAX_ENVELOPE_VERSIONS: usize = 16;
/// Envelopes are versioned
///
/// These are the formats for the on-the-wire serialization performed by this module
///
/// #[repr(C, packed)]
/// struct EnvelopeHeader {
/// // Size is at least 4 bytes. Depending on the version specified, the size may vary and should be cast to the appropriate struct
/// version: [u8; 4], // 0x00: 0x45 0x4E 0x56 0x30 ("ENV0")
/// }
///
/// #[repr(C, packed)]
/// struct EnvelopeV0 {
/// // Size is 106 bytes without signature and 170 with signature
/// version: [u8; 4], // 0x00: 0x45 0x4E 0x56 0x30 ("ENV0")
/// crypto_kind: [u8; 4], // 0x04: CryptoSystemVersion FOURCC code (CryptoKind)
/// size: u16, // 0x08: Total size of the envelope including the encrypted operations message. Maximum size is 65,507 bytes, which is the data size limit for a single UDP message on IPv4.
/// timestamp: u64, // 0x0A: Duration since UNIX_EPOCH in microseconds when this message is sent. Messages older than 10 seconds are dropped.
/// nonce: [u8; 24], // 0x12: Random nonce for replay protection and for dh
/// sender_id: [u8; 32], // 0x2A: Node ID of the message source, which is the public key of the sender (must be verified with find_node if this is a new node_id/address combination)
/// recipient_id: [u8; 32], // 0x4A: Node ID of the intended recipient, which is the public key of the recipient (must be the receiving node, or a relay lease holder)
/// // 0x6A: message is appended (operations)
/// signature: [u8; 64], // 0x?? (end-0x40): BareSignature of the entire envelope including header is appended to the packet
/// // entire header needs to be included in message digest, relays are not allowed to modify the envelope without invalidating the signature.
/// }
#[derive(Debug, Clone, PartialEq, Eq, Default)]
pub struct Envelope {
version: EnvelopeVersion,
crypto_kind: CryptoKind,
timestamp: Timestamp,
nonce: BareNonce,
sender_id: BareNodeId,
recipient_id: BareNodeId,
}
impl Envelope {
#[must_use]
pub fn new(
version: EnvelopeVersion,
crypto_kind: CryptoKind,
timestamp: Timestamp,
nonce: BareNonce,
sender_id: BareNodeId,
recipient_id: BareNodeId,
) -> Self {
assert!(VALID_ENVELOPE_VERSIONS.contains(&version));
assert!(VALID_CRYPTO_KINDS.contains(&crypto_kind));
Self {
version,
crypto_kind,
timestamp,
nonce,
sender_id,
recipient_id,
}
}
#[instrument(level = "trace", target = "envelope", skip_all)]
pub fn from_signed_data(
crypto: &Crypto,
data: &[u8],
network_key: &Option<BareSharedSecret>,
) -> VeilidAPIResult<Envelope> {
// Ensure we are at least the length of the envelope
// Silent drop here, as we use zero length packets as part of the protocol for hole punching
if data.len() < MIN_ENVELOPE_SIZE {
apibail_generic!("envelope data too small");
}
// Check envelope version
let version: EnvelopeVersion = data[0x00..0x04]
.try_into()
.map_err(VeilidAPIError::internal)?;
if !VALID_ENVELOPE_VERSIONS.contains(&version) {
apibail_parse_error!("unsupported envelope version", version);
}
// Check crypto kind
let crypto_kind = CryptoKind(
data[0x04..0x08]
.try_into()
.map_err(VeilidAPIError::internal)?,
);
let Some(vcrypto) = crypto.get(crypto_kind) else {
apibail_parse_error!("unsupported crypto kind", crypto_kind);
};
// Get size and ensure it matches the size of the envelope and is less than the maximum message size
let size: u16 = u16::from_le_bytes(
data[0x08..0x0A]
.try_into()
.map_err(VeilidAPIError::internal)?,
);
if (size as usize) > MAX_ENVELOPE_SIZE {
apibail_parse_error!("envelope too large", size);
}
if (size as usize) != data.len() {
apibail_parse_error!(
"size doesn't match envelope size",
format!(
"size doesn't match envelope size: size={} data.len()={}",
size,
data.len()
)
);
}
// Get the timestamp
let timestamp: Timestamp = u64::from_le_bytes(
data[0x0A..0x12]
.try_into()
.map_err(VeilidAPIError::internal)?,
)
.into();
// Get nonce and sender node id
let mut nonce_slice: [u8; VLD0_NONCE_LENGTH] = data[0x12..0x2A]
.try_into()
.map_err(VeilidAPIError::internal)?;
let mut sender_id_slice: [u8; VLD0_HASH_DIGEST_LENGTH] = data[0x2A..0x4A]
.try_into()
.map_err(VeilidAPIError::internal)?;
let mut recipient_id_slice: [u8; VLD0_HASH_DIGEST_LENGTH] = data[0x4A..0x6A]
.try_into()
.map_err(VeilidAPIError::internal)?;
// Apply network key (not the best, but it will keep networks from colliding without much overhead)
if let Some(nk) = network_key.as_ref() {
for n in 0..VLD0_NONCE_LENGTH {
nonce_slice[n] ^= nk[n];
}
for n in 0..VLD0_HASH_DIGEST_LENGTH {
sender_id_slice[n] ^= nk[n];
}
for n in 0..VLD0_HASH_DIGEST_LENGTH {
recipient_id_slice[n] ^= nk[n];
}
}
let nonce: BareNonce = BareNonce::new(&nonce_slice);
let sender_id = BareNodeId::new(&sender_id_slice);
let recipient_id = BareNodeId::new(&recipient_id_slice);
// Ensure sender_id and recipient_id are not the same
if sender_id == recipient_id {
apibail_parse_error!(
"sender_id should not be same as recipient_id",
recipient_id.encode()
);
}
// Get signature
let signature = BareSignature::new(
data[(data.len() - 64)..]
.try_into()
.map_err(VeilidAPIError::internal)?,
);
// Validate signature
if !vcrypto
.verify(
&sender_id.clone().into(),
&data[0..(data.len() - 64)],
&signature,
)
.map_err(VeilidAPIError::internal)?
{
apibail_parse_error!("signature verification of envelope failed", signature);
}
// Return envelope
Ok(Self {
version,
crypto_kind,
timestamp,
nonce,
sender_id,
recipient_id,
})
}
#[instrument(level = "trace", target = "envelope", skip_all)]
pub fn decrypt_body(
&self,
crypto: &Crypto,
data: &[u8],
secret_key: &BareSecretKey,
network_key: &Option<BareSharedSecret>,
) -> VeilidAPIResult<Vec<u8>> {
// Get DH secret
let vcrypto = crypto
.get(self.crypto_kind)
.expect("need to ensure only valid crypto kinds here");
let mut dh_secret = vcrypto.cached_dh(&self.sender_id.clone().into(), secret_key)?;
// Apply network key
if let Some(nk) = network_key.as_ref() {
let mut dh_secret_bytes = dh_secret.to_vec();
for n in 0..VLD0_SHARED_SECRET_LENGTH {
dh_secret_bytes[n] ^= nk[n];
}
dh_secret = BareSharedSecret::new(&dh_secret_bytes);
}
// Decrypt message without authentication
let body = vcrypto.crypt_no_auth_aligned_8(
&data[0x6A..data.len() - 64],
&self.nonce,
&dh_secret,
)?;
// Decompress body
let body = decompress_size_prepended(&body, Some(MAX_ENVELOPE_SIZE))?;
Ok(body)
}
#[instrument(level = "trace", target = "envelope", skip_all, err)]
pub fn to_encrypted_data(
&self,
crypto: &Crypto,
body: &[u8],
secret_key: &BareSecretKey,
network_key: &Option<BareSharedSecret>,
) -> VeilidAPIResult<Vec<u8>> {
// Ensure body isn't too long
let uncompressed_body_size: usize = body.len() + MIN_ENVELOPE_SIZE;
if uncompressed_body_size > MAX_ENVELOPE_SIZE {
apibail_parse_error!(
"envelope size before compression is too large",
uncompressed_body_size
);
}
// Compress body
let body = compress_prepend_size(body);
// Ensure body isn't too long
let envelope_size: usize = body.len() + MIN_ENVELOPE_SIZE;
if envelope_size > MAX_ENVELOPE_SIZE {
apibail_parse_error!(
"envelope size after compression is too large",
envelope_size
);
}
// Generate dh secret
let vcrypto = crypto
.get(self.crypto_kind)
.expect("need to ensure only valid crypto kinds here");
let mut dh_secret = vcrypto.cached_dh(&self.recipient_id.clone().into(), secret_key)?;
// Write envelope body
let mut data = vec![0u8; envelope_size];
// Write version
data[0x00..0x04].copy_from_slice(&self.version.0);
// Write crypto kind
data[0x04..0x08].copy_from_slice(&self.crypto_kind.0);
// Write size
data[0x08..0x0A].copy_from_slice(&(envelope_size as u16).to_le_bytes());
// Write timestamp
data[0x0A..0x12].copy_from_slice(&self.timestamp.as_u64().to_le_bytes());
// Write nonce
data[0x12..0x2A].copy_from_slice(&self.nonce);
// Write sender node id
data[0x2A..0x4A].copy_from_slice(&self.sender_id);
// Write recipient node id
data[0x4A..0x6A].copy_from_slice(&self.recipient_id);
// Apply network key (not the best, but it will keep networks from colliding without much overhead)
if let Some(nk) = network_key.as_ref() {
let mut dh_secret_bytes = dh_secret.to_vec();
for n in 0..VLD0_SHARED_SECRET_LENGTH {
dh_secret_bytes[n] ^= nk[n];
}
for n in 0..VLD0_NONCE_LENGTH {
data[0x12 + n] ^= nk[n];
}
for n in 0..VLD0_HASH_DIGEST_LENGTH {
data[0x2A + n] ^= nk[n];
}
for n in 0..VLD0_HASH_DIGEST_LENGTH {
data[0x4A + n] ^= nk[n];
}
dh_secret = BareSharedSecret::new(&dh_secret_bytes);
}
// Encrypt message
let encrypted_body = vcrypto.crypt_no_auth_unaligned(&body, &self.nonce, &dh_secret)?;
// Write body
if !encrypted_body.is_empty() {
data[0x6A..envelope_size - 64].copy_from_slice(encrypted_body.as_slice());
}
// Sign the envelope
let signature = vcrypto.sign(
&self.sender_id.clone().into(),
secret_key,
&data[0..(envelope_size - 64)],
)?;
// Append the signature
data[(envelope_size - 64)..].copy_from_slice(&signature);
Ok(data)
}
pub fn get_version(&self) -> EnvelopeVersion {
self.version
}
pub fn get_crypto_kind(&self) -> CryptoKind {
self.crypto_kind
}
pub fn get_timestamp(&self) -> Timestamp {
self.timestamp
}
#[expect(dead_code)]
pub fn get_nonce(&self) -> BareNonce {
self.nonce.clone()
}
#[expect(dead_code)]
pub fn get_bare_sender_id(&self) -> BareNodeId {
self.sender_id.clone()
}
pub fn get_sender_id(&self) -> NodeId {
NodeId::new(self.crypto_kind, self.sender_id.clone())
}
#[expect(dead_code)]
pub fn get_bare_recipient_id(&self) -> BareNodeId {
self.recipient_id.clone()
}
pub fn get_recipient_id(&self) -> NodeId {
NodeId::new(self.crypto_kind, self.recipient_id.clone())
}
}

View file

@ -0,0 +1,497 @@
use super::*;
use crate::routing_table::*;
use core::convert::TryInto;
// Version number of envelope format
fourcc_type!(EnvelopeVersion);
// ENV0
pub const ENVELOPE_VERSION_ENV0: EnvelopeVersion = EnvelopeVersion::new(*b"ENV0");
pub const ENV0_NONCE_LENGTH: usize = 24;
pub const ENV0_SIGNATURE_LENGTH: usize = 64;
pub const ENV0_MAX_ENVELOPE_SIZE: usize = 65507;
pub const ENV0_MIN_ENVELOPE_SIZE: usize = 0x6A + 0x40; // Header + BareSignature
/// Envelope versions in order of preference; the best envelope version is first and the worst is last
pub const VALID_ENVELOPE_VERSIONS: [EnvelopeVersion; 1] = [ENVELOPE_VERSION_ENV0];
/// Number of envelope versions to keep on structures if many are present beyond the ones we consider valid
pub const MAX_ENVELOPE_VERSIONS: usize = 16;
/// Envelopes are versioned
///
/// These are the formats for the on-the-wire serialization performed by this module
///
/// #[repr(C, packed)]
/// struct EnvelopeHeader {
/// // Size is at least 4 bytes. Depending on the version specified, the size may vary and the data should be cast to the appropriate struct
/// version: [u8; 4], // 0x00: 0x45 0x4E 0x56 0x30 ("ENV0")
/// }
///
/// #[repr(C, packed)]
/// struct EnvelopeENV0 {
/// // Size is 106 bytes without signature and 170 with signature
/// version: [u8; 4], // 0x00: 0x45 0x4E 0x56 0x30 ("ENV0")
/// crypto_kind: [u8; 4], // 0x04: CryptoSystemVersion FOURCC code (CryptoKind)
/// size: u16, // 0x08: Total size of the envelope including the encrypted operations message. Maximum size is 65,507 bytes, which is the data size limit for a single UDP message on IPv4.
/// timestamp: u64, // 0x0A: Duration since UNIX_EPOCH in microseconds when this message is sent. Messages older than 10 seconds are dropped.
/// nonce: [u8; 24], // 0x12: Random nonce for replay protection and for dh
/// sender_id: [u8; 32], // 0x2A: Node ID of the message source, which is the public key of the sender (must be verified with find_node if this is a new node_id/address combination)
/// recipient_id: [u8; 32], // 0x4A: Node ID of the intended recipient, which is the public key of the recipient (must be the receiving node, or a relay lease holder)
/// // 0x6A: message is appended (operations)
/// signature: [u8; 64], // 0x?? (end-0x40): BareSignature of the entire envelope including header is appended to the packet
/// // the entire header must be included in the message digest; relays cannot modify the envelope without invalidating the signature.
/// }
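///
/// A minimal round-trip sketch of the typed-key API below (a sketch, assuming
/// `crypto`, `vcrypto`, and the keypairs are supplied by the caller; the flow
/// mirrors the envelope round-trip test elsewhere in this commit):
///
/// ```ignore
/// let (sender_key, sender_secret) = vcrypto.generate_keypair().await.into_split();
/// let sender_id = crypto.routing_table().generate_node_id(&sender_key)?;
/// let (recipient_key, recipient_secret) = vcrypto.generate_keypair().await.into_split();
/// let recipient_id = crypto.routing_table().generate_node_id(&recipient_key)?;
/// let ts = Timestamp::from(0x12345678ABCDEF69u64); // fixed test value; real senders use the current time
///
/// let envelope = Envelope::try_new_env0(
///     &crypto,
///     vcrypto.kind(),
///     ts,
///     vcrypto.random_nonce().await,
///     sender_id,
///     recipient_id,
/// )?;
/// let wire = envelope.to_encrypted_data(&crypto, b"message", &sender_secret, &None)?;
///
/// // Receiving side: parse and verify the signature, then decrypt with the recipient secret
/// let envelope2 = Envelope::try_from_signed_data(&crypto, &wire, &None)?;
/// let body = envelope2.decrypt_body(&crypto, &wire, &recipient_secret, &None)?;
/// ```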
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum Envelope {
ENV0 { env0: EnvelopeENV0 },
}
impl Envelope {
#[instrument(level = "trace", target = "envelope", skip_all)]
pub fn try_new_env0(
crypto: &Crypto,
crypto_kind: CryptoKind,
timestamp: Timestamp,
nonce: Nonce,
sender_id: NodeId,
recipient_id: NodeId,
) -> VeilidAPIResult<Self> {
Ok(Self::ENV0 {
env0: EnvelopeENV0::try_new(
crypto,
crypto_kind,
timestamp,
nonce,
sender_id,
recipient_id,
)?,
})
}
#[instrument(level = "trace", target = "envelope", skip_all)]
pub fn try_from_signed_data(
crypto: &Crypto,
data: &[u8],
network_key: &Option<BareSharedSecret>,
) -> VeilidAPIResult<Self> {
// Ensure we are at least the length of the envelope
// Silent drop here, as we use zero length packets as part of the protocol for hole punching
if data.len() < 4 {
apibail_generic!("envelope header too small");
}
// Check envelope version
let version: EnvelopeVersion = data[0x00..0x04]
.try_into()
.map_err(VeilidAPIError::internal)?;
match version {
ENVELOPE_VERSION_ENV0 => Ok(Self::ENV0 {
env0: EnvelopeENV0::try_from_signed_data(crypto, data, network_key)?,
}),
_ => {
apibail_parse_error!("unsupported envelope version", version);
}
}
}
#[instrument(level = "trace", target = "envelope", skip_all)]
pub fn decrypt_body(
&self,
crypto: &Crypto,
data: &[u8],
secret_key: &SecretKey,
network_key: &Option<BareSharedSecret>,
) -> VeilidAPIResult<Vec<u8>> {
match self {
Envelope::ENV0 { env0 } => env0.decrypt_body(crypto, data, secret_key, network_key),
}
}
#[instrument(level = "trace", target = "envelope", skip_all, err)]
pub fn to_encrypted_data(
&self,
crypto: &Crypto,
body: &[u8],
secret_key: &SecretKey,
network_key: &Option<BareSharedSecret>,
) -> VeilidAPIResult<Vec<u8>> {
match self {
Envelope::ENV0 { env0 } => {
env0.to_encrypted_data(crypto, body, secret_key, network_key)
}
}
}
pub fn get_version(&self) -> EnvelopeVersion {
match self {
Envelope::ENV0 { env0: _ } => ENVELOPE_VERSION_ENV0,
}
}
pub fn get_crypto_kind(&self) -> CryptoKind {
match self {
Envelope::ENV0 { env0 } => env0.get_crypto_kind(),
}
}
pub fn get_timestamp(&self) -> Timestamp {
match self {
Envelope::ENV0 { env0 } => env0.get_timestamp(),
}
}
pub fn get_sender_id(&self) -> NodeId {
match self {
Envelope::ENV0 { env0 } => env0.get_sender_id(),
}
}
pub fn get_recipient_id(&self) -> NodeId {
match self {
Envelope::ENV0 { env0 } => env0.get_recipient_id(),
}
}
}
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct EnvelopeENV0 {
crypto_kind: CryptoKind,
timestamp: Timestamp,
nonce: Nonce,
bare_sender_id: BareNodeId,
bare_recipient_id: BareNodeId,
}
impl EnvelopeENV0 {
fn try_new(
crypto: &Crypto,
crypto_kind: CryptoKind,
timestamp: Timestamp,
nonce: Nonce,
sender_id: NodeId,
recipient_id: NodeId,
) -> VeilidAPIResult<Self> {
let vcrypto = Self::validate_crypto_kind(crypto, crypto_kind)?;
vcrypto.check_nonce(&nonce)?;
Self::check_node_id(crypto_kind, &sender_id)?;
Self::check_node_id(crypto_kind, &recipient_id)?;
Ok(Self {
crypto_kind,
timestamp,
nonce,
bare_sender_id: sender_id.value(),
bare_recipient_id: recipient_id.value(),
})
}
fn try_from_signed_data(
crypto: &Crypto,
data: &[u8],
network_key: &Option<BareSharedSecret>,
) -> VeilidAPIResult<Self> {
// Ensure we are at least the length of the envelope
// Silent drop here, as we use zero length packets as part of the protocol for hole punching
if data.len() < ENV0_MIN_ENVELOPE_SIZE {
apibail_generic!("envelope data too small");
}
// Check crypto kind
let crypto_kind = CryptoKind::new(
data[0x04..0x08]
.try_into()
.map_err(VeilidAPIError::internal)?,
);
let vcrypto = Self::validate_crypto_kind(crypto, crypto_kind)?;
// Get size and ensure it matches the size of the envelope and is less than the maximum message size
let size: u16 = u16::from_le_bytes(
data[0x08..0x0A]
.try_into()
.map_err(VeilidAPIError::internal)?,
);
if (size as usize) > ENV0_MAX_ENVELOPE_SIZE {
apibail_parse_error!("envelope too large", size);
}
if (size as usize) != data.len() {
apibail_parse_error!(
"size doesn't match envelope size",
format!(
"size doesn't match envelope size: size={} data.len()={}",
size,
data.len()
)
);
}
// Get the timestamp
let timestamp: Timestamp = u64::from_le_bytes(
data[0x0A..0x12]
.try_into()
.map_err(VeilidAPIError::internal)?,
)
.into();
// Get nonce and sender node id
let mut nonce_slice: [u8; ENV0_NONCE_LENGTH] = data[0x12..0x2A]
.try_into()
.map_err(VeilidAPIError::internal)?;
let mut sender_id_slice: [u8; HASH_COORDINATE_LENGTH] = data[0x2A..0x4A]
.try_into()
.map_err(VeilidAPIError::internal)?;
let mut recipient_id_slice: [u8; HASH_COORDINATE_LENGTH] = data[0x4A..0x6A]
.try_into()
.map_err(VeilidAPIError::internal)?;
// Apply network key (not the best, but it will keep networks from colliding without much overhead)
if let Some(nk) = network_key.as_ref() {
for n in 0..ENV0_NONCE_LENGTH {
nonce_slice[n] ^= nk[n];
}
for n in 0..HASH_COORDINATE_LENGTH {
sender_id_slice[n] ^= nk[n];
}
for n in 0..HASH_COORDINATE_LENGTH {
recipient_id_slice[n] ^= nk[n];
}
}
let nonce: Nonce = Nonce::new(&nonce_slice);
let bare_sender_id = BareNodeId::new(&sender_id_slice);
let bare_recipient_id = BareNodeId::new(&recipient_id_slice);
// Ensure sender_id and recipient_id are not the same
if bare_sender_id == bare_recipient_id {
apibail_parse_error!(
"bare_sender_id should not be same as bare_recipient_id",
bare_recipient_id.encode()
);
}
let sender_public_key = PublicKey::new(crypto_kind, BarePublicKey::new(&bare_sender_id));
// Get signature
let bare_signature = BareSignature::new(
data[(data.len() - ENV0_SIGNATURE_LENGTH)..]
.try_into()
.map_err(VeilidAPIError::internal)?,
);
let signature = Signature::new(crypto_kind, bare_signature);
// Validate signature
if !vcrypto
.verify(
&sender_public_key,
&data[0..(data.len() - ENV0_SIGNATURE_LENGTH)],
&signature,
)
.map_err(VeilidAPIError::internal)?
{
apibail_parse_error!("signature verification of envelope failed", signature);
}
// Return envelope
Ok(Self {
crypto_kind,
timestamp,
nonce,
bare_sender_id,
bare_recipient_id,
})
}
pub fn decrypt_body(
&self,
crypto: &Crypto,
data: &[u8],
secret_key: &SecretKey,
network_key: &Option<BareSharedSecret>,
) -> VeilidAPIResult<Vec<u8>> {
// Get DH secret
let vcrypto = crypto
.get(self.crypto_kind)
.expect("need to ensure only valid crypto kinds here");
vcrypto.check_secret_key(secret_key)?;
let sender_public_key =
PublicKey::new(self.crypto_kind, BarePublicKey::new(&self.bare_sender_id));
let mut dh_secret = vcrypto.cached_dh(&sender_public_key, secret_key)?;
// Apply network key
if let Some(nk) = network_key.as_ref() {
let mut dh_secret_bytes = dh_secret.ref_value().to_vec();
for n in 0..dh_secret_bytes.len() {
dh_secret_bytes[n] ^= nk[n % dh_secret_bytes.len()];
}
dh_secret =
SharedSecret::new(dh_secret.kind(), BareSharedSecret::new(&dh_secret_bytes));
}
// Decrypt message without authentication
let body = vcrypto.crypt_no_auth_aligned_8(
&data[0x6A..data.len() - ENV0_SIGNATURE_LENGTH],
&self.nonce,
&dh_secret,
)?;
// Decompress body
let body = decompress_size_prepended(&body, Some(ENV0_MAX_ENVELOPE_SIZE))?;
Ok(body)
}
pub fn to_encrypted_data(
&self,
crypto: &Crypto,
body: &[u8],
secret_key: &SecretKey,
network_key: &Option<BareSharedSecret>,
) -> VeilidAPIResult<Vec<u8>> {
let vcrypto = crypto
.get(self.crypto_kind)
.expect("need to ensure only valid crypto kinds here");
vcrypto.check_secret_key(secret_key)?;
// Ensure body isn't too long
let uncompressed_body_size: usize = body.len() + ENV0_MIN_ENVELOPE_SIZE;
if uncompressed_body_size > ENV0_MAX_ENVELOPE_SIZE {
apibail_parse_error!(
"envelope size before compression is too large",
uncompressed_body_size
);
}
// Compress body
let body = compress_prepend_size(body);
// Ensure body isn't too long
let envelope_size: usize = body.len() + ENV0_MIN_ENVELOPE_SIZE;
if envelope_size > ENV0_MAX_ENVELOPE_SIZE {
apibail_parse_error!(
"envelope size after compression is too large",
envelope_size
);
}
// Generate dh secret
let recipient_public_key = PublicKey::new(
self.crypto_kind,
BarePublicKey::new(&self.bare_recipient_id),
);
let mut dh_secret = vcrypto.cached_dh(&recipient_public_key, secret_key)?;
// Write envelope body
let mut data = vec![0u8; envelope_size];
// Write version
data[0x00..0x04].copy_from_slice(&ENVELOPE_VERSION_ENV0.0);
// Write crypto kind
data[0x04..0x08].copy_from_slice(self.crypto_kind.bytes());
// Write size
data[0x08..0x0A].copy_from_slice(&(envelope_size as u16).to_le_bytes());
// Write timestamp
data[0x0A..0x12].copy_from_slice(&self.timestamp.as_u64().to_le_bytes());
// Write nonce
data[0x12..0x2A].copy_from_slice(&self.nonce);
// Write sender node id
data[0x2A..0x4A].copy_from_slice(&self.bare_sender_id);
// Write recipient node id
data[0x4A..0x6A].copy_from_slice(&self.bare_recipient_id);
// Apply network key (not the best, but it will keep networks from colliding without much overhead)
if let Some(nk) = network_key.as_ref() {
let mut dh_secret_bytes = dh_secret.ref_value().to_vec();
for n in 0..dh_secret_bytes.len() {
dh_secret_bytes[n] ^= nk[n % dh_secret_bytes.len()];
}
for n in 0..ENV0_NONCE_LENGTH {
data[0x12 + n] ^= nk[n];
}
for n in 0..HASH_COORDINATE_LENGTH {
data[0x2A + n] ^= nk[n];
}
for n in 0..HASH_COORDINATE_LENGTH {
data[0x4A + n] ^= nk[n];
}
dh_secret =
SharedSecret::new(dh_secret.kind(), BareSharedSecret::new(&dh_secret_bytes));
}
// Encrypt message
let encrypted_body = vcrypto.crypt_no_auth_unaligned(&body, &self.nonce, &dh_secret)?;
// Write body
if !encrypted_body.is_empty() {
data[0x6A..envelope_size - ENV0_SIGNATURE_LENGTH]
.copy_from_slice(encrypted_body.as_slice());
}
// Sign the envelope
let sender_public_key =
PublicKey::new(self.crypto_kind, BarePublicKey::new(&self.bare_sender_id));
let signature = vcrypto.sign(
&sender_public_key,
secret_key,
&data[0..(envelope_size - vcrypto.signature_length())],
)?;
// Append the signature
data[(envelope_size - ENV0_SIGNATURE_LENGTH)..].copy_from_slice(signature.ref_value());
Ok(data)
}
pub fn get_crypto_kind(&self) -> CryptoKind {
self.crypto_kind
}
pub fn get_timestamp(&self) -> Timestamp {
self.timestamp
}
pub fn get_sender_id(&self) -> NodeId {
NodeId::new(self.crypto_kind, self.bare_sender_id.clone())
}
pub fn get_recipient_id(&self) -> NodeId {
NodeId::new(self.crypto_kind, self.bare_recipient_id.clone())
}
//////////////////////////////////////////////////////////////////
fn validate_crypto_kind(
crypto: &Crypto,
crypto_kind: CryptoKind,
) -> VeilidAPIResult<CryptoSystemGuard<'_>> {
let vcrypto = crypto
.get(crypto_kind)
.ok_or_else(|| VeilidAPIError::parse_error("unsupported crypto kind", crypto_kind))?;
// Verify crypto kind can be used with this envelope
if vcrypto.nonce_length() != ENV0_NONCE_LENGTH
|| vcrypto.hash_digest_length() != HASH_COORDINATE_LENGTH
|| vcrypto.public_key_length() != HASH_COORDINATE_LENGTH
|| vcrypto.signature_length() != ENV0_SIGNATURE_LENGTH
{
apibail_generic!("unsupported crypto kind for this envelope type");
}
Ok(vcrypto)
}
fn check_node_id(crypto_kind: CryptoKind, node_id: &NodeId) -> VeilidAPIResult<()> {
if node_id.kind() != crypto_kind {
apibail_parse_error!("invalid crypto kind for ENV0", node_id.kind());
}
if node_id.ref_value().len() != HASH_COORDINATE_LENGTH {
apibail_parse_error!("invalid node_id length for ENV0", node_id.ref_value().len());
}
Ok(())
}
}

View file

@ -1,3 +1,5 @@
use core::marker::PhantomData;
use super::*; use super::*;
/// Guard to access a particular cryptosystem /// Guard to access a particular cryptosystem
@ -52,9 +54,9 @@ impl AsyncCryptoSystemGuard<'_> {
// Cached Operations // Cached Operations
pub async fn cached_dh( pub async fn cached_dh(
&self, &self,
key: &BarePublicKey, key: &PublicKey,
secret: &BareSecretKey, secret: &SecretKey,
) -> VeilidAPIResult<BareSharedSecret> { ) -> VeilidAPIResult<SharedSecret> {
yielding(|| self.guard.cached_dh(key, secret)).await yielding(|| self.guard.cached_dh(key, secret)).await
} }
@ -77,47 +79,50 @@ impl AsyncCryptoSystemGuard<'_> {
&self, &self,
password: &[u8], password: &[u8],
salt: &[u8], salt: &[u8],
) -> VeilidAPIResult<BareSharedSecret> { ) -> VeilidAPIResult<SharedSecret> {
yielding(|| self.guard.derive_shared_secret(password, salt)).await yielding(|| self.guard.derive_shared_secret(password, salt)).await
} }
pub async fn random_nonce(&self) -> BareNonce { pub async fn random_nonce(&self) -> Nonce {
yielding(|| self.guard.random_nonce()).await yielding(|| self.guard.random_nonce()).await
} }
pub async fn random_shared_secret(&self) -> BareSharedSecret { pub async fn random_shared_secret(&self) -> SharedSecret {
yielding(|| self.guard.random_shared_secret()).await yielding(|| self.guard.random_shared_secret()).await
} }
pub async fn compute_dh( pub async fn compute_dh(
&self, &self,
key: &BarePublicKey, key: &PublicKey,
secret: &BareSecretKey, secret: &SecretKey,
) -> VeilidAPIResult<BareSharedSecret> { ) -> VeilidAPIResult<SharedSecret> {
yielding(|| self.guard.compute_dh(key, secret)).await yielding(|| self.guard.compute_dh(key, secret)).await
} }
pub async fn generate_shared_secret( pub async fn generate_shared_secret(
&self, &self,
key: &BarePublicKey, key: &PublicKey,
secret: &BareSecretKey, secret: &SecretKey,
domain: &[u8], domain: &[u8],
) -> VeilidAPIResult<BareSharedSecret> { ) -> VeilidAPIResult<SharedSecret> {
let dh = self.compute_dh(key, secret).await?; let dh = self.compute_dh(key, secret).await?;
Ok(BareSharedSecret::from( let hash = self
self.generate_hash(&[&dh, domain, VEILID_DOMAIN_API].concat()) .generate_hash(&[&dh.into_value(), domain, VEILID_DOMAIN_API].concat())
.await, .await;
Ok(SharedSecret::new(
hash.kind(),
BareSharedSecret::new(&hash.into_value()),
)) ))
} }
pub async fn generate_keypair(&self) -> BareKeyPair { pub async fn generate_keypair(&self) -> KeyPair {
yielding(|| self.guard.generate_keypair()).await yielding(|| self.guard.generate_keypair()).await
} }
pub async fn generate_hash(&self, data: &[u8]) -> BareHashDigest { pub async fn generate_hash(&self, data: &[u8]) -> HashDigest {
yielding(|| self.guard.generate_hash(data)).await yielding(|| self.guard.generate_hash(data)).await
} }
pub async fn generate_hash_reader( pub async fn generate_hash_reader(
&self, &self,
reader: &mut dyn std::io::Read, reader: &mut dyn std::io::Read,
) -> VeilidAPIResult<BarePublicKey> { ) -> VeilidAPIResult<PublicKey> {
yielding(|| self.guard.generate_hash_reader(reader)).await yielding(|| self.guard.generate_hash_reader(reader)).await
} }
@ -154,59 +159,58 @@ impl AsyncCryptoSystemGuard<'_> {
pub fn default_salt_length(&self) -> usize { pub fn default_salt_length(&self) -> usize {
self.guard.default_salt_length() self.guard.default_salt_length()
} }
pub fn check_shared_secret(&self, secret: &BareSharedSecret) -> VeilidAPIResult<()> { pub fn check_shared_secret(&self, secret: &SharedSecret) -> VeilidAPIResult<()> {
self.guard.check_shared_secret(secret) self.guard.check_shared_secret(secret)
} }
pub fn check_nonce(&self, nonce: &BareNonce) -> VeilidAPIResult<()> { pub fn check_nonce(&self, nonce: &Nonce) -> VeilidAPIResult<()> {
self.guard.check_nonce(nonce) self.guard.check_nonce(nonce)
} }
pub fn check_hash_digest(&self, hash: &BareHashDigest) -> VeilidAPIResult<()> { pub fn check_hash_digest(&self, hash: &HashDigest) -> VeilidAPIResult<()> {
self.guard.check_hash_digest(hash) self.guard.check_hash_digest(hash)
} }
pub fn check_public_key(&self, key: &BarePublicKey) -> VeilidAPIResult<()> { pub fn check_public_key(&self, key: &PublicKey) -> VeilidAPIResult<()> {
self.guard.check_public_key(key) self.guard.check_public_key(key)
} }
pub fn check_secret_key(&self, key: &BareSecretKey) -> VeilidAPIResult<()> { pub fn check_secret_key(&self, key: &SecretKey) -> VeilidAPIResult<()> {
self.guard.check_secret_key(key) self.guard.check_secret_key(key)
} }
pub fn check_signature(&self, signature: &BareSignature) -> VeilidAPIResult<()> { pub fn check_signature(&self, signature: &Signature) -> VeilidAPIResult<()> {
self.guard.check_signature(signature) self.guard.check_signature(signature)
} }
pub async fn validate_keypair(&self, key: &BarePublicKey, secret: &BareSecretKey) -> bool { pub async fn validate_keypair(
&self,
key: &PublicKey,
secret: &SecretKey,
) -> VeilidAPIResult<bool> {
yielding(|| self.guard.validate_keypair(key, secret)).await yielding(|| self.guard.validate_keypair(key, secret)).await
} }
pub async fn validate_hash(&self, data: &[u8], hash: &BareHashDigest) -> bool { pub async fn validate_hash(&self, data: &[u8], hash: &HashDigest) -> VeilidAPIResult<bool> {
yielding(|| self.guard.validate_hash(data, hash)).await yielding(|| self.guard.validate_hash(data, hash)).await
} }
pub async fn validate_hash_reader( pub async fn validate_hash_reader(
&self, &self,
reader: &mut dyn std::io::Read, reader: &mut dyn std::io::Read,
hash: &BareHashDigest, hash: &HashDigest,
) -> VeilidAPIResult<bool> { ) -> VeilidAPIResult<bool> {
yielding(|| self.guard.validate_hash_reader(reader, hash)).await yielding(|| self.guard.validate_hash_reader(reader, hash)).await
} }
// Distance Metric
pub async fn distance(&self, key1: &BareHashDigest, key2: &BareHashDigest) -> BareHashDistance {
yielding(|| self.guard.distance(key1, key2)).await
}
// Authentication // Authentication
pub async fn sign( pub async fn sign(
&self, &self,
key: &BarePublicKey, key: &PublicKey,
secret: &BareSecretKey, secret: &SecretKey,
data: &[u8], data: &[u8],
) -> VeilidAPIResult<BareSignature> { ) -> VeilidAPIResult<Signature> {
yielding(|| self.guard.sign(key, secret, data)).await yielding(|| self.guard.sign(key, secret, data)).await
} }
pub async fn verify( pub async fn verify(
&self, &self,
key: &BarePublicKey, key: &PublicKey,
data: &[u8], data: &[u8],
signature: &BareSignature, signature: &Signature,
) -> VeilidAPIResult<bool> { ) -> VeilidAPIResult<bool> {
yielding(|| self.guard.verify(key, data, signature)).await yielding(|| self.guard.verify(key, data, signature)).await
} }
@ -215,8 +219,8 @@ impl AsyncCryptoSystemGuard<'_> {
pub async fn decrypt_in_place_aead( pub async fn decrypt_in_place_aead(
&self, &self,
body: &mut Vec<u8>, body: &mut Vec<u8>,
nonce: &BareNonce, nonce: &Nonce,
shared_secret: &BareSharedSecret, shared_secret: &SharedSecret,
associated_data: Option<&[u8]>, associated_data: Option<&[u8]>,
) -> VeilidAPIResult<()> { ) -> VeilidAPIResult<()> {
yielding(|| { yielding(|| {
@ -229,8 +233,8 @@ impl AsyncCryptoSystemGuard<'_> {
pub async fn decrypt_aead( pub async fn decrypt_aead(
&self, &self,
body: &[u8], body: &[u8],
nonce: &BareNonce, nonce: &Nonce,
shared_secret: &BareSharedSecret, shared_secret: &SharedSecret,
associated_data: Option<&[u8]>, associated_data: Option<&[u8]>,
) -> VeilidAPIResult<Vec<u8>> { ) -> VeilidAPIResult<Vec<u8>> {
yielding(|| { yielding(|| {
@ -243,8 +247,8 @@ impl AsyncCryptoSystemGuard<'_> {
pub async fn encrypt_in_place_aead( pub async fn encrypt_in_place_aead(
&self, &self,
body: &mut Vec<u8>, body: &mut Vec<u8>,
nonce: &BareNonce, nonce: &Nonce,
shared_secret: &BareSharedSecret, shared_secret: &SharedSecret,
associated_data: Option<&[u8]>, associated_data: Option<&[u8]>,
) -> VeilidAPIResult<()> { ) -> VeilidAPIResult<()> {
yielding(|| { yielding(|| {
@ -257,8 +261,8 @@ impl AsyncCryptoSystemGuard<'_> {
pub async fn encrypt_aead( pub async fn encrypt_aead(
&self, &self,
body: &[u8], body: &[u8],
nonce: &BareNonce, nonce: &Nonce,
shared_secret: &BareSharedSecret, shared_secret: &SharedSecret,
associated_data: Option<&[u8]>, associated_data: Option<&[u8]>,
) -> VeilidAPIResult<Vec<u8>> { ) -> VeilidAPIResult<Vec<u8>> {
yielding(|| { yielding(|| {
@ -272,8 +276,8 @@ impl AsyncCryptoSystemGuard<'_> {
pub async fn crypt_in_place_no_auth( pub async fn crypt_in_place_no_auth(
&self, &self,
body: &mut [u8], body: &mut [u8],
nonce: &BareNonce, nonce: &Nonce,
shared_secret: &BareSharedSecret, shared_secret: &SharedSecret,
) -> VeilidAPIResult<()> { ) -> VeilidAPIResult<()> {
yielding(|| { yielding(|| {
self.guard self.guard
@ -286,8 +290,8 @@ impl AsyncCryptoSystemGuard<'_> {
&self, &self,
in_buf: &[u8], in_buf: &[u8],
out_buf: &mut [u8], out_buf: &mut [u8],
nonce: &BareNonce, nonce: &Nonce,
shared_secret: &BareSharedSecret, shared_secret: &SharedSecret,
) -> VeilidAPIResult<()> { ) -> VeilidAPIResult<()> {
yielding(|| { yielding(|| {
self.guard self.guard
@ -299,8 +303,8 @@ impl AsyncCryptoSystemGuard<'_> {
pub async fn crypt_no_auth_aligned_8( pub async fn crypt_no_auth_aligned_8(
&self, &self,
body: &[u8], body: &[u8],
nonce: &BareNonce, nonce: &Nonce,
shared_secret: &BareSharedSecret, shared_secret: &SharedSecret,
) -> VeilidAPIResult<Vec<u8>> { ) -> VeilidAPIResult<Vec<u8>> {
yielding(|| { yielding(|| {
self.guard self.guard
@ -312,8 +316,8 @@ impl AsyncCryptoSystemGuard<'_> {
pub async fn crypt_no_auth_unaligned( pub async fn crypt_no_auth_unaligned(
&self, &self,
body: &[u8], body: &[u8],
nonce: &BareNonce, nonce: &Nonce,
shared_secret: &BareSharedSecret, shared_secret: &SharedSecret,
) -> VeilidAPIResult<Vec<u8>> { ) -> VeilidAPIResult<Vec<u8>> {
yielding(|| { yielding(|| {
self.guard self.guard
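A minimal sketch of the typed-key call pattern now used by this async guard
(assuming `crypto: &Crypto` and the VLD0 cryptosystem; names are placeholders):

    let vcrypto = crypto.get_async(CRYPTO_KIND_VLD0).unwrap();
    let (key_a, secret_a) = vcrypto.generate_keypair().await.into_split();
    let (key_b, _secret_b) = vcrypto.generate_keypair().await.into_split();
    // DH now takes typed PublicKey/SecretKey and returns a typed SharedSecret
    let shared: SharedSecret = vcrypto.cached_dh(&key_b, &secret_a).await?;
    // validate_keypair is now fallible and returns VeilidAPIResult<bool>
    assert!(vcrypto.validate_keypair(&key_a, &secret_a).await?);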

View file

@ -1,4 +1,3 @@
mod blake3digest512;
mod dh_cache; mod dh_cache;
mod envelope; mod envelope;
mod guard; mod guard;
@ -6,41 +5,20 @@ mod receipt;
mod types; mod types;
pub mod crypto_system; pub mod crypto_system;
#[cfg(feature = "enable-crypto-none")]
pub(crate) mod none;
#[doc(hidden)] #[doc(hidden)]
pub mod tests; pub mod tests;
#[cfg(feature = "enable-crypto-vld0")]
pub(crate) mod vld0;
// #[cfg(feature = "enable-crypto-vld1")]
// pub(crate) mod vld1;
pub use blake3digest512::*;
pub use crypto_system::*; pub use crypto_system::*;
use dh_cache::*;
pub(crate) use envelope::*; pub(crate) use envelope::*;
pub use guard::*; pub use guard::*;
pub(crate) use receipt::*; pub(crate) use receipt::*;
pub use types::*; pub use types::*;
#[cfg(feature = "enable-crypto-none")]
pub use none::sizes::*;
#[cfg(feature = "enable-crypto-none")]
pub(crate) use none::*;
#[cfg(feature = "enable-crypto-vld0")]
pub use vld0::sizes::*;
#[cfg(feature = "enable-crypto-vld0")]
pub(crate) use vld0::*;
// #[cfg(feature = "enable-crypto-vld1")]
// pub(crate) use vld1::*;
use super::*; use super::*;
use core::convert::TryInto; use core::convert::TryInto;
use dh_cache::*;
use hashlink::linked_hash_map::Entry; use hashlink::linked_hash_map::Entry;
use hashlink::LruCache; use hashlink::LruCache;
use std::marker::PhantomData;
impl_veilid_log_facility!("crypto"); impl_veilid_log_facility!("crypto");
@ -67,8 +45,9 @@ cfg_if! {
} }
/// Number of cryptosystem signatures to keep on structures if many are present beyond the ones we consider valid /// Number of cryptosystem signatures to keep on structures if many are present beyond the ones we consider valid
pub const MAX_CRYPTO_KINDS: usize = 3; pub const MAX_CRYPTO_KINDS: usize = 3;
/// Return the best cryptosystem kind we support /// Return the best cryptosystem kind we support
pub fn best_crypto_kind() -> CryptoKind { pub(crate) fn best_crypto_kind() -> CryptoKind {
VALID_CRYPTO_KINDS[0] VALID_CRYPTO_KINDS[0]
} }
@ -160,16 +139,23 @@ impl Crypto {
.open("crypto_caches", 1) .open("crypto_caches", 1)
.await .await
.wrap_err("failed to open crypto_caches")?; .wrap_err("failed to open crypto_caches")?;
let caches_valid = match db.load(0, b"cache_validity_key").await? { let mut caches_valid = match db.load(0, b"cache_validity_key").await? {
Some(v) => v == cache_validity_key, Some(v) => v == cache_validity_key,
None => false, None => false,
}; };
if caches_valid { if caches_valid {
if let Some(b) = db.load(0, b"dh_cache").await? { if let Some(b) = db.load(0, b"dh_cache").await? {
let mut inner = self.inner.lock(); let mut inner = self.inner.lock();
bytes_to_cache(&b, &mut inner.dh_cache); if let Ok(dh_cache) = bytes_to_cache(&b) {
inner.dh_cache = dh_cache;
} else {
caches_valid = false;
}
} }
} else { }
if !caches_valid {
drop(db); drop(db);
table_store.delete("crypto_caches").await?; table_store.delete("crypto_caches").await?;
db = table_store.open("crypto_caches", 1).await?; db = table_store.open("crypto_caches", 1).await?;
@ -245,12 +231,12 @@ impl Crypto {
} }
// Factory method to get the best crypto version // Factory method to get the best crypto version
pub fn best(&self) -> CryptoSystemGuard<'_> { pub(crate) fn best(&self) -> CryptoSystemGuard<'_> {
self.get(best_crypto_kind()).unwrap() self.get(best_crypto_kind()).unwrap()
} }
// Factory method to get the best crypto version for async use // Factory method to get the best crypto version for async use
pub fn best_async(&self) -> AsyncCryptoSystemGuard<'_> { pub(crate) fn best_async(&self) -> AsyncCryptoSystemGuard<'_> {
self.get_async(best_crypto_kind()).unwrap() self.get_async(best_crypto_kind()).unwrap()
} }
@ -261,17 +247,17 @@ impl Crypto {
&self, &self,
public_keys: &[PublicKey], public_keys: &[PublicKey],
data: &[u8], data: &[u8],
typed_signatures: &[Signature], signatures: &[Signature],
) -> VeilidAPIResult<Option<PublicKeyGroup>> { ) -> VeilidAPIResult<Option<PublicKeyGroup>> {
let mut out = PublicKeyGroup::with_capacity(public_keys.len()); let mut out = PublicKeyGroup::with_capacity(public_keys.len());
for sig in typed_signatures { for signature in signatures {
for nid in public_keys { for public_key in public_keys {
if nid.kind() == sig.kind() { if public_key.kind() == signature.kind() {
if let Some(vcrypto) = self.get(sig.kind()) { if let Some(vcrypto) = self.get(signature.kind()) {
if !vcrypto.verify(nid.ref_value(), data, sig.ref_value())? { if !vcrypto.verify(public_key, data, signature)? {
return Ok(None); return Ok(None);
} }
out.add(nid.clone()); out.add(public_key.clone());
} }
} }
} }
@ -285,17 +271,16 @@ impl Crypto {
pub fn generate_signatures<F, R>( pub fn generate_signatures<F, R>(
&self, &self,
data: &[u8], data: &[u8],
typed_key_pairs: &[KeyPair], key_pairs: &[KeyPair],
transform: F, transform: F,
) -> VeilidAPIResult<Vec<R>> ) -> VeilidAPIResult<Vec<R>>
where where
F: Fn(&KeyPair, BareSignature) -> R, F: Fn(&KeyPair, Signature) -> R,
{ {
let mut out = Vec::<R>::with_capacity(typed_key_pairs.len()); let mut out = Vec::<R>::with_capacity(key_pairs.len());
for kp in typed_key_pairs { for kp in key_pairs {
if let Some(vcrypto) = self.get(kp.kind()) { if let Some(vcrypto) = self.get(kp.kind()) {
let sig = let sig = vcrypto.sign(&kp.key(), &kp.secret(), data)?;
vcrypto.sign(kp.ref_value().ref_key(), kp.ref_value().ref_secret(), data)?;
out.push(transform(kp, sig)) out.push(transform(kp, sig))
} }
} }
@ -308,12 +293,12 @@ impl Crypto {
#[cfg(feature = "enable-crypto-vld0")] #[cfg(feature = "enable-crypto-vld0")]
if crypto_kind == CRYPTO_KIND_VLD0 { if crypto_kind == CRYPTO_KIND_VLD0 {
let kp = vld0_generate_keypair(); let kp = vld0_generate_keypair();
return Ok(KeyPair::new(crypto_kind, kp)); return Ok(kp);
} }
#[cfg(feature = "enable-crypto-none")] #[cfg(feature = "enable-crypto-none")]
if crypto_kind == CRYPTO_KIND_NONE { if crypto_kind == CRYPTO_KIND_NONE {
let kp = none_generate_keypair(); let kp = none_generate_keypair();
return Ok(KeyPair::new(crypto_kind, kp)); return Ok(kp);
} }
Err(VeilidAPIError::generic("invalid crypto kind")) Err(VeilidAPIError::generic("invalid crypto kind"))
} }
@ -323,9 +308,11 @@ impl Crypto {
fn cached_dh_internal<T: CryptoSystem>( fn cached_dh_internal<T: CryptoSystem>(
&self, &self,
vcrypto: &T, vcrypto: &T,
key: &BarePublicKey, key: &PublicKey,
secret: &BareSecretKey, secret: &SecretKey,
) -> VeilidAPIResult<BareSharedSecret> { ) -> VeilidAPIResult<SharedSecret> {
vcrypto.check_public_key(key)?;
vcrypto.check_secret_key(secret)?;
Ok( Ok(
match self.inner.lock().dh_cache.entry(DHCacheKey { match self.inner.lock().dh_cache.entry(DHCacheKey {
key: key.clone(), key: key.clone(),
@ -430,10 +417,7 @@ impl Crypto {
let (public_key, secret_key) = let (public_key, secret_key) =
if let (Some(public_key), Some(secret_key)) = (public_key, secret_key) { if let (Some(public_key), Some(secret_key)) = (public_key, secret_key) {
// Validate node id // Validate node id
if !vcrypto if !vcrypto.validate_keypair(&public_key, &secret_key).await? {
.validate_keypair(&public_key.value(), &secret_key.value())
.await
{
apibail_generic!(format!( apibail_generic!(format!(
"secret_key and public_key don't match:\npublic_key: {}\nsecret_key: {}", "secret_key and public_key don't match:\npublic_key: {}\nsecret_key: {}",
public_key, secret_key public_key, secret_key
@ -443,11 +427,7 @@ impl Crypto {
} else { } else {
// If we still don't have a valid keypair, generate one // If we still don't have a valid keypair, generate one
veilid_log!(self debug "generating new node {} keypair", ck); veilid_log!(self debug "generating new node {} keypair", ck);
let kp = vcrypto.generate_keypair().await; vcrypto.generate_keypair().await.into_split()
(
PublicKey::new(ck, kp.key()),
SecretKey::new(ck, kp.secret()),
)
}; };
veilid_log!(self info "Public Key: {}", public_key); veilid_log!(self info "Public Key: {}", public_key);
@ -476,13 +456,7 @@ impl Crypto {
.expect("Valid crypto kind is not actually valid."); .expect("Valid crypto kind is not actually valid.");
#[cfg(test)] #[cfg(test)]
let (public_key, secret_key) = { let (public_key, secret_key) = vcrypto.generate_keypair().await.into_split();
let kp = vcrypto.generate_keypair().await;
(
PublicKey::new(ck, kp.key()),
SecretKey::new(ck, kp.secret()),
)
};
#[cfg(not(test))] #[cfg(not(test))]
let (public_key, secret_key) = self.setup_public_key(vcrypto, table_store).await?; let (public_key, secret_key) = self.setup_public_key(vcrypto, table_store).await?;
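A minimal sketch of the reworked multi-signature helpers above (assuming
`crypto: &Crypto` and that `Crypto::generate_keypair` keeps a
`(CryptoKind) -> VeilidAPIResult<KeyPair>` shape; names are placeholders):

    let key_pairs = vec![crypto.generate_keypair(CRYPTO_KIND_VLD0)?];
    // The transform closure receives the originating KeyPair and the typed Signature
    let sigs: Vec<Signature> =
        crypto.generate_signatures(b"payload", &key_pairs, |_kp, sig| sig)?;
    let keys: Vec<PublicKey> = key_pairs.iter().map(|kp| kp.key()).collect();
    // verify_signatures matches signatures to public keys by crypto kind and returns
    // the set of keys that verified, or None if any signature fails
    assert!(crypto.verify_signatures(&keys, b"payload", &sigs)?.is_some());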

View file

@ -1,238 +0,0 @@
#![allow(clippy::absurd_extreme_comparisons)]
use super::*;
use crate::*;
use core::convert::TryInto;
pub const MAX_RECEIPT_SIZE: usize = 1380;
pub const MAX_EXTRA_DATA_SIZE: usize = MAX_RECEIPT_SIZE - MIN_RECEIPT_SIZE; // 1250
pub const MIN_RECEIPT_SIZE: usize = 130;
// Version number of receipt format
fourcc_type!(ReceiptVersion);
pub const RECEIPT_VERSION_VLD0: ReceiptVersion = ReceiptVersion(*b"RCP0");
// pub const RECEIPT_VERSION_VLD0_FOURCC: u32 = u32::from_be_bytes(*b"RCP0");
/// Receipt versions in order of preference; the best receipt version is first and the worst is last
pub const VALID_RECEIPT_VERSIONS: [ReceiptVersion; 1] = [RECEIPT_VERSION_VLD0];
static_assertions::const_assert_eq!(VALID_RECEIPT_VERSIONS.len(), VALID_ENVELOPE_VERSIONS.len());
/// Return the best receipt version we support
pub fn best_receipt_version() -> ReceiptVersion {
VALID_RECEIPT_VERSIONS[0]
}
/// Out-of-band receipts are versioned along with envelopes.
///
/// #[repr(C, packed)]
/// struct ReceiptHeader {
/// // Size is at least 4 bytes. Depending on the version specified, the size may vary and the data should be cast to the appropriate struct
/// version: [u8; 4], // 0x00: 0x52 0x43 0x50 0x30 ("RCP0")
/// }
///
/// #[repr(C, packed)]
/// struct ReceiptV0 {
/// // Size is 66 bytes without extra data and signature, 130 with signature
/// version: [u8; 4], // 0x00: 0x52 0x43 0x50 0x30 ("RCP0")
/// crypto_kind: [u8; 4], // 0x04: CryptoSystemVersion FOURCC code
/// size: u16, // 0x08: Total size of the receipt including the extra data and the signature. Maximum size is 1380 bytes.
/// nonce: [u8; 24], // 0x0A: Randomly chosen bytes that represent a unique receipt. Could be used to encrypt the extra data, but it's not required.
/// sender_id: [u8; 32], // 0x22: Node ID of the message source, which is the public key of the sender
/// extra_data: [u8; ??], // 0x42: Extra data is appended (arbitrary extra data, not encrypted by receipt itself, maximum size is 1250 bytes)
/// signature: [u8; 64], // 0x?? (end-0x40): BareSignature of the entire receipt including header and extra data is appended to the packet
/// }
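///
/// A quick tally of the fixed fields above confirms the offsets: 4 (version) +
/// 4 (crypto_kind) + 2 (size) + 24 (nonce) + 32 (sender_id) = 66 bytes of header
/// (extra data starts at 0x42), and 66 + 64 (signature) = 130 bytes for the
/// minimum receipt, leaving 1380 - 130 = 1250 bytes for extra data.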
#[derive(Debug, Clone, PartialEq, Eq, Default)]
pub struct Receipt {
version: ReceiptVersion,
crypto_kind: CryptoKind,
nonce: BareNonce,
sender_id: BareNodeId,
extra_data: Vec<u8>,
}
impl Receipt {
pub fn try_new<D: AsRef<[u8]>>(
version: ReceiptVersion,
crypto_kind: CryptoKind,
nonce: BareNonce,
sender_id: BareNodeId,
extra_data: D,
) -> VeilidAPIResult<Self> {
assert!(VALID_RECEIPT_VERSIONS.contains(&version));
assert!(VALID_CRYPTO_KINDS.contains(&crypto_kind));
if extra_data.as_ref().len() > MAX_EXTRA_DATA_SIZE {
apibail_parse_error!(
"extra data too large for receipt",
extra_data.as_ref().len()
);
}
Ok(Self {
version,
crypto_kind,
nonce,
sender_id,
extra_data: Vec::from(extra_data.as_ref()),
})
}
#[instrument(level = "trace", target = "receipt", skip_all, err)]
pub fn from_signed_data(crypto: &Crypto, data: &[u8]) -> VeilidAPIResult<Receipt> {
// Ensure we are at least the length of the envelope
if data.len() < MIN_RECEIPT_SIZE {
apibail_parse_error!("receipt too small", data.len());
}
// Check version
let version: ReceiptVersion = data[0x00..0x04]
.try_into()
.map_err(VeilidAPIError::internal)?;
if !VALID_RECEIPT_VERSIONS.contains(&version) {
apibail_parse_error!("unsupported receipt version", version);
}
// Check crypto kind
let crypto_kind = CryptoKind::try_from(&data[0x04..0x08])?;
let Some(vcrypto) = crypto.get(crypto_kind) else {
apibail_parse_error!("unsupported crypto kind", crypto_kind);
};
// Get size and ensure it matches the size of the envelope and is less than the maximum message size
let size: u16 = u16::from_le_bytes(
data[0x08..0x0A]
.try_into()
.map_err(VeilidAPIError::internal)?,
);
if (size as usize) > MAX_RECEIPT_SIZE {
apibail_parse_error!("receipt size is too large", size);
}
if (size as usize) != data.len() {
apibail_parse_error!(
"size doesn't match receipt size",
format!("size={} data.len()={}", size, data.len())
);
}
// Get sender id
let sender_id = BareNodeId::new(
data[0x22..0x42]
.try_into()
.map_err(VeilidAPIError::internal)?,
);
// Get signature
let signature = BareSignature::new(
data[(data.len() - 64)..]
.try_into()
.map_err(VeilidAPIError::internal)?,
);
// Validate signature
if !vcrypto
.verify(
&sender_id.clone().into(),
&data[0..(data.len() - 64)],
&signature,
)
.map_err(VeilidAPIError::generic)?
{
apibail_parse_error!("signature failure in receipt", signature);
}
// Get nonce
let nonce: BareNonce = BareNonce::new(
data[0x0A..0x22]
.try_into()
.map_err(VeilidAPIError::internal)?,
);
// Get extra data and signature
let extra_data: Vec<u8> = Vec::from(&data[0x42..(data.len() - 64)]);
// Return receipt
Ok(Self {
version,
crypto_kind,
nonce,
sender_id,
extra_data,
})
}
#[instrument(level = "trace", target = "receipt", skip_all, err)]
pub fn to_signed_data(
&self,
crypto: &Crypto,
secret: &BareSecretKey,
) -> VeilidAPIResult<Vec<u8>> {
// Ensure extra data isn't too long
let receipt_size: usize = self.extra_data.len() + MIN_RECEIPT_SIZE;
if receipt_size > MAX_RECEIPT_SIZE {
apibail_parse_error!("receipt too large", receipt_size);
}
// Get crypto version
let vcrypto = crypto
.get(self.crypto_kind)
.expect("need to ensure only valid crypto kinds here");
let mut data: Vec<u8> = vec![0u8; receipt_size];
// Write version
data[0x00..0x04].copy_from_slice(&self.version.0);
// Write crypto kind
data[0x04..0x08].copy_from_slice(&self.crypto_kind.0);
// Write size
data[0x08..0x0A].copy_from_slice(&(receipt_size as u16).to_le_bytes());
// Write nonce
data[0x0A..0x22].copy_from_slice(&self.nonce);
// Write sender node id
data[0x22..0x42].copy_from_slice(&self.sender_id);
// Write extra data
if !self.extra_data.is_empty() {
data[0x42..(receipt_size - 64)].copy_from_slice(self.extra_data.as_slice());
}
// Sign the receipt
let signature = vcrypto
.sign(
&self.sender_id.clone().into(),
secret,
&data[0..(receipt_size - 64)],
)
.map_err(VeilidAPIError::generic)?;
// Append the signature
data[(receipt_size - 64)..].copy_from_slice(&signature);
Ok(data)
}
#[expect(dead_code)]
pub fn get_version(&self) -> ReceiptVersion {
self.version
}
#[expect(dead_code)]
pub fn get_crypto_kind(&self) -> CryptoKind {
self.crypto_kind
}
pub fn get_nonce(&self) -> BareNonce {
self.nonce.clone()
}
#[expect(dead_code)]
pub fn get_bare_sender_id(&self) -> BareNodeId {
self.sender_id.clone()
}
#[expect(dead_code)]
pub fn get_sender_id(&self) -> NodeId {
NodeId::new(self.crypto_kind, self.sender_id.clone())
}
#[must_use]
pub fn get_extra_data(&self) -> &[u8] {
&self.extra_data
}
}

View file

@ -0,0 +1,337 @@
#![allow(clippy::absurd_extreme_comparisons)]
use super::*;
use crate::routing_table::*;
use core::convert::TryInto;
// Version number of receipt format
fourcc_type!(ReceiptVersion);
// RCP0
pub const RECEIPT_VERSION_RCP0: ReceiptVersion = ReceiptVersion::new(*b"RCP0");
pub const RCP0_NONCE_LENGTH: usize = 24;
pub const RCP0_SIGNATURE_LENGTH: usize = 64;
pub const RCP0_MAX_RECEIPT_SIZE: usize = 1380;
pub const RCP0_MAX_EXTRA_DATA_SIZE: usize = RCP0_MAX_RECEIPT_SIZE - RCP0_MIN_RECEIPT_SIZE; // 1250
pub const RCP0_MIN_RECEIPT_SIZE: usize = 130;
/// Receipt versions in order of preference; the best receipt version is first and the worst is last
pub const VALID_RECEIPT_VERSIONS: [ReceiptVersion; 1] = [RECEIPT_VERSION_RCP0];
static_assertions::const_assert_eq!(VALID_RECEIPT_VERSIONS.len(), VALID_ENVELOPE_VERSIONS.len());
/// Return the best receipt version we support
pub fn best_receipt_version() -> ReceiptVersion {
VALID_RECEIPT_VERSIONS[0]
}
/// Out-of-band receipts are versioned along with envelopes.
///
/// #[repr(C, packed)]
/// struct ReceiptHeader {
/// // Size is at least 4 bytes. Depending on the version specified, the size may vary and the data should be cast to the appropriate struct
/// version: [u8; 4], // 0x00: 0x52 0x43 0x50 0x30 ("RCP0")
/// }
///
/// #[repr(C, packed)]
/// struct ReceiptRCP0 {
/// // Size is 66 bytes without extra data and signature, 130 with signature
/// version: [u8; 4], // 0x00: 0x52 0x43 0x50 0x30 ("RCP0")
/// crypto_kind: [u8; 4], // 0x04: CryptoSystemVersion FOURCC code
/// size: u16, // 0x08: Total size of the receipt including the extra data and the signature. Maximum size is 1380 bytes.
/// nonce: [u8; 24], // 0x0A: Randomly chosen bytes that represent a unique receipt. Could be used to encrypt the extra data, but it's not required.
/// sender_id: [u8; 32], // 0x22: Node ID of the message source, which is the public key of the sender
/// extra_data: [u8; ??], // 0x42: Extra data is appended (arbitrary extra data, not encrypted by receipt itself, maximum size is 1250 bytes)
/// signature: [u8; 64], // 0x?? (end-0x40): BareSignature of the entire receipt including header and extra data is appended to the packet
/// }
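///
/// A minimal sketch of producing and checking a receipt with the typed-key API
/// below (a sketch, assuming `crypto` and `vcrypto` are supplied by the caller;
/// the flow mirrors the receipt round-trip test elsewhere in this commit):
///
/// ```ignore
/// let (sender_key, sender_secret) = vcrypto.generate_keypair().await.into_split();
/// let sender_id = crypto.routing_table().generate_node_id(&sender_key)?;
/// let receipt = Receipt::try_new_rcp0(
///     &crypto,
///     vcrypto.kind(),
///     vcrypto.random_nonce().await,
///     sender_id,
///     b"extra data",
/// )?;
/// let wire = receipt.to_signed_data(&crypto, &sender_secret)?;
/// let receipt2 = Receipt::try_from_signed_data(&crypto, &wire)?;
/// assert_eq!(receipt2.get_extra_data(), &b"extra data"[..]);
/// ```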
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum Receipt {
RCP0 { rcp0: ReceiptRCP0 },
}
impl Receipt {
#[instrument(level = "trace", target = "envelope", skip_all)]
pub fn try_new_rcp0<D: AsRef<[u8]>>(
crypto: &Crypto,
crypto_kind: CryptoKind,
nonce: Nonce,
sender_id: NodeId,
extra_data: D,
) -> VeilidAPIResult<Self> {
Ok(Self::RCP0 {
rcp0: ReceiptRCP0::try_new(crypto, crypto_kind, nonce, sender_id, extra_data)?,
})
}
#[instrument(level = "trace", target = "receipt", skip_all, err)]
pub fn try_from_signed_data(crypto: &Crypto, data: &[u8]) -> VeilidAPIResult<Receipt> {
// Ensure we are at least the length of the envelope
if data.len() < 4 {
apibail_parse_error!("receipt header too small", data.len());
}
// Check version
let version: ReceiptVersion = data[0x00..0x04]
.try_into()
.map_err(VeilidAPIError::internal)?;
match version {
RECEIPT_VERSION_RCP0 => Ok(Self::RCP0 {
rcp0: ReceiptRCP0::try_from_signed_data(crypto, data)?,
}),
_ => {
apibail_parse_error!("unsupported receipt version", version);
}
}
}
#[instrument(level = "trace", target = "envelope", skip_all)]
pub fn to_signed_data(
&self,
crypto: &Crypto,
secret_key: &SecretKey,
) -> VeilidAPIResult<Vec<u8>> {
match self {
Receipt::RCP0 { rcp0 } => rcp0.to_signed_data(crypto, secret_key),
}
}
#[expect(dead_code)]
pub fn get_version(&self) -> ReceiptVersion {
match self {
Receipt::RCP0 { rcp0: _ } => RECEIPT_VERSION_RCP0,
}
}
#[expect(dead_code)]
pub fn get_crypto_kind(&self) -> CryptoKind {
match self {
Receipt::RCP0 { rcp0 } => rcp0.get_crypto_kind(),
}
}
pub fn get_nonce(&self) -> Nonce {
match self {
Receipt::RCP0 { rcp0 } => rcp0.get_nonce(),
}
}
#[expect(dead_code)]
pub fn get_sender_id(&self) -> NodeId {
match self {
Receipt::RCP0 { rcp0 } => rcp0.get_sender_id(),
}
}
pub fn get_extra_data(&self) -> &[u8] {
match self {
Receipt::RCP0 { rcp0 } => rcp0.get_extra_data(),
}
}
}
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct ReceiptRCP0 {
crypto_kind: CryptoKind,
nonce: Nonce,
bare_sender_id: BareNodeId,
extra_data: Vec<u8>,
}
impl ReceiptRCP0 {
pub fn try_new<D: AsRef<[u8]>>(
crypto: &Crypto,
crypto_kind: CryptoKind,
nonce: Nonce,
sender_id: NodeId,
extra_data: D,
) -> VeilidAPIResult<Self> {
let vcrypto = Self::validate_crypto_kind(crypto, crypto_kind)?;
vcrypto.check_nonce(&nonce)?;
Self::check_node_id(crypto_kind, &sender_id)?;
if extra_data.as_ref().len() > RCP0_MAX_EXTRA_DATA_SIZE {
apibail_parse_error!(
"extra data too large for receipt",
extra_data.as_ref().len()
);
}
Ok(Self {
crypto_kind,
nonce,
bare_sender_id: sender_id.value(),
extra_data: Vec::from(extra_data.as_ref()),
})
}
#[instrument(level = "trace", target = "receipt", skip_all, err)]
pub fn try_from_signed_data(crypto: &Crypto, data: &[u8]) -> VeilidAPIResult<Self> {
// Ensure we are at least the length of the envelope
if data.len() < RCP0_MIN_RECEIPT_SIZE {
apibail_parse_error!("receipt too small", data.len());
}
// Check crypto kind
let crypto_kind = CryptoKind::try_from(&data[0x04..0x08])?;
let vcrypto = Self::validate_crypto_kind(crypto, crypto_kind)?;
// Get size and ensure it matches the size of the envelope and is less than the maximum message size
let size: u16 = u16::from_le_bytes(
data[0x08..0x0A]
.try_into()
.map_err(VeilidAPIError::internal)?,
);
if (size as usize) > RCP0_MAX_RECEIPT_SIZE {
apibail_parse_error!("receipt size is too large", size);
}
if (size as usize) != data.len() {
apibail_parse_error!(
"size doesn't match receipt size",
format!("size={} data.len()={}", size, data.len())
);
}
// Get sender id
let bare_sender_id = BareNodeId::new(
data[0x22..0x42]
.try_into()
.map_err(VeilidAPIError::internal)?,
);
let sender_public_key = PublicKey::new(crypto_kind, BarePublicKey::new(&bare_sender_id));
// Get signature
let bare_signature = BareSignature::new(
data[(data.len() - 64)..]
.try_into()
.map_err(VeilidAPIError::internal)?,
);
let signature = Signature::new(crypto_kind, bare_signature);
// Validate signature
if !vcrypto
.verify(&sender_public_key, &data[0..(data.len() - 64)], &signature)
.map_err(VeilidAPIError::generic)?
{
apibail_parse_error!("signature failure in receipt", signature);
}
// Get nonce
let nonce: Nonce = Nonce::new(
data[0x0A..0x22]
.try_into()
.map_err(VeilidAPIError::internal)?,
);
// Get extra data and signature
let extra_data: Vec<u8> = Vec::from(&data[0x42..(data.len() - 64)]);
// Return receipt
Ok(Self {
crypto_kind,
nonce,
bare_sender_id,
extra_data,
})
}
#[instrument(level = "trace", target = "receipt", skip_all, err)]
pub fn to_signed_data(
&self,
crypto: &Crypto,
secret_key: &SecretKey,
) -> VeilidAPIResult<Vec<u8>> {
let vcrypto = crypto
.get(self.crypto_kind)
.expect("need to ensure only valid crypto kinds here");
vcrypto.check_secret_key(secret_key)?;
// Ensure extra data isn't too long
let receipt_size: usize = self.extra_data.len() + RCP0_MIN_RECEIPT_SIZE;
if receipt_size > RCP0_MAX_RECEIPT_SIZE {
apibail_parse_error!("receipt too large", receipt_size);
}
let mut data: Vec<u8> = vec![0u8; receipt_size];
// Write version
data[0x00..0x04].copy_from_slice(&RECEIPT_VERSION_RCP0.0);
// Write crypto kind
data[0x04..0x08].copy_from_slice(self.crypto_kind.bytes());
// Write size
data[0x08..0x0A].copy_from_slice(&(receipt_size as u16).to_le_bytes());
// Write nonce
data[0x0A..0x22].copy_from_slice(&self.nonce);
// Write sender node id
data[0x22..0x42].copy_from_slice(&self.bare_sender_id);
// Write extra data
if !self.extra_data.is_empty() {
data[0x42..(receipt_size - RCP0_SIGNATURE_LENGTH)]
.copy_from_slice(self.extra_data.as_slice());
}
// Sign the receipt
let sender_public_key =
PublicKey::new(self.crypto_kind, BarePublicKey::new(&self.bare_sender_id));
let signature = vcrypto
.sign(
&sender_public_key,
secret_key,
&data[0..(receipt_size - RCP0_SIGNATURE_LENGTH)],
)
.map_err(VeilidAPIError::generic)?;
// Append the signature
data[(receipt_size - 64)..].copy_from_slice(signature.ref_value());
Ok(data)
}
pub fn get_crypto_kind(&self) -> CryptoKind {
self.crypto_kind
}
pub fn get_nonce(&self) -> Nonce {
self.nonce.clone()
}
pub fn get_sender_id(&self) -> NodeId {
NodeId::new(self.crypto_kind, self.bare_sender_id.clone())
}
pub fn get_extra_data(&self) -> &[u8] {
&self.extra_data
}
//////////////////////////////////////////////////////////////////
fn validate_crypto_kind(
crypto: &Crypto,
crypto_kind: CryptoKind,
) -> VeilidAPIResult<CryptoSystemGuard<'_>> {
let vcrypto = crypto
.get(crypto_kind)
.ok_or_else(|| VeilidAPIError::parse_error("unsupported crypto kind", crypto_kind))?;
// Verify crypto kind can be used with this envelope
if vcrypto.nonce_length() != RCP0_NONCE_LENGTH
|| vcrypto.hash_digest_length() != HASH_COORDINATE_LENGTH
|| vcrypto.public_key_length() != HASH_COORDINATE_LENGTH
{
apibail_generic!("unsupported crypto kind for this envelope type");
}
Ok(vcrypto)
}
fn check_node_id(crypto_kind: CryptoKind, node_id: &NodeId) -> VeilidAPIResult<()> {
if node_id.kind() != crypto_kind {
apibail_parse_error!("invalid crypto kind for RCP0", node_id.kind());
}
if node_id.ref_value().len() != HASH_COORDINATE_LENGTH {
apibail_parse_error!("invalid node_id length for RCP0", node_id.ref_value().len());
}
Ok(())
}
}

View file

@ -132,6 +132,13 @@ pub fn fix_fake_public_key() -> PublicKey {
) )
} }
pub fn fix_fake_secret_key() -> SecretKey {
SecretKey::new(
CryptoKind::from_str("FAKE").unwrap(),
fix_fake_bare_secret_key(),
)
}
pub fn fix_fake_bare_secret_key() -> BareSecretKey { pub fn fix_fake_bare_secret_key() -> BareSecretKey {
let mut fake_key = [0u8; VLD0_SECRET_KEY_LENGTH]; let mut fake_key = [0u8; VLD0_SECRET_KEY_LENGTH];
random_bytes(&mut fake_key); random_bytes(&mut fake_key);

View file

@ -182,9 +182,15 @@ pub async fn test_no_auth(vcrypto: &AsyncCryptoSystemGuard<'_>) {
pub async fn test_dh(vcrypto: &AsyncCryptoSystemGuard<'_>) { pub async fn test_dh(vcrypto: &AsyncCryptoSystemGuard<'_>) {
trace!("test_dh"); trace!("test_dh");
let (dht_key, dht_key_secret) = vcrypto.generate_keypair().await.into_split(); let (dht_key, dht_key_secret) = vcrypto.generate_keypair().await.into_split();
assert!(vcrypto.validate_keypair(&dht_key, &dht_key_secret).await); assert!(vcrypto
.validate_keypair(&dht_key, &dht_key_secret)
.await
.expect("should succeed"));
let (dht_key2, dht_key_secret2) = vcrypto.generate_keypair().await.into_split(); let (dht_key2, dht_key_secret2) = vcrypto.generate_keypair().await.into_split();
assert!(vcrypto.validate_keypair(&dht_key2, &dht_key_secret2).await); assert!(vcrypto
.validate_keypair(&dht_key2, &dht_key_secret2)
.await
.expect("should succeed"));
let r1 = vcrypto let r1 = vcrypto
.compute_dh(&dht_key, &dht_key_secret2) .compute_dh(&dht_key, &dht_key_secret2)

View file

@ -18,16 +18,25 @@ pub async fn test_envelope_round_trip(
// Create envelope // Create envelope
let ts = Timestamp::from(0x12345678ABCDEF69u64); let ts = Timestamp::from(0x12345678ABCDEF69u64);
let nonce = vcrypto.random_nonce().await; let nonce = vcrypto.random_nonce().await;
let (sender_id, sender_secret) = vcrypto.generate_keypair().await.into_split(); let (sender_public_key, sender_secret) = vcrypto.generate_keypair().await.into_split();
let (recipient_id, recipient_secret) = vcrypto.generate_keypair().await.into_split(); let sender_id = crypto
let envelope = Envelope::new( .routing_table()
envelope_version, .generate_node_id(&sender_public_key)
vcrypto.kind(), .expect("should generate node id");
ts, let (recipient_public_key, recipient_secret) = vcrypto.generate_keypair().await.into_split();
nonce, let recipient_id = crypto
sender_id.into(), .routing_table()
recipient_id.into(), .generate_node_id(&recipient_public_key)
); .expect("should generate node id");
let envelope = match envelope_version {
ENVELOPE_VERSION_ENV0 => {
Envelope::try_new_env0(&crypto, vcrypto.kind(), ts, nonce, sender_id, recipient_id)
.expect("should create envelope")
}
_ => {
panic!("unsupported envelope version");
}
};
// Create arbitrary body // Create arbitrary body
let body = b"This is an arbitrary body"; let body = b"This is an arbitrary body";
@ -38,7 +47,7 @@ pub async fn test_envelope_round_trip(
.expect("failed to encrypt data"); .expect("failed to encrypt data");
// Deserialize from bytes // Deserialize from bytes
let envelope2 = Envelope::from_signed_data(&crypto, &enc_data, &network_key) let envelope2 = Envelope::try_from_signed_data(&crypto, &enc_data, &network_key)
.expect("failed to deserialize envelope from data"); .expect("failed to deserialize envelope from data");
let body2 = envelope2 let body2 = envelope2
@ -54,13 +63,13 @@ pub async fn test_envelope_round_trip(
let mut mod_enc_data = enc_data.clone(); let mut mod_enc_data = enc_data.clone();
mod_enc_data[enc_data_len - 1] ^= 0x80u8; mod_enc_data[enc_data_len - 1] ^= 0x80u8;
assert!( assert!(
Envelope::from_signed_data(&crypto, &mod_enc_data, &network_key).is_err(), Envelope::try_from_signed_data(&crypto, &mod_enc_data, &network_key).is_err(),
"should have failed to decode envelope with modified signature" "should have failed to decode envelope with modified signature"
); );
let mut mod_enc_data2 = enc_data.clone(); let mut mod_enc_data2 = enc_data.clone();
mod_enc_data2[enc_data_len - 65] ^= 0x80u8; mod_enc_data2[enc_data_len - 65] ^= 0x80u8;
assert!( assert!(
Envelope::from_signed_data(&crypto, &mod_enc_data2, &network_key).is_err(), Envelope::try_from_signed_data(&crypto, &mod_enc_data2, &network_key).is_err(),
"should have failed to decode envelope with modified data" "should have failed to decode envelope with modified data"
); );
} }
@ -76,15 +85,20 @@ pub async fn test_receipt_round_trip(
// Create receipt // Create receipt
let nonce = vcrypto.random_nonce().await; let nonce = vcrypto.random_nonce().await;
let (sender_id, sender_secret) = vcrypto.generate_keypair().await.into_split(); let (sender_public_key, sender_secret) = vcrypto.generate_keypair().await.into_split();
let receipt = Receipt::try_new( let sender_id = crypto
receipt_version, .routing_table()
vcrypto.kind(), .generate_node_id(&sender_public_key)
nonce, .expect("should generate node id");
sender_id.into(), let receipt = match receipt_version {
body, RECEIPT_VERSION_RCP0 => {
) Receipt::try_new_rcp0(&crypto, vcrypto.kind(), nonce, sender_id, body)
.expect("should not fail"); .expect("should not fail")
}
_ => {
panic!("unsupported receipt version");
}
};
// Serialize to bytes // Serialize to bytes
let mut enc_data = receipt let mut enc_data = receipt
@ -92,12 +106,12 @@ pub async fn test_receipt_round_trip(
.expect("failed to make signed data"); .expect("failed to make signed data");
// Deserialize from bytes // Deserialize from bytes
let receipt2 = Receipt::from_signed_data(&crypto, &enc_data) let receipt2 = Receipt::try_from_signed_data(&crypto, &enc_data)
.expect("failed to deserialize envelope from data"); .expect("failed to deserialize envelope from data");
// Should not validate even when a single bit is changed // Should not validate even when a single bit is changed
enc_data[5] = 0x01; enc_data[5] = 0x01;
let _ = Receipt::from_signed_data(&crypto, &enc_data) let _ = Receipt::try_from_signed_data(&crypto, &enc_data)
.expect_err("should have failed to decrypt using wrong secret"); .expect_err("should have failed to decrypt using wrong secret");
// Compare receipts // Compare receipts
@ -114,8 +128,12 @@ pub async fn test_all() {
let vcrypto = crypto.get_async(v).unwrap(); let vcrypto = crypto.get_async(v).unwrap();
test_envelope_round_trip(ev, &vcrypto, None).await; test_envelope_round_trip(ev, &vcrypto, None).await;
test_envelope_round_trip(ev, &vcrypto, Some(vcrypto.random_shared_secret().await)) test_envelope_round_trip(
.await; ev,
&vcrypto,
Some(vcrypto.random_shared_secret().await.into_value()),
)
.await;
} }
} }
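
The test above shows the two structural changes in this commit's envelope path: node ids are derived from public keys via the routing table rather than converted from them, and envelope construction is explicit about the envelope version. A condensed sketch of that flow, assuming the same test context (the `Crypto` handle type and error handling are assumptions; the method names are taken from the diff):

```rust
// Condensed sketch of the flow exercised above, in the same test context.
async fn envelope_round_trip_sketch(crypto: &Crypto, vcrypto: &AsyncCryptoSystemGuard<'_>) {
    let ts = Timestamp::from(0u64);
    let nonce = vcrypto.random_nonce().await;

    // Keypairs split into typed public/secret halves...
    let (sender_public_key, _sender_secret) = vcrypto.generate_keypair().await.into_split();
    let (recipient_public_key, _recipient_secret) = vcrypto.generate_keypair().await.into_split();

    // ...and node ids are derived from the public keys, not cast from them.
    let sender_id = crypto
        .routing_table()
        .generate_node_id(&sender_public_key)
        .expect("should generate node id");
    let recipient_id = crypto
        .routing_table()
        .generate_node_id(&recipient_public_key)
        .expect("should generate node id");

    // Envelope construction now names the envelope version explicitly.
    let _envelope =
        Envelope::try_new_env0(crypto, vcrypto.kind(), ts, nonce, sender_id, recipient_id)
            .expect("should create envelope");
}
```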

View file

@ -3,162 +3,188 @@ use crate::crypto::tests::fixtures::*;
pub async fn test_generate_secret(vcrypto: &AsyncCryptoSystemGuard<'_>) { pub async fn test_generate_secret(vcrypto: &AsyncCryptoSystemGuard<'_>) {
// Verify keys generate // Verify keys generate
let (dht_key, dht_key_secret) = vcrypto.generate_keypair().await.into_split(); let (public_key, secret_key) = vcrypto.generate_keypair().await.into_split();
let (dht_key2, dht_key_secret2) = vcrypto.generate_keypair().await.into_split(); let (public_key2, secret_key2) = vcrypto.generate_keypair().await.into_split();
// Verify byte patterns are different between public and secret // Verify byte patterns are different between public and secret
assert_ne!(dht_key.bytes(), dht_key_secret.bytes()); assert_ne!(
assert_ne!(dht_key2.bytes(), dht_key_secret2.bytes()); public_key.ref_value().bytes(),
secret_key.ref_value().bytes()
);
assert_ne!(
public_key2.ref_value().bytes(),
secret_key2.ref_value().bytes()
);
// Verify the keys and secrets are different across keypairs // Verify the keys and secrets are different across keypairs
assert_ne!(dht_key, dht_key2); assert_ne!(public_key, public_key2);
assert_ne!(dht_key_secret, dht_key_secret2); assert_ne!(secret_key, secret_key2);
} }
pub async fn test_sign_and_verify(vcrypto: &AsyncCryptoSystemGuard<'_>) { pub async fn test_sign_and_verify(vcrypto: &AsyncCryptoSystemGuard<'_>) {
// Make two keys // Make two keys
let (dht_key, dht_key_secret) = vcrypto.generate_keypair().await.into_split(); let (public_key, secret_key) = vcrypto.generate_keypair().await.into_split();
let (dht_key2, dht_key_secret2) = vcrypto.generate_keypair().await.into_split(); let (public_key2, secret_key2) = vcrypto.generate_keypair().await.into_split();
// Sign the same message twice // Sign the same message twice
let dht_sig = vcrypto let sig = vcrypto
.sign(&dht_key, &dht_key_secret, LOREM_IPSUM) .sign(&public_key, &secret_key, LOREM_IPSUM)
.await .await
.unwrap(); .unwrap();
trace!("dht_sig: {:?}", dht_sig); trace!("sig: {:?}", sig);
let dht_sig_b = vcrypto let sig_b = vcrypto
.sign(&dht_key, &dht_key_secret, LOREM_IPSUM) .sign(&public_key, &secret_key, LOREM_IPSUM)
.await .await
.unwrap(); .unwrap();
// Sign a second message // Sign a second message
let dht_sig_c = vcrypto let sig_c = vcrypto
.sign(&dht_key, &dht_key_secret, CHEEZBURGER) .sign(&public_key, &secret_key, CHEEZBURGER)
.await .await
.unwrap(); .unwrap();
trace!("dht_sig_c: {:?}", dht_sig_c); trace!("sig_c: {:?}", sig_c);
// Verify they are the same signature // Verify they are the same signature
assert_eq!(dht_sig, dht_sig_b); assert_eq!(sig, sig_b);
// Sign the same message with a different key // Sign the same message with a different key
let dht_sig2 = vcrypto let sig2 = vcrypto
.sign(&dht_key2, &dht_key_secret2, LOREM_IPSUM) .sign(&public_key2, &secret_key2, LOREM_IPSUM)
.await .await
.unwrap(); .unwrap();
// Verify a different key gives a different signature // Verify a different key gives a different signature
assert_ne!(dht_sig2, dht_sig_b); assert_ne!(sig2, sig_b);
// Try using the wrong secret to sign // Try using the wrong secret to sign
let a1 = vcrypto let a1 = vcrypto
.sign(&dht_key, &dht_key_secret, LOREM_IPSUM) .sign(&public_key, &secret_key, LOREM_IPSUM)
.await .await
.unwrap(); .unwrap();
let a2 = vcrypto let a2 = vcrypto
.sign(&dht_key2, &dht_key_secret2, LOREM_IPSUM) .sign(&public_key2, &secret_key2, LOREM_IPSUM)
.await .await
.unwrap(); .unwrap();
let _b1 = vcrypto let _b1 = vcrypto
.sign(&dht_key, &dht_key_secret2, LOREM_IPSUM) .sign(&public_key, &secret_key2, LOREM_IPSUM)
.await .await
.unwrap_err(); .unwrap_err();
let _b2 = vcrypto let _b2 = vcrypto
.sign(&dht_key2, &dht_key_secret, LOREM_IPSUM) .sign(&public_key2, &secret_key, LOREM_IPSUM)
.await .await
.unwrap_err(); .unwrap_err();
assert_ne!(a1, a2); assert_ne!(a1, a2);
assert_eq!(vcrypto.verify(&dht_key, LOREM_IPSUM, &a1).await, Ok(true)); assert_eq!(
assert_eq!(vcrypto.verify(&dht_key2, LOREM_IPSUM, &a2).await, Ok(true)); vcrypto.verify(&public_key, LOREM_IPSUM, &a1).await,
assert_eq!(vcrypto.verify(&dht_key, LOREM_IPSUM, &a2).await, Ok(false)); Ok(true)
assert_eq!(vcrypto.verify(&dht_key2, LOREM_IPSUM, &a1).await, Ok(false)); );
assert_eq!(
vcrypto.verify(&public_key2, LOREM_IPSUM, &a2).await,
Ok(true)
);
assert_eq!(
vcrypto.verify(&public_key, LOREM_IPSUM, &a2).await,
Ok(false)
);
assert_eq!(
vcrypto.verify(&public_key2, LOREM_IPSUM, &a1).await,
Ok(false)
);
// Try verifications that should work // Try verifications that should work
assert_eq!( assert_eq!(
vcrypto.verify(&dht_key, LOREM_IPSUM, &dht_sig).await, vcrypto.verify(&public_key, LOREM_IPSUM, &sig).await,
Ok(true) Ok(true)
); );
assert_eq!( assert_eq!(
vcrypto.verify(&dht_key, LOREM_IPSUM, &dht_sig_b).await, vcrypto.verify(&public_key, LOREM_IPSUM, &sig_b).await,
Ok(true) Ok(true)
); );
assert_eq!( assert_eq!(
vcrypto.verify(&dht_key2, LOREM_IPSUM, &dht_sig2).await, vcrypto.verify(&public_key2, LOREM_IPSUM, &sig2).await,
Ok(true) Ok(true)
); );
assert_eq!( assert_eq!(
vcrypto.verify(&dht_key, CHEEZBURGER, &dht_sig_c).await, vcrypto.verify(&public_key, CHEEZBURGER, &sig_c).await,
Ok(true) Ok(true)
); );
// Try verifications that shouldn't work // Try verifications that shouldn't work
assert_eq!( assert_eq!(
vcrypto.verify(&dht_key2, LOREM_IPSUM, &dht_sig).await, vcrypto.verify(&public_key2, LOREM_IPSUM, &sig).await,
Ok(false) Ok(false)
); );
assert_eq!( assert_eq!(
vcrypto.verify(&dht_key, LOREM_IPSUM, &dht_sig2).await, vcrypto.verify(&public_key, LOREM_IPSUM, &sig2).await,
Ok(false) Ok(false)
); );
assert_eq!( assert_eq!(
vcrypto.verify(&dht_key2, CHEEZBURGER, &dht_sig_c).await, vcrypto.verify(&public_key2, CHEEZBURGER, &sig_c).await,
Ok(false) Ok(false)
); );
assert_eq!( assert_eq!(
vcrypto.verify(&dht_key, CHEEZBURGER, &dht_sig).await, vcrypto.verify(&public_key, CHEEZBURGER, &sig).await,
Ok(false) Ok(false)
); );
} }
pub async fn test_key_conversions(vcrypto: &AsyncCryptoSystemGuard<'_>) { pub async fn test_key_conversions(vcrypto: &AsyncCryptoSystemGuard<'_>) {
// Test default key // Test default key
let (dht_key, dht_key_secret) = (BarePublicKey::default(), BareSecretKey::default()); let (public_key, secret_key) = (
assert!(dht_key.bytes().is_empty()); PublicKey::new(
assert!(dht_key_secret.bytes().is_empty()); vcrypto.kind(),
let dht_key_string = String::from(&dht_key); BarePublicKey::new(&vec![0u8; vcrypto.public_key_length()]),
trace!("dht_key_string: {:?}", dht_key_string); ),
let dht_key_string2 = String::from(&dht_key); SecretKey::new(
trace!("dht_key_string2: {:?}", dht_key_string2); vcrypto.kind(),
assert_eq!(dht_key_string, dht_key_string2); BareSecretKey::new(&vec![0u8; vcrypto.secret_key_length()]),
),
);
let public_key_string = public_key.to_string();
trace!("public_key_string: {:?}", public_key_string);
let public_key_string2 = public_key.to_string();
trace!("public_key_string2: {:?}", public_key_string2);
assert_eq!(public_key_string, public_key_string2);
let dht_key_secret_string = String::from(&dht_key_secret); let secret_key_string = secret_key.to_string();
trace!("dht_key_secret_string: {:?}", dht_key_secret_string); trace!("secret_key_string: {:?}", secret_key_string);
assert_eq!(dht_key_secret_string, dht_key_string); assert_eq!(secret_key_string, public_key_string);
// Make different keys // Make different keys
let (dht_key2, dht_key_secret2) = vcrypto.generate_keypair().await.into_split(); let (public_key2, secret_key2) = vcrypto.generate_keypair().await.into_split();
trace!("dht_key2: {:?}", dht_key2); trace!("public_key2: {:?}", public_key2);
trace!("dht_key_secret2: {:?}", dht_key_secret2); trace!("secret_key2: {:?}", secret_key2);
let (dht_key3, _dht_key_secret3) = vcrypto.generate_keypair().await.into_split(); let (public_key3, secret_key3) = vcrypto.generate_keypair().await.into_split();
trace!("dht_key3: {:?}", dht_key3); trace!("public_key3: {:?}", public_key3);
trace!("_dht_key_secret3: {:?}", _dht_key_secret3); trace!("secret_key3: {:?}", secret_key3);
let dht_key2_string = String::from(&dht_key2); let public_key2_string = public_key2.to_string();
let dht_key2_string2 = String::from(&dht_key2); let public_key2_string2 = public_key2.to_string();
let dht_key3_string = String::from(&dht_key3); let public_key3_string = public_key3.to_string();
assert_eq!(dht_key2_string, dht_key2_string2); assert_eq!(public_key2_string, public_key2_string2);
assert_ne!(dht_key3_string, dht_key2_string); assert_ne!(public_key3_string, public_key2_string);
let dht_key_secret2_string = String::from(&dht_key_secret2); let secret_key2_string = secret_key2.to_string();
assert_ne!(dht_key_secret2_string, dht_key_secret_string); assert_ne!(secret_key2_string, secret_key_string);
assert_ne!(dht_key_secret2_string, dht_key2_string); assert_ne!(secret_key2_string, public_key2_string);
// Assert they convert back correctly // Assert they convert back correctly
let dht_key_back = BarePublicKey::try_from(dht_key_string.as_str()).unwrap(); let public_key_back = PublicKey::try_from(public_key_string.as_str()).unwrap();
let dht_key_back2 = BarePublicKey::try_from(dht_key_string2.as_str()).unwrap(); let public_key_back2 = PublicKey::try_from(public_key_string2.as_str()).unwrap();
assert_eq!(dht_key_back, dht_key_back2); assert_eq!(public_key_back, public_key_back2);
assert_eq!(dht_key_back, dht_key); assert_eq!(public_key_back, public_key);
assert_eq!(dht_key_back2, dht_key); assert_eq!(public_key_back2, public_key);
let dht_key_secret_back = BareSecretKey::try_from(dht_key_secret_string.as_str()).unwrap(); let secret_key_back = SecretKey::try_from(secret_key_string.as_str()).unwrap();
assert_eq!(dht_key_secret_back, dht_key_secret); assert_eq!(secret_key_back, secret_key);
let dht_key2_back = BarePublicKey::try_from(dht_key2_string.as_str()).unwrap(); let public_key2_back = PublicKey::try_from(public_key2_string.as_str()).unwrap();
let dht_key2_back2 = BarePublicKey::try_from(dht_key2_string2.as_str()).unwrap(); let public_key2_back2 = PublicKey::try_from(public_key2_string2.as_str()).unwrap();
assert_eq!(dht_key2_back, dht_key2_back2); assert_eq!(public_key2_back, public_key2_back2);
assert_eq!(dht_key2_back, dht_key2); assert_eq!(public_key2_back, public_key2);
assert_eq!(dht_key2_back2, dht_key2); assert_eq!(public_key2_back2, public_key2);
let dht_key_secret2_back = BareSecretKey::try_from(dht_key_secret2_string.as_str()).unwrap(); let secret_key2_back = SecretKey::try_from(secret_key2_string.as_str()).unwrap();
assert_eq!(dht_key_secret2_back, dht_key_secret2); assert_eq!(secret_key2_back, secret_key2);
// Assert string roundtrip // Assert string roundtrip
assert_eq!(String::from(&dht_key2_back), dht_key2_string); assert_eq!(secret_key2_back.to_string(), secret_key2_string);
// These conversions should fail // These conversions should fail
assert!(BarePublicKey::try_from("whatever!").is_err()); assert!(BarePublicKey::try_from("whatever!").is_err());
assert!(BareSecretKey::try_from("whatever!").is_err()); assert!(BareSecretKey::try_from("whatever!").is_err());
@ -167,41 +193,42 @@ pub async fn test_key_conversions(vcrypto: &AsyncCryptoSystemGuard<'_>) {
} }
pub async fn test_encode_decode(vcrypto: &AsyncCryptoSystemGuard<'_>) { pub async fn test_encode_decode(vcrypto: &AsyncCryptoSystemGuard<'_>) {
let dht_key = BarePublicKey::try_decode("AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA").unwrap(); let public_key =
let dht_key_secret = BarePublicKey::try_decode("AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA").unwrap();
let secret_key =
BareSecretKey::try_decode("AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA").unwrap(); BareSecretKey::try_decode("AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA").unwrap();
let dht_key_b = BarePublicKey::new(&EMPTY_KEY); let public_key_b = BarePublicKey::new(&EMPTY_KEY);
let dht_key_secret_b = BareSecretKey::new(&EMPTY_KEY_SECRET); let secret_key_b = BareSecretKey::new(&EMPTY_KEY_SECRET);
assert_eq!(dht_key, dht_key_b); assert_eq!(public_key, public_key_b);
assert_eq!(dht_key_secret, dht_key_secret_b); assert_eq!(secret_key, secret_key_b);
let (dht_key2, dht_key_secret2) = vcrypto.generate_keypair().await.into_split(); let (public_key2, secret_key2) = vcrypto.generate_keypair().await.value().into_split();
let e1 = dht_key.encode(); let e1 = public_key.encode();
trace!("e1: {:?}", e1); trace!("e1: {:?}", e1);
assert_eq!(e1, "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA".to_owned()); assert_eq!(e1, "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA".to_owned());
let e1s = dht_key_secret.encode(); let e1s = secret_key.encode();
trace!("e1s: {:?}", e1s); trace!("e1s: {:?}", e1s);
let e2 = dht_key2.encode(); let e2 = public_key2.encode();
trace!("e2: {:?}", e2); trace!("e2: {:?}", e2);
let e2s = dht_key_secret2.encode(); let e2s = secret_key2.encode();
trace!("e2s: {:?}", e2s); trace!("e2s: {:?}", e2s);
let d1 = BarePublicKey::try_decode(e1.as_str()).unwrap(); let d1 = BarePublicKey::try_decode(e1.as_str()).unwrap();
trace!("d1: {:?}", d1); trace!("d1: {:?}", d1);
assert_eq!(dht_key, d1); assert_eq!(public_key, d1);
let d1s = BareSecretKey::try_decode(e1s.as_str()).unwrap(); let d1s = BareSecretKey::try_decode(e1s.as_str()).unwrap();
trace!("d1s: {:?}", d1s); trace!("d1s: {:?}", d1s);
assert_eq!(dht_key_secret, d1s); assert_eq!(secret_key, d1s);
let d2 = BarePublicKey::try_decode(e2.as_str()).unwrap(); let d2 = BarePublicKey::try_decode(e2.as_str()).unwrap();
trace!("d2: {:?}", d2); trace!("d2: {:?}", d2);
assert_eq!(dht_key2, d2); assert_eq!(public_key2, d2);
let d2s = BareSecretKey::try_decode(e2s.as_str()).unwrap(); let d2s = BareSecretKey::try_decode(e2s.as_str()).unwrap();
trace!("d2s: {:?}", d2s); trace!("d2s: {:?}", d2s);
assert_eq!(dht_key_secret2, d2s); assert_eq!(secret_key2, d2s);
// Failures // Failures
let f1 = BareSecretKey::try_decode("AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA!"); let f1 = BareSecretKey::try_decode("AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA!");
@ -216,7 +243,7 @@ pub fn test_typed_convert(vcrypto: &AsyncCryptoSystemGuard<'_>) {
vcrypto.kind() vcrypto.kind()
); );
let tk1 = PublicKey::from_str(&tks1).expect("failed"); let tk1 = PublicKey::from_str(&tks1).expect("failed");
assert!(vcrypto.check_public_key(tk1.ref_value()).is_ok()); assert!(vcrypto.check_public_key(&tk1).is_ok());
let tks1x = tk1.to_string(); let tks1x = tk1.to_string();
assert_eq!(tks1, tks1x); assert_eq!(tks1, tks1x);
@ -231,7 +258,7 @@ pub fn test_typed_convert(vcrypto: &AsyncCryptoSystemGuard<'_>) {
vcrypto.kind() vcrypto.kind()
); );
let tk3 = PublicKey::from_str(&tks3).expect("failed"); let tk3 = PublicKey::from_str(&tks3).expect("failed");
assert!(vcrypto.check_public_key(tk3.ref_value()).is_err()); assert!(vcrypto.check_public_key(&tk3).is_err());
let tks4 = "XXXX:7lxDEabK_qgjbe38RtBa3IZLrud84P6NhGP-pRTZzdQ".to_string(); let tks4 = "XXXX:7lxDEabK_qgjbe38RtBa3IZLrud84P6NhGP-pRTZzdQ".to_string();
let tk4 = PublicKey::from_str(&tks4).expect("failed"); let tk4 = PublicKey::from_str(&tks4).expect("failed");
@ -258,7 +285,7 @@ pub fn test_typed_convert(vcrypto: &AsyncCryptoSystemGuard<'_>) {
} }
async fn test_hash(vcrypto: &AsyncCryptoSystemGuard<'_>) { async fn test_hash(vcrypto: &AsyncCryptoSystemGuard<'_>) {
let mut s = BTreeSet::<BareHashDigest>::new(); let mut s = BTreeSet::<HashDigest>::new();
let k1 = vcrypto.generate_hash("abc".as_bytes()).await; let k1 = vcrypto.generate_hash("abc".as_bytes()).await;
let k2 = vcrypto.generate_hash("abcd".as_bytes()).await; let k2 = vcrypto.generate_hash("abcd".as_bytes()).await;
@ -289,24 +316,43 @@ async fn test_hash(vcrypto: &AsyncCryptoSystemGuard<'_>) {
assert_eq!(k5, v5); assert_eq!(k5, v5);
assert_eq!(k6, v6); assert_eq!(k6, v6);
vcrypto.validate_hash("abc".as_bytes(), &v1).await; vcrypto
vcrypto.validate_hash("abcd".as_bytes(), &v2).await; .validate_hash("abc".as_bytes(), &v1)
vcrypto.validate_hash("".as_bytes(), &v3).await; .await
vcrypto.validate_hash(" ".as_bytes(), &v4).await; .expect("should succeed");
vcrypto.validate_hash(LOREM_IPSUM, &v5).await; vcrypto
vcrypto.validate_hash(CHEEZBURGER, &v6).await; .validate_hash("abcd".as_bytes(), &v2)
.await
.expect("should succeed");
vcrypto
.validate_hash("".as_bytes(), &v3)
.await
.expect("should succeed");
vcrypto
.validate_hash(" ".as_bytes(), &v4)
.await
.expect("should succeed");
vcrypto
.validate_hash(LOREM_IPSUM, &v5)
.await
.expect("should succeed");
vcrypto
.validate_hash(CHEEZBURGER, &v6)
.await
.expect("should succeed");
} }
async fn test_operations(vcrypto: &AsyncCryptoSystemGuard<'_>) { async fn test_operations(vcrypto: &AsyncCryptoSystemGuard<'_>) {
// xxx we should make this fixed byte arrays when we add another cryptosystem
let k1 = vcrypto.generate_hash(LOREM_IPSUM).await; let k1 = vcrypto.generate_hash(LOREM_IPSUM).await;
let k2 = vcrypto.generate_hash(CHEEZBURGER).await; let k2 = vcrypto.generate_hash(CHEEZBURGER).await;
let k3 = vcrypto.generate_hash("abc".as_bytes()).await; let k3 = vcrypto.generate_hash("abc".as_bytes()).await;
// Get distance // Get distance
let d1 = vcrypto.distance(&k1, &k2).await; let d1 = k1.to_hash_coordinate().distance(&k2.to_hash_coordinate());
let d2 = vcrypto.distance(&k2, &k1).await; let d2 = k2.to_hash_coordinate().distance(&k1.to_hash_coordinate());
let d3 = vcrypto.distance(&k1, &k3).await; let d3 = k1.to_hash_coordinate().distance(&k3.to_hash_coordinate());
let d4 = vcrypto.distance(&k2, &k3).await; let d4 = k2.to_hash_coordinate().distance(&k3.to_hash_coordinate());
trace!("d1={:?}", d1); trace!("d1={:?}", d1);
trace!("d2={:?}", d2); trace!("d2={:?}", d2);

View file

@ -10,52 +10,20 @@ use data_encoding::BASE64URL_NOPAD;
////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////
pub trait Encodable
where
Self: Sized,
{
fn encode(&self) -> String;
fn encoded_len(&self) -> usize;
fn try_decode<S: AsRef<str>>(input: S) -> VeilidAPIResult<Self> {
let b = input.as_ref().as_bytes();
Self::try_decode_bytes(b)
}
fn try_decode_bytes(b: &[u8]) -> VeilidAPIResult<Self>;
}
//////////////////////////////////////////////////////////////////////
pub trait ByteArrayType
where
Self: Sized,
{
fn len(&self) -> usize;
fn is_empty(&self) -> bool;
fn bytes(&self) -> &[u8];
fn bit(&self, index: usize) -> bool;
fn first_nonzero_bit(&self) -> Option<usize>;
fn nibble(&self, index: usize) -> u8;
fn first_nonzero_nibble(&self) -> Option<(usize, u8)>;
}
//////////////////////////////////////////////////////////////////////
macro_rules! byte_array_type { macro_rules! byte_array_type {
($name:ident) => { ($visibility:vis $name:ident) => {
#[derive(Clone, Hash, PartialOrd, Ord, PartialEq, Eq)]
#[cfg_attr(all(target_arch = "wasm32", target_os = "unknown"), derive(Tsify))] #[cfg_attr(all(target_arch = "wasm32", target_os = "unknown"), derive(wasm_bindgen_derive::TryFromJsValue))]
#[cfg_attr( #[cfg_attr(all(target_arch = "wasm32", target_os = "unknown"), wasm_bindgen)]
all(target_arch = "wasm32", target_os = "unknown"), #[derive(Clone, Hash, Default, PartialOrd, Ord, PartialEq, Eq)]
tsify(from_wasm_abi, into_wasm_abi)
)]
#[cfg_attr(all(target_arch = "wasm32", target_os = "unknown"), serde(transparent))]
#[must_use] #[must_use]
pub struct $name { $visibility struct $name {
#[cfg_attr(
all(target_arch = "wasm32", target_os = "unknown"),
tsify(type = "string")
)]
bytes: Bytes, bytes: Bytes,
} }
#[cfg(all(target_arch = "wasm32", target_os = "unknown"))]
make_wasm_bindgen_stubs!($name);
impl $name { impl $name {
pub fn new(data: &[u8]) -> Self { pub fn new(data: &[u8]) -> Self {
Self { Self {
@ -65,32 +33,69 @@ macro_rules! byte_array_type {
fn new_from_bytes(bytes: Bytes) -> Self { fn new_from_bytes(bytes: Bytes) -> Self {
Self { bytes } Self { bytes }
} }
}
impl Default for $name { pub fn bytes(&self) -> &[u8] {
fn default() -> Self {
Self {
bytes: Bytes::new(),
}
}
}
impl ByteArrayType for $name {
fn len(&self) -> usize {
self.bytes.len()
}
fn is_empty(&self) -> bool {
self.bytes.is_empty()
}
fn bytes(&self) -> &[u8] {
&self.bytes &self.bytes
} }
#[allow(dead_code)]
pub fn first_nonzero_nibble(&self) -> Option<(usize, u8)> {
for i in 0..(self.bytes.len() * 2) {
let n = self.nibble(i);
if n != 0 {
return Some((i, n));
}
}
None
}
}
#[cfg(all(target_arch = "wasm32", target_os = "unknown"))]
#[wasm_bindgen]
impl $name {
#[wasm_bindgen(constructor)]
pub fn js_new(data: &[u8]) -> Self {
Self::new(data)
}
#[wasm_bindgen(js_name = parse)]
pub fn js_parse(s: String) -> VeilidAPIResult<Self> {
Self::from_str(&s)
}
#[wasm_bindgen(js_name = toString)]
pub fn js_to_string(&self) -> String {
self.to_string()
}
#[wasm_bindgen(js_name = isEqual)]
pub fn js_is_equal(&self, other: &Self) -> bool {
self == other
}
// TODO: add more typescript-only operations here
}
#[cfg_attr(all(target_arch = "wasm32", target_os = "unknown"), wasm_bindgen)]
#[allow(dead_code)]
impl $name {
#[cfg_attr(all(target_arch = "wasm32", target_os = "unknown"), wasm_bindgen(getter, js_name = length))]
pub fn len(&self) -> usize {
self.bytes.len()
}
pub fn is_empty(&self) -> bool {
self.bytes.is_empty()
}
pub fn to_vec(&self) -> Vec<u8> {
self.bytes.to_vec()
}
// Big endian bit ordering // Big endian bit ordering
fn bit(&self, index: usize) -> bool { pub fn bit(&self, index: usize) -> bool {
let bi = index / 8; let bi = index / 8;
let ti = 7 - (index % 8); let ti = 7 - (index % 8);
((self.bytes[bi] >> ti) & 1) != 0 ((self.bytes[bi] >> ti) & 1) != 0
} }
fn first_nonzero_bit(&self) -> Option<usize> { pub fn first_nonzero_bit(&self) -> Option<usize> {
for i in 0..self.bytes.len() { for i in 0..self.bytes.len() {
let b = self.bytes[i]; let b = self.bytes[i];
if b != 0 { if b != 0 {
@ -106,7 +111,7 @@ macro_rules! byte_array_type {
} }
// Big endian nibble ordering // Big endian nibble ordering
fn nibble(&self, index: usize) -> u8 { pub fn nibble(&self, index: usize) -> u8 {
let bi = index / 2; let bi = index / 2;
if index & 1 == 0 { if index & 1 == 0 {
(self.bytes[bi] >> 4) & 0xFu8 (self.bytes[bi] >> 4) & 0xFu8
@ -115,24 +120,17 @@ macro_rules! byte_array_type {
} }
} }
fn first_nonzero_nibble(&self) -> Option<(usize, u8)> { pub fn encode(&self) -> String {
for i in 0..(self.bytes.len() * 2) {
let n = self.nibble(i);
if n != 0 {
return Some((i, n));
}
}
None
}
}
impl Encodable for $name {
fn encode(&self) -> String {
BASE64URL_NOPAD.encode(&self.bytes) BASE64URL_NOPAD.encode(&self.bytes)
} }
fn encoded_len(&self) -> usize { pub fn encoded_len(&self) -> usize {
BASE64URL_NOPAD.encode_len(self.bytes.len()) BASE64URL_NOPAD.encode_len(self.bytes.len())
} }
fn try_decode_bytes(b: &[u8]) -> VeilidAPIResult<Self> { pub fn try_decode(input: &str) -> VeilidAPIResult<Self> {
let b = input.as_bytes();
Self::try_decode_bytes(b)
}
pub fn try_decode_bytes(b: &[u8]) -> VeilidAPIResult<Self> {
if b.len() == 0 { if b.len() == 0 {
return Ok(Self::default()); return Ok(Self::default());
} }
@ -218,9 +216,10 @@ macro_rules! byte_array_type {
} }
} }
impl From<$name> for Vec<u8> { impl TryFrom<$name> for Vec<u8> {
fn from(value: $name) -> Self { type Error = VeilidAPIError;
value.bytes().to_vec() fn try_from(value: $name) -> Result<Self, Self::Error> {
Ok(value.bytes().to_vec())
} }
} }
@ -243,75 +242,36 @@ macro_rules! byte_array_type {
} }
} }
} }
}; }
} }
///////////////////////////////////////// /////////////////////////////////////////
byte_array_type!(BarePublicKey); // Untyped public key (variable length)
byte_array_type!(BareSecretKey); byte_array_type!(pub BarePublicKey);
byte_array_type!(BareEncapsulationKey); // Untyped secret key (variable length)
byte_array_type!(BareDecapsulationKey); byte_array_type!(pub BareSecretKey);
byte_array_type!(BareSignature); // Untyped encapsulation key (variable length)
byte_array_type!(BareNonce); byte_array_type!(pub BareEncapsulationKey);
// Untyped decapsulation key (variable length)
byte_array_type!(pub BareDecapsulationKey);
// Untyped signature (variable length)
byte_array_type!(pub BareSignature);
// Untyped hash digest (hashed to 32 bytes)
byte_array_type!(pub BareHashDigest);
// Untyped shared secret (variable length)
byte_array_type!(pub BareSharedSecret);
// Untyped record key (hashed to 32 bytes)
byte_array_type!(pub BareRecordKey);
// Untyped route id (hashed to 32 bytes)
byte_array_type!(pub BareRouteId);
// Untyped node id (hashed to 32 bytes)
byte_array_type!(pub BareNodeId);
// Untyped member id (hashed to 32 bytes)
byte_array_type!(pub BareMemberId);
// Untyped nonce (random 24 bytes, no typed variant)
byte_array_type!(pub Nonce);
/* // Internal types
Notes: byte_array_type!(pub(crate) BareHashCoordinate);
- These are actually BareHashDigest types, but not interchangeable: byte_array_type!(pub(crate) HashDistance);
- BareSharedSecret
- BareRecordKey
- BareRouteId (eventually will be a BareRecordKey type with DHT Routes)
- BareNodeId (constructible from BarePublicKey)
- BareMemberId (constructible from BarePublicKey)
*/
// BareHashDigest sub-types
byte_array_type!(BareHashDigest);
byte_array_type!(BareSharedSecret);
byte_array_type!(BareRecordKey);
byte_array_type!(BareHashDistance);
byte_array_type!(BareRouteId);
byte_array_type!(BareNodeId);
byte_array_type!(BareMemberId);
// Temporary adapters for converting to/from BareHashDigest types
// Removing these will show where there's still issues.
impl From<BareHashDigest> for BareSharedSecret {
fn from(value: BareHashDigest) -> Self {
Self::new(value.bytes())
}
}
impl From<BareHashDigest> for BareRecordKey {
fn from(value: BareHashDigest) -> Self {
Self::new(value.bytes())
}
}
impl From<BareRecordKey> for BareHashDigest {
fn from(value: BareRecordKey) -> Self {
Self::new(value.bytes())
}
}
impl From<BareNodeId> for BareHashDigest {
fn from(value: BareNodeId) -> Self {
Self::new(value.bytes())
}
}
/*
- BareNodeId currently equals BarePublicKey, but should be distinct from BarePublicKey.
- BareNodeId eventually should be a BareHashDigest type that's constructable from a BarePublicKey
*/
impl From<BarePublicKey> for BareNodeId {
fn from(value: BarePublicKey) -> Self {
Self::new(value.bytes())
}
}
impl From<BareNodeId> for BarePublicKey {
fn from(value: BareNodeId) -> Self {
Self::new(value.bytes())
}
}
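
`byte_array_type!` now generates the whole encode/decode and bit/nibble surface directly on each bare type, replacing the removed `Encodable` and `ByteArrayType` traits. A sketch of the generated API using `BareNodeId`; the byte values are placeholders:

```rust
// Sketch of the API generated by byte_array_type!; all methods shown
// (new, encode, try_decode, bit, first_nonzero_bit) appear in the macro above.
fn bare_type_sketch() {
    let id = BareNodeId::new(&[0x80, 0x00, 0x01]);

    // base64url (no padding) round trip
    let encoded = id.encode();
    let decoded = BareNodeId::try_decode(&encoded).expect("should decode");
    assert_eq!(id, decoded);

    // Big-endian bit addressing: bit 0 is the high bit of the first byte.
    assert!(id.bit(0));
    assert_eq!(id.first_nonzero_bit(), Some(0));
}
```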

View file

@ -1,209 +1,191 @@
use super::*; #[macro_export]
macro_rules! impl_crypto_typed {
($visibility:vis $name:ident) => {
paste::paste! {
#[derive(Clone, Debug, PartialEq, Eq, Hash)] #[cfg_attr(all(target_arch = "wasm32", target_os = "unknown"), derive(wasm_bindgen_derive::TryFromJsValue))]
#[must_use] #[cfg_attr(all(target_arch = "wasm32", target_os = "unknown"), wasm_bindgen)]
pub struct CryptoTyped<K> #[derive(Clone, Debug, PartialEq, Eq, Hash)]
where #[must_use]
K: Clone + fmt::Debug + PartialEq + Eq + Hash, $visibility struct $name
{ {
kind: CryptoKind, kind: CryptoKind,
value: K, value: [<Bare $name>],
} }
cfg_if::cfg_if! { #[cfg(all(target_arch = "wasm32", target_os = "unknown"))]
if #[cfg(all(target_arch = "wasm32", target_os = "unknown"))] { make_wasm_bindgen_stubs!($name);
#[wasm_bindgen(typescript_custom_section)]
const CRYPOTYPED_TYPE: &'static str = r#"
export type CryptoTyped<TCryptoKey extends string> = `${CryptoKind}:${TCryptoKey}`;
"#;
}
}
impl<K> CryptoTyped<K> impl $name {
where pub fn new(kind: CryptoKind, value: [<Bare $name>]) -> Self {
K: Clone + fmt::Debug + PartialEq + Eq + Hash, Self { kind, value }
{ }
pub fn new(kind: CryptoKind, value: K) -> Self {
Self { kind, value }
}
pub fn kind(&self) -> CryptoKind { pub fn ref_value(&self) -> &[<Bare $name>] {
self.kind &self.value
} }
pub fn value(&self) -> K { #[allow(dead_code)]
self.value.clone() pub fn into_value(self) -> [<Bare $name>] {
} self.value
pub fn ref_value(&self) -> &K { }
&self.value }
}
pub fn into_value(self) -> K {
self.value
}
}
impl<K> PartialOrd for CryptoTyped<K> #[cfg_attr(all(target_arch = "wasm32", target_os = "unknown"), wasm_bindgen)]
where impl $name {
K: Clone + fmt::Debug + PartialEq + Eq + Hash, #[cfg_attr(all(target_arch = "wasm32", target_os = "unknown"), wasm_bindgen(getter, unchecked_return_type = "CryptoKind"))]
K: Ord + PartialOrd, pub fn kind(&self) -> CryptoKind {
{ self.kind
fn partial_cmp(&self, other: &Self) -> Option<cmp::Ordering> { }
Some(self.cmp(other)) #[cfg_attr(all(target_arch = "wasm32", target_os = "unknown"), wasm_bindgen(getter))]
} #[allow(dead_code)]
} pub fn value(&self) -> [<Bare $name>] {
self.value.clone()
}
}
impl<K> Ord for CryptoTyped<K> impl PartialOrd for $name
where {
K: Clone + fmt::Debug + PartialEq + Eq + Hash, fn partial_cmp(&self, other: &Self) -> Option<cmp::Ordering> {
K: Ord + PartialOrd, Some(self.cmp(other))
{ }
fn cmp(&self, other: &Self) -> cmp::Ordering { }
let x = compare_crypto_kind(&self.kind, &other.kind);
if x != cmp::Ordering::Equal { impl Ord for $name
return x; {
fn cmp(&self, other: &Self) -> cmp::Ordering {
let x = compare_crypto_kind(&self.kind, &other.kind);
if x != cmp::Ordering::Equal {
return x;
}
self.value.cmp(&other.value)
}
}
impl fmt::Display for $name
{
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
write!(f, "{}:{}", self.kind, self.value)
}
}
impl FromStr for $name
{
type Err = VeilidAPIError;
fn from_str(s: &str) -> Result<Self, Self::Err> {
let b = s.as_bytes();
if b.len() > 5 && b[4..5] == b":"[..] {
let kind: CryptoKind = b[0..4].try_into().expect("should not fail to convert");
let value = [<Bare $name>]::try_decode_bytes(&b[5..])?;
Ok(Self { kind, value })
} else {
let kind = best_crypto_kind();
let value = [<Bare $name>]::try_decode_bytes(b)?;
Ok(Self { kind, value })
}
}
}
impl TryFrom<String> for $name
{
type Error = VeilidAPIError;
fn try_from(s: String) -> Result<Self, Self::Error> {
Self::from_str(&s)
}
}
impl TryFrom<&str> for $name
{
type Error = VeilidAPIError;
fn try_from(s: &str) -> Result<Self, Self::Error> {
Self::from_str(s)
}
}
impl<'de> Deserialize<'de> for $name
{
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: serde::Deserializer<'de>,
{
let s = <String as Deserialize>::deserialize(deserializer)?;
FromStr::from_str(&s).map_err(serde::de::Error::custom)
}
}
impl Serialize for $name
{
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: serde::Serializer,
{
serializer.collect_str(self)
}
}
#[cfg(all(target_arch = "wasm32", target_os = "unknown"))]
#[wasm_bindgen]
impl $name {
#[wasm_bindgen(constructor)]
pub fn js_new(kind: CryptoKind, value: [<Bare $name>]) -> Self {
Self::new(kind,value)
}
#[wasm_bindgen(js_name = parse)]
pub fn js_parse(s: String) -> VeilidAPIResult<Self> {
Self::from_str(&s)
}
#[wasm_bindgen(js_name = toString)]
pub fn js_to_string(&self) -> String {
self.to_string()
}
#[wasm_bindgen(js_name = isEqual)]
pub fn js_is_equal(&self, other: &Self) -> bool {
self == other
}
// TODO: add more typescript-only operations here
}
} }
self.value.cmp(&other.value) };
}
} }
impl<K> fmt::Display for CryptoTyped<K> #[macro_export]
where macro_rules! impl_crypto_typed_vec {
K: Clone + fmt::Debug + PartialEq + Eq + Hash, ($visibility:vis $name:ident) => {
K: fmt::Display, paste::paste! {
{ impl<'a> TryFrom<&'a [u8]> for $name
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> { {
write!(f, "{}:{}", self.kind, self.value) type Error = VeilidAPIError;
}
}
impl<K> FromStr for CryptoTyped<K> fn try_from(b: &'a [u8]) -> Result<Self, Self::Error> {
where if b.len() < 4 {
K: Clone + fmt::Debug + PartialEq + Eq + Hash, apibail_generic!("invalid cryptotyped format");
K: Encodable, }
{ let kind: CryptoKind = b[0..4].try_into()?;
type Err = VeilidAPIError; let value: [<Bare $name>] = b[4..].into();
fn from_str(s: &str) -> Result<Self, Self::Err> { Ok(Self { kind, value })
let b = s.as_bytes(); }
if b.len() > 5 && b[4..5] == b":"[..] { }
let kind: CryptoKind = b[0..4].try_into().expect("should not fail to convert");
let value = K::try_decode_bytes(&b[5..])?; impl TryFrom<Vec<u8>> for $name
Ok(Self { kind, value }) {
} else { type Error = VeilidAPIError;
let kind = best_crypto_kind();
let value = K::try_decode_bytes(b)?; fn try_from(b: Vec<u8>) -> Result<Self, Self::Error> {
Ok(Self { kind, value }) Self::try_from(b.as_slice())
}
}
impl From<$name> for Vec<u8>
{
fn from(v: $name) -> Self {
let mut out = v.kind.0.to_vec();
out.extend_from_slice(v.value.as_ref());
out
}
}
} }
} };
}
impl<K> TryFrom<String> for CryptoTyped<K>
where
K: Clone + fmt::Debug + PartialEq + Eq + Hash,
K: Encodable,
{
type Error = VeilidAPIError;
fn try_from(s: String) -> Result<Self, Self::Error> {
Self::from_str(&s)
}
}
impl<K> TryFrom<&str> for CryptoTyped<K>
where
K: Clone + fmt::Debug + PartialEq + Eq + Hash,
K: Encodable,
{
type Error = VeilidAPIError;
fn try_from(s: &str) -> Result<Self, Self::Error> {
Self::from_str(s)
}
}
impl<'a, K> TryFrom<&'a [u8]> for CryptoTyped<K>
where
K: Clone + fmt::Debug + PartialEq + Eq + Hash,
K: From<&'a [u8]>,
{
type Error = VeilidAPIError;
fn try_from(b: &'a [u8]) -> Result<Self, Self::Error> {
if b.len() < 4 {
apibail_generic!("invalid cryptotyped format");
}
let kind: CryptoKind = b[0..4].try_into()?;
let value: K = b[4..].into();
Ok(Self { kind, value })
}
}
impl<K> TryFrom<Vec<u8>> for CryptoTyped<K>
where
K: Clone + fmt::Debug + PartialEq + Eq + Hash,
K: for<'a> From<&'a [u8]>,
{
type Error = VeilidAPIError;
fn try_from(b: Vec<u8>) -> Result<Self, Self::Error> {
Self::try_from(b.as_slice())
}
}
impl<K> From<CryptoTyped<K>> for Vec<u8>
where
K: Clone + fmt::Debug + PartialEq + Eq + Hash,
K: AsRef<[u8]>,
{
fn from(v: CryptoTyped<K>) -> Self {
let mut out = v.kind.0.to_vec();
out.extend_from_slice(v.value.as_ref());
out
}
}
impl<'de, K> Deserialize<'de> for CryptoTyped<K>
where
K: Clone + fmt::Debug + PartialEq + Eq + Hash,
K: Encodable,
{
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: serde::Deserializer<'de>,
{
let s = <String as Deserialize>::deserialize(deserializer)?;
FromStr::from_str(&s).map_err(serde::de::Error::custom)
}
}
impl<K> Serialize for CryptoTyped<K>
where
K: Clone + fmt::Debug + PartialEq + Eq + Hash,
K: fmt::Display,
{
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: serde::Serializer,
{
serializer.collect_str(self)
}
}
impl CryptoTyped<BareKeyPair> {
pub fn new_from_parts(key: PublicKey, bare_secret: BareSecretKey) -> Self {
Self {
kind: key.kind(),
value: BareKeyPair::new(key.value(), bare_secret),
}
}
pub fn key(&self) -> PublicKey {
PublicKey::new(self.kind, self.ref_value().key())
}
pub fn secret(&self) -> SecretKey {
SecretKey::new(self.kind, self.ref_value().secret())
}
pub fn bare_secret(&self) -> BareSecretKey {
self.ref_value().secret()
}
pub fn ref_bare_secret(&self) -> &BareSecretKey {
self.ref_value().ref_secret()
}
} }
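
`impl_crypto_typed!` makes each typed wrapper a concrete struct pairing a `CryptoKind` with its bare value, with a `KIND:base64url` string form. A sketch of parsing and the accessors, reusing the `XXXX:`-prefixed test string from `test_typed_convert` above (the Display round trip for a non-standard kind tag is an assumption):

```rust
// Sketch: typed keys carry their CryptoKind and print as "KIND:value".
fn typed_key_sketch() {
    use core::str::FromStr;

    // The "XXXX" kind string comes straight from test_typed_convert above;
    // parsing accepts any four-character kind tag.
    let s = "XXXX:7lxDEabK_qgjbe38RtBa3IZLrud84P6NhGP-pRTZzdQ";
    let pk = PublicKey::from_str(s).expect("should parse");

    assert_eq!(pk.to_string(), s); // Display round-trips the "KIND:value" form
    let _bare: BarePublicKey = pk.value(); // bare value without the kind tag
}
```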

View file

@ -1,301 +1,282 @@
use super::*; #[macro_export]
macro_rules! impl_crypto_typed_group {
($visibility:vis $name:ident) => {
paste::paste! {
#[derive(Clone, Debug, Serialize, Deserialize, PartialOrd, Ord, PartialEq, Eq, Hash, Default)] #[cfg_attr(all(target_arch = "wasm32", target_os = "unknown"), derive(wasm_bindgen_derive::TryFromJsValue))]
#[serde(from = "Vec<CryptoTyped<K>>", into = "Vec<CryptoTyped<K>>")] #[cfg_attr(all(target_arch = "wasm32", target_os = "unknown"), wasm_bindgen)]
pub struct CryptoTypedGroup<K> #[derive(Clone, Debug, Serialize, Deserialize, PartialOrd, Ord, PartialEq, Eq, Hash, Default)]
where #[serde(from = "Vec<_>", into = "Vec<_>")]
K: Clone pub struct [<$name Group>]
+ fmt::Debug {
+ fmt::Display items: Vec<$name>,
+ FromStr
+ PartialEq
+ Eq
+ PartialOrd
+ Ord
+ Hash
+ Encodable,
{
items: Vec<CryptoTyped<K>>,
}
cfg_if::cfg_if! {
if #[cfg(all(target_arch = "wasm32", target_os = "unknown"))] {
#[wasm_bindgen(typescript_custom_section)]
const CRYPOTYPEDGROUP_TYPE: &'static str = r#"
export type CryptoTypedGroup<TCryptoKey extends string> = Array<CryptoTyped<TCryptoKey>>;
"#;
}
}
impl<K> CryptoTypedGroup<K>
where
K: Clone
+ fmt::Debug
+ fmt::Display
+ FromStr
+ PartialEq
+ Eq
+ PartialOrd
+ Ord
+ Hash
+ Encodable,
{
#[must_use]
pub fn new() -> Self {
Self { items: Vec::new() }
}
#[must_use]
pub fn with_capacity(cap: usize) -> Self {
Self {
items: Vec::with_capacity(cap),
}
}
pub fn kinds(&self) -> Vec<CryptoKind> {
let mut out = Vec::new();
for tk in &self.items {
out.push(tk.kind());
}
out.sort_by(compare_crypto_kind);
out
}
#[must_use]
pub fn keys(&self) -> Vec<K> {
let mut out = Vec::new();
for tk in &self.items {
out.push(tk.value());
}
out
}
#[must_use]
pub fn get(&self, kind: CryptoKind) -> Option<CryptoTyped<K>> {
self.items.iter().find(|x| x.kind() == kind).cloned()
}
pub fn add(&mut self, typed_key: CryptoTyped<K>) {
for x in &mut self.items {
if x.kind() == typed_key.kind() {
*x = typed_key;
return;
} }
}
self.items.push(typed_key); #[cfg(all(target_arch = "wasm32", target_os = "unknown"))]
self.items.sort() make_wasm_bindgen_stubs!([<$name Group>]);
}
pub fn add_all(&mut self, typed_keys: &[CryptoTyped<K>]) { impl [<$name Group>]
'outer: for typed_key in typed_keys { {
for x in &mut self.items { #[must_use]
if x.kind() == typed_key.kind() { pub fn new() -> Self {
*x = typed_key.clone(); Self { items: Vec::new() }
continue 'outer; }
#[must_use]
pub fn with_capacity(cap: usize) -> Self {
Self {
items: Vec::with_capacity(cap),
}
}
pub fn iter(&self) -> core::slice::Iter<'_, $name> {
self.items.iter()
}
pub fn add_all_from_slice(&mut self, typed_keys: &[$name]) {
'outer: for typed_key in typed_keys {
for x in &mut self.items {
if x.kind() == typed_key.kind() {
*x = typed_key.clone();
continue 'outer;
}
}
self.items.push(typed_key.clone());
}
self.items.sort()
}
pub fn contains_any_from_slice(&self, typed_keys: &[$name]) -> bool {
for typed_key in typed_keys {
if self.items.contains(typed_key) {
return true;
}
}
false
}
}
#[cfg(all(target_arch = "wasm32", target_os = "unknown"))]
#[wasm_bindgen]
impl [<$name Group>]
{
#[must_use]
pub fn get(&self,
#[wasm_bindgen(unchecked_param_type = "CryptoKind")]
kind: CryptoKind) -> Option<$name> {
self.items.iter().find(|x| x.kind() == kind).cloned()
}
pub fn remove(&mut self,
#[wasm_bindgen(unchecked_param_type = "CryptoKind")]
kind: CryptoKind) -> Option<$name> {
if let Some(idx) = self.items.iter().position(|x| x.kind() == kind) {
return Some(self.items.remove(idx));
}
None
}
pub fn remove_all(&mut self,
#[wasm_bindgen(unchecked_param_type = "CryptoKind[]")]
kinds: Vec<CryptoKind>) {
for k in kinds {
self.remove(k);
}
} }
} }
self.items.push(typed_key.clone()); #[cfg(not(all(target_arch = "wasm32", target_os = "unknown")))]
} impl [<$name Group>]
self.items.sort() {
} #[must_use]
pub fn remove(&mut self, kind: CryptoKind) -> Option<CryptoTyped<K>> { pub fn get(&self, kind: CryptoKind) -> Option<$name> {
if let Some(idx) = self.items.iter().position(|x| x.kind() == kind) { self.items.iter().find(|x| x.kind() == kind).cloned()
return Some(self.items.remove(idx)); }
}
None
}
pub fn remove_all(&mut self, kinds: &[CryptoKind]) {
for k in kinds {
self.remove(*k);
}
}
pub fn clear(&mut self) {
self.items.clear();
}
/// Return preferred typed key of our supported crypto kinds pub fn remove(&mut self, kind: CryptoKind) -> Option<$name> {
#[must_use] if let Some(idx) = self.items.iter().position(|x| x.kind() == kind) {
pub fn best(&self) -> Option<CryptoTyped<K>> { return Some(self.items.remove(idx));
self.items }
.iter() None
.find(|k| VALID_CRYPTO_KINDS.contains(&k.kind())) }
.cloned()
} pub fn remove_all(&mut self, kinds: Vec<CryptoKind>) {
#[must_use] for k in kinds {
pub fn is_empty(&self) -> bool { self.remove(k);
self.items.is_empty() }
} }
#[must_use] }
pub fn len(&self) -> usize {
self.items.len()
} #[cfg_attr(all(target_arch = "wasm32", target_os = "unknown"), wasm_bindgen)]
pub fn iter(&self) -> core::slice::Iter<'_, CryptoTyped<K>> { impl [<$name Group>] {
self.items.iter() #[cfg_attr(all(target_arch = "wasm32", target_os = "unknown"), wasm_bindgen(getter, unchecked_return_type = "CryptoKind[]"))]
} pub fn kinds(&self) -> Vec<CryptoKind> {
pub fn contains(&self, typed_key: &CryptoTyped<K>) -> bool { let mut out = Vec::new();
self.items.contains(typed_key) for tk in &self.items {
} out.push(tk.kind());
pub fn contains_any(&self, typed_keys: &[CryptoTyped<K>]) -> bool { }
for typed_key in typed_keys { out.sort_by(compare_crypto_kind);
if self.items.contains(typed_key) { out
return true; }
#[must_use]
pub fn keys(&self) -> Vec<[<Bare $name>]> {
let mut out = Vec::<[<Bare $name>]>::new();
for tk in &self.items {
out.push(tk.value());
}
out
}
#[must_use]
pub fn is_empty(&self) -> bool {
self.items.is_empty()
}
#[must_use]
#[cfg_attr(all(target_arch = "wasm32", target_os = "unknown"), wasm_bindgen(getter, js_name = length))]
pub fn len(&self) -> usize {
self.items.len()
}
pub fn contains(&self, typed_key: &$name) -> bool {
self.items.contains(typed_key)
}
#[must_use]
pub fn contains_any(&self, typed_keys: Vec<$name>) -> bool {
self.contains_any_from_slice(&typed_keys)
}
#[must_use]
#[cfg_attr(all(target_arch = "wasm32", target_os = "unknown"), wasm_bindgen(js_name = toArray))]
pub fn to_vec(&self) -> Vec<$name> {
self.items.clone()
}
pub fn add(&mut self, typed_key: $name) {
for x in &mut self.items {
if x.kind() == typed_key.kind() {
*x = typed_key;
return;
}
}
self.items.push(typed_key);
self.items.sort()
}
pub fn add_all(&mut self, typed_keys: Vec<$name>) {
self.add_all_from_slice(&typed_keys)
}
pub fn clear(&mut self) {
self.items.clear();
}
}
impl core::ops::Deref for [<$name Group>]
{
type Target = [$name];
#[inline]
fn deref(&self) -> &[$name] {
&self.items
}
}
impl fmt::Display for [<$name Group>]
{
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
write!(f, "[")?;
let mut first = true;
for x in &self.items {
if first {
first = false;
} else {
write!(f, ",")?;
}
write!(f, "{}", x)?;
}
write!(f, "]")
}
}
impl FromStr for [<$name Group>]
{
type Err = VeilidAPIError;
fn from_str(s: &str) -> Result<Self, Self::Err> {
let mut items = Vec::new();
if s.len() < 2 {
apibail_parse_error!("invalid length", s);
}
if &s[0..1] != "[" || &s[(s.len() - 1)..] != "]" {
apibail_parse_error!("invalid format", s);
}
for x in s[1..s.len() - 1].split(',') {
let tk = $name::from_str(x.trim())?;
items.push(tk);
}
Ok(Self { items })
}
}
impl From<$name> for [<$name Group>]
{
fn from(x: $name) -> Self {
let mut tks = [<$name Group>]::with_capacity(1);
tks.add(x);
tks
}
}
impl From<Vec<$name>> for [<$name Group>]
{
fn from(x: Vec<$name>) -> Self {
let mut tks = [<$name Group>]::with_capacity(x.len());
tks.add_all_from_slice(&x);
tks
}
}
impl From<&[$name]> for [<$name Group>]
{
fn from(x: &[$name]) -> Self {
let mut tks = [<$name Group>]::with_capacity(x.len());
tks.add_all_from_slice(x);
tks
}
}
impl From<[<$name Group>]> for Vec<$name>
{
fn from(val: [<$name Group>]) -> Self {
val.items
}
}
#[cfg(all(target_arch = "wasm32", target_os = "unknown"))]
#[wasm_bindgen]
impl [<$name Group>] {
#[wasm_bindgen(constructor)]
#[must_use]
pub fn js_new() -> Self {
Self::new()
}
#[wasm_bindgen(js_name = parse)]
pub fn js_parse(s: String) -> VeilidAPIResult<Self> {
Self::from_str(&s)
}
#[wasm_bindgen(js_name = toString)]
#[must_use]
pub fn js_to_string(&self) -> String {
self.to_string()
}
#[wasm_bindgen(js_name = isEqual)]
#[must_use]
pub fn js_is_equal(&self, other: &Self) -> bool {
self == other
}
// TODO: add more typescript-only operations here
} }
} }
false };
}
pub fn contains_value(&self, value: &K) -> bool {
for tk in &self.items {
if tk.ref_value() == value {
return true;
}
}
false
}
}
impl<K> core::ops::Deref for CryptoTypedGroup<K>
where
K: Clone
+ fmt::Debug
+ fmt::Display
+ FromStr
+ PartialEq
+ Eq
+ PartialOrd
+ Ord
+ Hash
+ Encodable,
{
type Target = [CryptoTyped<K>];
#[inline]
fn deref(&self) -> &[CryptoTyped<K>] {
&self.items
}
}
impl<K> fmt::Display for CryptoTypedGroup<K>
where
K: Clone
+ fmt::Debug
+ fmt::Display
+ FromStr
+ PartialEq
+ Eq
+ PartialOrd
+ Ord
+ Hash
+ Encodable,
{
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
write!(f, "[")?;
let mut first = true;
for x in &self.items {
if first {
first = false;
} else {
write!(f, ",")?;
}
write!(f, "{}", x)?;
}
write!(f, "]")
}
}
impl<K> FromStr for CryptoTypedGroup<K>
where
K: Clone
+ fmt::Debug
+ fmt::Display
+ FromStr
+ PartialEq
+ Eq
+ PartialOrd
+ Ord
+ Hash
+ Encodable,
{
type Err = VeilidAPIError;
fn from_str(s: &str) -> Result<Self, Self::Err> {
let mut items = Vec::new();
if s.len() < 2 {
apibail_parse_error!("invalid length", s);
}
if &s[0..1] != "[" || &s[(s.len() - 1)..] != "]" {
apibail_parse_error!("invalid format", s);
}
for x in s[1..s.len() - 1].split(',') {
let tk = CryptoTyped::<K>::from_str(x.trim())?;
items.push(tk);
}
Ok(Self { items })
}
}
impl<K> From<CryptoTyped<K>> for CryptoTypedGroup<K>
where
K: Clone
+ fmt::Debug
+ fmt::Display
+ FromStr
+ PartialEq
+ Eq
+ PartialOrd
+ Ord
+ Hash
+ Encodable,
{
fn from(x: CryptoTyped<K>) -> Self {
let mut tks = CryptoTypedGroup::<K>::with_capacity(1);
tks.add(x);
tks
}
}
impl<K> From<Vec<CryptoTyped<K>>> for CryptoTypedGroup<K>
where
K: Clone
+ fmt::Debug
+ fmt::Display
+ FromStr
+ PartialEq
+ Eq
+ PartialOrd
+ Ord
+ Hash
+ Encodable,
{
fn from(x: Vec<CryptoTyped<K>>) -> Self {
let mut tks = CryptoTypedGroup::<K>::with_capacity(x.len());
tks.add_all(&x);
tks
}
}
impl<K> From<&[CryptoTyped<K>]> for CryptoTypedGroup<K>
where
K: Clone
+ fmt::Debug
+ fmt::Display
+ FromStr
+ PartialEq
+ Eq
+ PartialOrd
+ Ord
+ Hash
+ Encodable,
{
fn from(x: &[CryptoTyped<K>]) -> Self {
let mut tks = CryptoTypedGroup::<K>::with_capacity(x.len());
tks.add_all(x);
tks
}
}
impl<K> From<CryptoTypedGroup<K>> for Vec<CryptoTyped<K>>
where
K: Clone
+ fmt::Debug
+ fmt::Display
+ FromStr
+ PartialEq
+ Eq
+ PartialOrd
+ Ord
+ Hash
+ Encodable,
{
fn from(val: CryptoTypedGroup<K>) -> Self {
val.items
}
} }
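
The generated group types keep at most one entry per `CryptoKind` (adding a value for an existing kind replaces it) and print as a bracketed, comma-separated list. A sketch against the `NodeIdGroup` expansion; the two ids are assumed to share a kind:

```rust
// Sketch: groups hold at most one typed value per CryptoKind.
fn group_sketch(a: NodeId, b: NodeId) {
    use core::str::FromStr;

    // Assumes `a` and `b` share the same CryptoKind.
    let mut group = NodeIdGroup::new();
    group.add(a);
    group.add(b.clone()); // replaces `a` because the kinds match
    assert_eq!(group.len(), 1);
    assert_eq!(group.get(b.kind()), Some(b));

    // Display/FromStr use a bracketed list, e.g. "[VLD0:...]"
    let s = group.to_string();
    let parsed = NodeIdGroup::from_str(&s).expect("should parse");
    assert_eq!(parsed, group);
}
```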

View file

@ -1,56 +1,66 @@
use super::*; use super::*;
#[derive(Clone, Default, PartialOrd, Ord, PartialEq, Eq, Hash)] #[cfg_attr(
all(target_arch = "wasm32", target_os = "unknown"),
derive(wasm_bindgen_derive::TryFromJsValue)
)]
#[cfg_attr(all(target_arch = "wasm32", target_os = "unknown"), wasm_bindgen)]
#[derive(Clone, Default, Hash, PartialOrd, Ord, PartialEq, Eq)]
#[must_use] #[must_use]
pub struct BareKeyPair { pub struct BareKeyPair {
key: BarePublicKey, key: BarePublicKey,
secret: BareSecretKey, secret: BareSecretKey,
} }
cfg_if::cfg_if! {
if #[cfg(all(target_arch = "wasm32", target_os = "unknown"))] {
#[wasm_bindgen(typescript_custom_section)]
const KEYPAIR_TYPE: &'static str = r#"
export type BareKeyPair = `${BarePublicKey}:${BareSecretKey}`;
"#;
}
}
impl BareKeyPair { impl BareKeyPair {
pub fn new(key: BarePublicKey, secret: BareSecretKey) -> Self { pub fn new(key: BarePublicKey, secret: BareSecretKey) -> Self {
Self { key, secret } Self { key, secret }
} }
pub fn key(&self) -> BarePublicKey {
self.key.clone()
}
pub fn secret(&self) -> BareSecretKey {
self.secret.clone()
}
pub fn ref_key(&self) -> &BarePublicKey { pub fn ref_key(&self) -> &BarePublicKey {
&self.key &self.key
} }
pub fn ref_secret(&self) -> &BareSecretKey { pub fn ref_secret(&self) -> &BareSecretKey {
&self.secret &self.secret
} }
pub fn split(&self) -> (BarePublicKey, BareSecretKey) {
(self.key.clone(), self.secret.clone())
}
pub fn ref_split(&self) -> (&BarePublicKey, &BareSecretKey) { pub fn ref_split(&self) -> (&BarePublicKey, &BareSecretKey) {
(&self.key, &self.secret) (&self.key, &self.secret)
} }
pub fn split(&self) -> (BarePublicKey, BareSecretKey) {
(self.key.clone(), self.secret.clone())
}
pub fn into_split(self) -> (BarePublicKey, BareSecretKey) { pub fn into_split(self) -> (BarePublicKey, BareSecretKey) {
(self.key, self.secret) (self.key, self.secret)
} }
} }
impl Encodable for BareKeyPair { #[cfg_attr(all(target_arch = "wasm32", target_os = "unknown"), wasm_bindgen)]
fn encode(&self) -> String { #[allow(dead_code)]
impl BareKeyPair {
#[cfg_attr(
all(target_arch = "wasm32", target_os = "unknown"),
wasm_bindgen(getter)
)]
pub fn key(&self) -> BarePublicKey {
self.key.clone()
}
#[cfg_attr(
all(target_arch = "wasm32", target_os = "unknown"),
wasm_bindgen(getter)
)]
pub fn secret(&self) -> BareSecretKey {
self.secret.clone()
}
pub fn encode(&self) -> String {
format!("{}:{}", self.key.encode(), self.secret.encode()) format!("{}:{}", self.key.encode(), self.secret.encode())
} }
fn encoded_len(&self) -> usize { pub fn encoded_len(&self) -> usize {
self.key.encoded_len() + 1 + self.secret.encoded_len() self.key.encoded_len() + 1 + self.secret.encoded_len()
} }
fn try_decode_bytes(b: &[u8]) -> VeilidAPIResult<Self> { pub fn try_decode(input: &str) -> VeilidAPIResult<Self> {
let b = input.as_bytes();
Self::try_decode_bytes(b)
}
pub fn try_decode_bytes(b: &[u8]) -> VeilidAPIResult<Self> {
let parts: Vec<_> = b.split(|x| *x == b':').collect(); let parts: Vec<_> = b.split(|x| *x == b':').collect();
if parts.len() != 2 { if parts.len() != 2 {
apibail_parse_error!( apibail_parse_error!(
@ -63,6 +73,7 @@ impl Encodable for BareKeyPair {
Ok(BareKeyPair { key, secret }) Ok(BareKeyPair { key, secret })
} }
} }
impl fmt::Display for BareKeyPair { impl fmt::Display for BareKeyPair {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "{}", self.encode()) write!(f, "{}", self.encode())
@ -125,3 +136,50 @@ impl<'de> serde::Deserialize<'de> for BareKeyPair {
BareKeyPair::try_decode(s.as_str()).map_err(serde::de::Error::custom) BareKeyPair::try_decode(s.as_str()).map_err(serde::de::Error::custom)
} }
} }
////////////////////////////////////////////////////////////////////////////
impl KeyPair {
pub fn into_split(self) -> (PublicKey, SecretKey) {
let kind = self.kind;
let (pk, sk) = self.into_value().into_split();
(PublicKey::new(kind, pk), SecretKey::new(kind, sk))
}
pub fn ref_bare_secret(&self) -> &BareSecretKey {
self.ref_value().ref_secret()
}
}
#[cfg_attr(all(target_arch = "wasm32", target_os = "unknown"), wasm_bindgen)]
#[allow(dead_code)]
impl KeyPair {
pub fn new_from_parts(key: PublicKey, bare_secret: BareSecretKey) -> Self {
Self {
kind: key.kind(),
value: BareKeyPair::new(key.value(), bare_secret),
}
}
#[cfg_attr(
all(target_arch = "wasm32", target_os = "unknown"),
wasm_bindgen(getter)
)]
pub fn key(&self) -> PublicKey {
PublicKey::new(self.kind, self.ref_value().key())
}
#[cfg_attr(
all(target_arch = "wasm32", target_os = "unknown"),
wasm_bindgen(getter)
)]
pub fn secret(&self) -> SecretKey {
SecretKey::new(self.kind, self.ref_value().secret())
}
#[cfg_attr(
all(target_arch = "wasm32", target_os = "unknown"),
wasm_bindgen(getter)
)]
pub fn bare_secret(&self) -> BareSecretKey {
self.ref_value().secret()
}
}
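
`KeyPair` now splits directly into typed `PublicKey`/`SecretKey` halves and can be rebuilt from a typed public key plus a bare secret. A sketch of that round trip, assuming the async guard context used by the tests earlier in this commit:

```rust
// Sketch: splitting and reassembling a typed KeyPair.
async fn keypair_sketch(vcrypto: &AsyncCryptoSystemGuard<'_>) {
    let keypair = vcrypto.generate_keypair().await;

    // Typed halves carry the CryptoKind along with the bare bytes.
    let (public_key, secret_key) = keypair.clone().into_split();
    assert_eq!(public_key.kind(), secret_key.kind());

    // Rebuild from a typed public key and a bare secret.
    let rebuilt = KeyPair::new_from_parts(public_key, secret_key.into_value());
    assert_eq!(rebuilt, keypair);
}
```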

View file

@ -48,94 +48,38 @@ mod crypto_typed_group;
mod keypair; mod keypair;
pub use byte_array_types::*; pub use byte_array_types::*;
pub use crypto_typed::*;
pub use crypto_typed_group::*;
pub use keypair::*; pub use keypair::*;
#[cfg_attr(all(target_arch = "wasm32", target_os = "unknown"), declare)] macro_rules! impl_crypto_typed_and_group {
pub type EncapsulationKey = CryptoTyped<BareEncapsulationKey>; ($visibility:vis $name:ident) => {
#[cfg_attr(all(target_arch = "wasm32", target_os = "unknown"), declare)] impl_crypto_typed!($visibility $name);
pub type DecapsulationKey = CryptoTyped<BareDecapsulationKey>; impl_crypto_typed_group!($visibility $name);
#[cfg_attr(all(target_arch = "wasm32", target_os = "unknown"), declare)] };
pub type PublicKey = CryptoTyped<BarePublicKey>;
#[cfg_attr(all(target_arch = "wasm32", target_os = "unknown"), declare)]
pub type SecretKey = CryptoTyped<BareSecretKey>;
#[cfg_attr(all(target_arch = "wasm32", target_os = "unknown"), declare)]
pub type KeyPair = CryptoTyped<BareKeyPair>;
#[cfg_attr(all(target_arch = "wasm32", target_os = "unknown"), declare)]
pub type Signature = CryptoTyped<BareSignature>;
#[cfg_attr(all(target_arch = "wasm32", target_os = "unknown"), declare)]
pub type SharedSecret = CryptoTyped<BareSharedSecret>;
#[cfg_attr(all(target_arch = "wasm32", target_os = "unknown"), declare)]
pub type HashDigest = CryptoTyped<BareHashDigest>;
#[cfg_attr(all(target_arch = "wasm32", target_os = "unknown"), declare)]
pub type RecordKey = CryptoTyped<BareRecordKey>;
#[cfg_attr(all(target_arch = "wasm32", target_os = "unknown"), declare)]
pub type NodeId = CryptoTyped<BareNodeId>;
#[cfg_attr(all(target_arch = "wasm32", target_os = "unknown"), declare)]
pub type RouteId = CryptoTyped<BareRouteId>;
#[cfg_attr(all(target_arch = "wasm32", target_os = "unknown"), declare)]
pub type MemberId = CryptoTyped<BareMemberId>;
#[cfg_attr(all(target_arch = "wasm32", target_os = "unknown"), declare)]
pub type EncapsulationKeyGroup = CryptoTypedGroup<BareEncapsulationKey>;
#[cfg_attr(all(target_arch = "wasm32", target_os = "unknown"), declare)]
pub type DecapsulationKeyGroup = CryptoTypedGroup<BareDecapsulationKey>;
#[cfg_attr(all(target_arch = "wasm32", target_os = "unknown"), declare)]
pub type PublicKeyGroup = CryptoTypedGroup<BarePublicKey>;
#[cfg_attr(all(target_arch = "wasm32", target_os = "unknown"), declare)]
pub type SecretKeyGroup = CryptoTypedGroup<BareSecretKey>;
#[cfg_attr(all(target_arch = "wasm32", target_os = "unknown"), declare)]
pub type KeyPairGroup = CryptoTypedGroup<BareKeyPair>;
#[cfg_attr(all(target_arch = "wasm32", target_os = "unknown"), declare)]
pub type SignatureGroup = CryptoTypedGroup<BareSignature>;
#[cfg_attr(all(target_arch = "wasm32", target_os = "unknown"), declare)]
pub type SharedSecretGroup = CryptoTypedGroup<BareSharedSecret>;
#[cfg_attr(all(target_arch = "wasm32", target_os = "unknown"), declare)]
pub type HashDigestGroup = CryptoTypedGroup<BareHashDigest>;
#[cfg_attr(all(target_arch = "wasm32", target_os = "unknown"), declare)]
pub type RecordKeyGroup = CryptoTypedGroup<BareRecordKey>;
#[cfg_attr(all(target_arch = "wasm32", target_os = "unknown"), declare)]
pub type NodeIdGroup = CryptoTypedGroup<BareNodeId>;
#[cfg_attr(all(target_arch = "wasm32", target_os = "unknown"), declare)]
pub type RouteIdGroup = CryptoTypedGroup<BareRouteId>;
#[cfg_attr(all(target_arch = "wasm32", target_os = "unknown"), declare)]
pub type MemberIdGroup = CryptoTypedGroup<BareMemberId>;
impl From<NodeId> for HashDigest {
fn from(value: NodeId) -> Self {
HashDigest::new(value.kind(), value.into_value().into())
}
} }
impl From<RecordKey> for HashDigest { macro_rules! impl_crypto_typed_and_group_and_vec {
fn from(value: RecordKey) -> Self { ($visibility:vis $name:ident) => {
HashDigest::new(value.kind(), value.into_value().into()) impl_crypto_typed!($visibility $name);
} impl_crypto_typed_group!($visibility $name);
impl_crypto_typed_vec!($visibility $name);
};
} }
impl From<NodeId> for PublicKey { // CryptoKind typed, with group and vector conversions
fn from(value: NodeId) -> Self { impl_crypto_typed_and_group_and_vec!(pub EncapsulationKey);
PublicKey::new(value.kind(), value.into_value().into()) impl_crypto_typed_and_group_and_vec!(pub DecapsulationKey);
} impl_crypto_typed_and_group_and_vec!(pub PublicKey);
} impl_crypto_typed_and_group_and_vec!(pub SecretKey);
impl_crypto_typed_and_group_and_vec!(pub Signature);
impl_crypto_typed_and_group_and_vec!(pub SharedSecret);
impl_crypto_typed_and_group_and_vec!(pub HashDigest);
impl_crypto_typed_and_group_and_vec!(pub RecordKey);
impl_crypto_typed_and_group_and_vec!(pub NodeId);
impl_crypto_typed_and_group_and_vec!(pub RouteId);
impl_crypto_typed_and_group_and_vec!(pub MemberId);
impl From<PublicKey> for NodeId { // No vector representation
fn from(value: PublicKey) -> Self { impl_crypto_typed_and_group!(pub KeyPair);
NodeId::new(value.kind(), value.into_value().into())
}
}
impl From<NodeIdGroup> for PublicKeyGroup { // Internal types
fn from(value: NodeIdGroup) -> Self { impl_crypto_typed!(pub(crate) HashCoordinate);
let items: Vec<PublicKey> = value.iter().map(|node_id| node_id.clone().into()).collect();
PublicKeyGroup::from(items)
}
}
impl From<PublicKeyGroup> for NodeIdGroup {
fn from(value: PublicKeyGroup) -> Self {
let items: Vec<NodeId> = value.iter().map(|node_id| node_id.clone().into()).collect();
NodeIdGroup::from(items)
}
}
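
The replacement side of this hunk stamps out the typed alias, group, and `Vec<u8>` conversions with declarative macros instead of listing every `CryptoTyped<...>` alias by hand. A toy standalone illustration of that macro consolidation (the macro and type names here are invented for the sketch and are not the crate's):

```rust
// Invented names for illustration only; shows how one macro invocation per
// type can replace repeated, hand-written alias/impl boilerplate.
macro_rules! impl_typed_alias {
    ($vis:vis $name:ident, $bare:ty) => {
        #[derive(Clone, Debug, PartialEq, Eq)]
        $vis struct $name {
            kind: [u8; 4],
            value: $bare,
        }

        impl $name {
            $vis fn new(kind: [u8; 4], value: $bare) -> Self {
                Self { kind, value }
            }
        }
    };
}

impl_typed_alias!(pub NodeIdish, [u8; 32]);
impl_typed_alias!(pub RecordKeyish, [u8; 32]);

fn main() {
    let node_id = NodeIdish::new(*b"VLD0", [0u8; 32]);
    // Converting between typed wrappers keeps the kind and reuses the value.
    let record_key = RecordKeyish::new(node_id.kind, node_id.value);
    assert_eq!(record_key.kind, *b"VLD0");
}
```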


@ -49,7 +49,7 @@ impl BootstrapRecord {
} }
pub fn merge(&mut self, other: BootstrapRecord) { pub fn merge(&mut self, other: BootstrapRecord) {
self.public_keys.add_all(&other.public_keys); self.public_keys.add_all_from_slice(&other.public_keys);
for x in other.envelope_support { for x in other.envelope_support {
if !self.envelope_support.contains(&x) { if !self.envelope_support.contains(&x) {
self.envelope_support.push(x); self.envelope_support.push(x);
@ -83,7 +83,7 @@ impl BootstrapRecord {
.envelope_support() .envelope_support()
.iter() .iter()
.map(|x| { .map(|x| {
if (x.0)[0..3] == *b"ENV" { if x.bytes()[0..3] == *b"ENV" {
x.to_string().split_off(3) x.to_string().split_off(3)
} else { } else {
x.to_string() x.to_string()
@@ -161,19 +161,20 @@ impl BootstrapRecord {
         let crypto = network_manager.crypto();
-        let sig = match crypto.generate_signatures(v1.as_bytes(), &[signing_key_pair], |kp, sig| {
-            Signature::new(kp.kind(), sig).to_string()
-        }) {
-            Ok(v) => {
-                let Some(sig) = v.first().cloned() else {
-                    bail!("No signature generated");
-                };
-                sig
-            }
-            Err(e) => {
-                bail!("Failed to generate signature: {}", e);
-            }
-        };
+        let sig =
+            match crypto.generate_signatures(v1.as_bytes(), &[signing_key_pair], |_kp, sig| {
+                sig.to_string()
+            }) {
+                Ok(v) => {
+                    let Some(sig) = v.first().cloned() else {
+                        bail!("No signature generated");
+                    };
+                    sig
+                }
+                Err(e) => {
+                    bail!("Failed to generate signature: {}", e);
+                }
+            };
         v1 += &sig;
         Ok(v1)
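
One small idiom from earlier in this file's diff is worth calling out: `x.to_string().split_off(3)` strips a three-byte `ENV` prefix by keeping only what follows byte index 3. A tiny standalone demo of that `String::split_off` behavior (the value is illustrative):

```rust
fn main() {
    // split_off(3) returns the tail starting at byte index 3 and leaves the
    // head ("ENV") behind in the original String, which is then discarded.
    let mut tagged = String::from("ENV0");
    let version_only = tagged.split_off(3);
    assert_eq!(version_only, "0");
    assert_eq!(tagged, "ENV");
}
```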


@ -112,7 +112,7 @@ impl NetworkManager {
// and as such, a routing domain can not be determined for it // and as such, a routing domain can not be determined for it
// by the code that receives the FindNodeA result // by the code that receives the FindNodeA result
for pi in bootv1response.peers.iter().cloned() { for pi in bootv1response.peers.iter().cloned() {
if pi.node_info().public_keys().contains_any(bsrec.public_keys()) { if pi.node_info().public_keys().contains_any_from_slice(bsrec.public_keys()) {
return Some(pi); return Some(pi);
} }
} }


@ -75,8 +75,8 @@ impl NetworkManager {
.filter_map(|bsrec| { .filter_map(|bsrec| {
if routing_table.matches_own_public_key(bsrec.public_keys()) { if routing_table.matches_own_public_key(bsrec.public_keys()) {
routing_table.get_published_peer_info(routing_domain) routing_table.get_published_peer_info(routing_domain)
} else if let Some(best_public_key) = bsrec.public_keys().best() { } else if let Some(best_public_key) = bsrec.public_keys().first() {
if let Ok(best_node_id) = routing_table.generate_node_id(&best_public_key) { if let Ok(best_node_id) = routing_table.generate_node_id(best_public_key) {
if let Some(nr) = routing_table.lookup_node_ref(best_node_id).ok().flatten() if let Some(nr) = routing_table.lookup_node_ref(best_node_id).ok().flatten()
{ {
nr.get_peer_info(routing_domain) nr.get_peer_info(routing_domain)


@ -104,7 +104,10 @@ impl NetworkManager {
let mut mbi = 0; let mut mbi = 0;
while mbi < merged_bootstrap_records.len() { while mbi < merged_bootstrap_records.len() {
let mbr = &mut merged_bootstrap_records[mbi]; let mbr = &mut merged_bootstrap_records[mbi];
if mbr.public_keys().contains_any(bsrec.public_keys()) { if mbr
.public_keys()
.contains_any_from_slice(bsrec.public_keys())
{
// Merge record, pop this one out // Merge record, pop this one out
let mbr = merged_bootstrap_records.remove(mbi); let mbr = merged_bootstrap_records.remove(mbi);
bsrec.merge(mbr); bsrec.merge(mbr);


@ -44,7 +44,10 @@ impl NetworkManager {
let mut mbi = 0; let mut mbi = 0;
while mbi < merged_bootstrap_records.len() { while mbi < merged_bootstrap_records.len() {
let mbr = &mut merged_bootstrap_records[mbi]; let mbr = &mut merged_bootstrap_records[mbi];
if mbr.public_keys().contains_any(bsrec.public_keys()) { if mbr
.public_keys()
.contains_any_from_slice(bsrec.public_keys())
{
// Merge record, pop this one out // Merge record, pop this one out
let mbr = merged_bootstrap_records.remove(mbi); let mbr = merged_bootstrap_records.remove(mbi);
bsrec.merge(mbr); bsrec.merge(mbr);


@ -67,7 +67,7 @@ pub const TXT_LOOKUP_CACHE_SIZE: usize = 256;
/// Duration that TXT lookups are valid in the cache (5 minutes, <= the DNS record expiration timeout) /// Duration that TXT lookups are valid in the cache (5 minutes, <= the DNS record expiration timeout)
pub const TXT_LOOKUP_EXPIRATION: TimestampDuration = TimestampDuration::new_secs(300); pub const TXT_LOOKUP_EXPIRATION: TimestampDuration = TimestampDuration::new_secs(300);
/// Maximum size for a message is the same as the maximum size for an Envelope /// Maximum size for a message is the same as the maximum size for an Envelope
pub const MAX_MESSAGE_SIZE: usize = MAX_ENVELOPE_SIZE; pub const MAX_MESSAGE_SIZE: usize = ENV0_MAX_ENVELOPE_SIZE;
/// Statistics table size for tracking performance by IP address /// Statistics table size for tracking performance by IP address
pub const IPADDR_TABLE_SIZE: usize = 1024; pub const IPADDR_TABLE_SIZE: usize = 1024;
/// Eviction time for ip addresses from statistics tables (5 minutes) /// Eviction time for ip addresses from statistics tables (5 minutes)
@ -111,8 +111,8 @@ impl SendDataResult {
Some(ncm) if ncm.is_direct() Some(ncm) if ncm.is_direct()
) )
} }
pub fn is_ordered(&self) -> bool { pub fn sequence_ordering(&self) -> SequenceOrdering {
self.unique_flow.flow.protocol_type().is_ordered() self.unique_flow.flow.protocol_type().sequence_ordering()
} }
pub fn unique_flow(&self) -> UniqueFlow { pub fn unique_flow(&self) -> UniqueFlow {
@ -286,9 +286,11 @@ impl NetworkManager {
Some( Some(
bcs.derive_shared_secret( bcs.derive_shared_secret(
network_key_password.as_bytes(), network_key_password.as_bytes(),
&bcs.generate_hash(network_key_password.as_bytes()), bcs.generate_hash(network_key_password.as_bytes())
.ref_value(),
) )
.expect("failed to derive network key"), .expect("failed to derive network key")
.value(),
) )
} else { } else {
None None
@ -593,6 +595,7 @@ impl NetworkManager {
/// Generates a multi-shot/normal receipt /// Generates a multi-shot/normal receipt
#[instrument(level = "trace", skip(self, extra_data, callback))] #[instrument(level = "trace", skip(self, extra_data, callback))]
#[expect(dead_code)]
pub fn generate_receipt<D: AsRef<[u8]>>( pub fn generate_receipt<D: AsRef<[u8]>>(
&self, &self,
expiration_us: TimestampDuration, expiration_us: TimestampDuration,
@ -612,15 +615,19 @@ impl NetworkManager {
let nonce = vcrypto.random_nonce(); let nonce = vcrypto.random_nonce();
let node_id = routing_table.node_id(vcrypto.kind()); let node_id = routing_table.node_id(vcrypto.kind());
let secret_key = routing_table.secret_key(vcrypto.kind()).value(); let secret_key = routing_table.secret_key(vcrypto.kind());
// Encode envelope
let version = best_receipt_version();
let receipt = match version {
RECEIPT_VERSION_RCP0 => {
Receipt::try_new_rcp0(&crypto, node_id.kind(), nonce, node_id, extra_data)?
}
_ => {
bail!("unsupported receipt version: {:?}", version);
}
};
let receipt = Receipt::try_new(
best_receipt_version(),
node_id.kind(),
nonce,
node_id.value(),
extra_data,
)?;
let out = receipt let out = receipt
.to_signed_data(&crypto, &secret_key) .to_signed_data(&crypto, &secret_key)
.wrap_err("failed to generate signed receipt")?; .wrap_err("failed to generate signed receipt")?;
@ -652,15 +659,19 @@ impl NetworkManager {
let nonce = vcrypto.random_nonce(); let nonce = vcrypto.random_nonce();
let node_id = routing_table.node_id(vcrypto.kind()); let node_id = routing_table.node_id(vcrypto.kind());
let secret_key = routing_table.secret_key(vcrypto.kind()).value(); let secret_key = routing_table.secret_key(vcrypto.kind());
let version = best_receipt_version();
let receipt = match version {
RECEIPT_VERSION_RCP0 => {
Receipt::try_new_rcp0(&crypto, node_id.kind(), nonce, node_id, extra_data)?
}
_ => {
bail!("unsupported receipt version: {:?}", version);
}
};
let receipt = Receipt::try_new(
best_receipt_version(),
node_id.kind(),
nonce,
node_id.value(),
extra_data,
)?;
let out = receipt let out = receipt
.to_signed_data(&crypto, &secret_key) .to_signed_data(&crypto, &secret_key)
.wrap_err("failed to generate signed receipt")?; .wrap_err("failed to generate signed receipt")?;
@ -687,7 +698,7 @@ impl NetworkManager {
let receipt_manager = self.receipt_manager(); let receipt_manager = self.receipt_manager();
let crypto = self.crypto(); let crypto = self.crypto();
let receipt = match Receipt::from_signed_data(&crypto, receipt_data.as_ref()) { let receipt = match Receipt::try_from_signed_data(&crypto, receipt_data.as_ref()) {
Err(e) => { Err(e) => {
return NetworkResult::invalid_message(e.to_string()); return NetworkResult::invalid_message(e.to_string());
} }
@ -713,7 +724,7 @@ impl NetworkManager {
let receipt_manager = self.receipt_manager(); let receipt_manager = self.receipt_manager();
let crypto = self.crypto(); let crypto = self.crypto();
let receipt = match Receipt::from_signed_data(&crypto, receipt_data.as_ref()) { let receipt = match Receipt::try_from_signed_data(&crypto, receipt_data.as_ref()) {
Err(e) => { Err(e) => {
return NetworkResult::invalid_message(e.to_string()); return NetworkResult::invalid_message(e.to_string());
} }
@ -738,7 +749,7 @@ impl NetworkManager {
let receipt_manager = self.receipt_manager(); let receipt_manager = self.receipt_manager();
let crypto = self.crypto(); let crypto = self.crypto();
let receipt = match Receipt::from_signed_data(&crypto, receipt_data.as_ref()) { let receipt = match Receipt::try_from_signed_data(&crypto, receipt_data.as_ref()) {
Err(e) => { Err(e) => {
return NetworkResult::invalid_message(e.to_string()); return NetworkResult::invalid_message(e.to_string());
} }
@ -755,7 +766,7 @@ impl NetworkManager {
pub async fn handle_private_receipt<R: AsRef<[u8]>>( pub async fn handle_private_receipt<R: AsRef<[u8]>>(
&self, &self,
receipt_data: R, receipt_data: R,
private_route: BarePublicKey, private_route: PublicKey,
) -> NetworkResult<()> { ) -> NetworkResult<()> {
let Ok(_guard) = self.startup_context.startup_lock.enter() else { let Ok(_guard) = self.startup_context.startup_lock.enter() else {
return NetworkResult::service_unavailable("network is not started"); return NetworkResult::service_unavailable("network is not started");
@ -764,7 +775,7 @@ impl NetworkManager {
let receipt_manager = self.receipt_manager(); let receipt_manager = self.receipt_manager();
let crypto = self.crypto(); let crypto = self.crypto();
let receipt = match Receipt::from_signed_data(&crypto, receipt_data.as_ref()) { let receipt = match Receipt::try_from_signed_data(&crypto, receipt_data.as_ref()) {
Err(e) => { Err(e) => {
return NetworkResult::invalid_message(e.to_string()); return NetworkResult::invalid_message(e.to_string());
} }
@ -805,9 +816,11 @@ impl NetworkManager {
}; };
// Restrict reverse connection to same sequencing requirement as inbound signal // Restrict reverse connection to same sequencing requirement as inbound signal
if signal_flow.protocol_type().is_ordered() { let sequencing = signal_flow
peer_nr.set_sequencing(Sequencing::EnsureOrdered); .protocol_type()
} .sequence_ordering()
.strict_sequencing();
peer_nr.set_sequencing(sequencing);
// Make a reverse connection to the peer and send the receipt to it // Make a reverse connection to the peer and send the receipt to it
rpc.rpc_call_return_receipt(Destination::direct(peer_nr), receipt) rpc.rpc_call_return_receipt(Destination::direct(peer_nr), receipt)
@ -886,21 +899,21 @@ impl NetworkManager {
}; };
let node_id = routing_table.node_id(vcrypto.kind()); let node_id = routing_table.node_id(vcrypto.kind());
let secret_key = routing_table.secret_key(vcrypto.kind()).value(); let secret_key = routing_table.secret_key(vcrypto.kind());
// Get timestamp, nonce // Get timestamp, nonce
let ts = Timestamp::now(); let ts = Timestamp::now();
let nonce = vcrypto.random_nonce(); let nonce = vcrypto.random_nonce();
// Encode envelope // Encode envelope
let envelope = Envelope::new( let envelope = match version {
version, ENVELOPE_VERSION_ENV0 => {
node_id.kind(), Envelope::try_new_env0(&crypto, node_id.kind(), ts, nonce, node_id, dest_node_id)?
ts, }
nonce, _ => {
node_id.value(), bail!("unsupported envelope version: {:?}", version);
dest_node_id.value(), }
); };
envelope envelope
.to_encrypted_data(&crypto, body.as_ref(), &secret_key, &self.network_key) .to_encrypted_data(&crypto, body.as_ref(), &secret_key, &self.network_key)
.wrap_err("envelope failed to encode") .wrap_err("envelope failed to encode")
@ -1046,7 +1059,7 @@ impl NetworkManager {
// Decode envelope header (may fail signature validation) // Decode envelope header (may fail signature validation)
let crypto = self.crypto(); let crypto = self.crypto();
let envelope = match Envelope::from_signed_data(&crypto, data, &self.network_key) { let envelope = match Envelope::try_from_signed_data(&crypto, data, &self.network_key) {
Ok(v) => v, Ok(v) => v,
Err(e) => { Err(e) => {
veilid_log!(self debug "envelope failed to decode: {}", e); veilid_log!(self debug "envelope failed to decode: {}", e);
@ -1164,9 +1177,8 @@ impl NetworkManager {
if let Some((mut relay_nr, relay_kind)) = some_relay { if let Some((mut relay_nr, relay_kind)) = some_relay {
// Ensure the protocol used to forward is of the same sequencing requirement // Ensure the protocol used to forward is of the same sequencing requirement
// Address type is allowed to change if connectivity is better // Address type is allowed to change if connectivity is better
if flow.protocol_type().is_ordered() { let sequencing = flow.protocol_type().sequence_ordering().strict_sequencing();
relay_nr.set_sequencing(Sequencing::EnsureOrdered); relay_nr.set_sequencing(sequencing);
};
// Pass relay to RPC system // Pass relay to RPC system
if let Err(e) = self.enqueue_relay(relay_nr, data.to_vec(), relay_kind) { if let Err(e) = self.enqueue_relay(relay_nr, data.to_vec(), relay_kind) {
@ -1180,7 +1192,7 @@ impl NetworkManager {
} }
// DH to get decryption key (cached) // DH to get decryption key (cached)
let secret_key = routing_table.secret_key(envelope.get_crypto_kind()).value(); let secret_key = routing_table.secret_key(envelope.get_crypto_kind());
// Decrypt the envelope body // Decrypt the envelope body
let crypto = self.crypto(); let crypto = self.crypto();
@ -1257,7 +1269,12 @@ impl NetworkManager {
// Inform the connection table about the flow's priority // Inform the connection table about the flow's priority
let is_relaying_flow = node_ref.is_relaying(routing_domain); let is_relaying_flow = node_ref.is_relaying(routing_domain);
if is_relaying_flow && flow.protocol_type().is_ordered() { if is_relaying_flow
&& matches!(
flow.protocol_type().sequence_ordering(),
SequenceOrdering::Ordered
)
{
self.connection_manager().add_relaying_flow(flow); self.connection_manager().add_relaying_flow(flow);
} }
} }
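
A pattern repeated through this file's diff is constructing receipts and envelopes through an explicit version dispatch (`RECEIPT_VERSION_RCP0`, `ENVELOPE_VERSION_ENV0`) and bailing on anything unrecognized, rather than relying on an implicit "best" constructor. A minimal standalone sketch of that shape (the constant and builder below are hypothetical, not the crate's API):

```rust
// Hypothetical version constant and builder, for illustration only.
const VERSION_V0: u8 = 0;

fn build_for_version(version: u8, body: &[u8]) -> Result<Vec<u8>, String> {
    match version {
        // Only versions we explicitly know how to construct are accepted.
        VERSION_V0 => Ok(body.to_vec()),
        _ => Err(format!("unsupported version: {}", version)),
    }
}

fn main() {
    assert!(build_for_version(VERSION_V0, b"payload").is_ok());
    assert!(build_for_version(7, b"payload").is_err());
}
```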


@ -92,6 +92,7 @@ impl IGDManager {
} }
#[instrument(level = "trace", target = "net", skip_all)] #[instrument(level = "trace", target = "net", skip_all)]
#[expect(dead_code)]
pub async fn unmap_port( pub async fn unmap_port(
&self, &self,
protocol_type: IGDProtocolType, protocol_type: IGDProtocolType,


@ -17,7 +17,7 @@ pub enum ReceiptEvent {
ReturnedSafety, ReturnedSafety,
ReturnedPrivate { ReturnedPrivate {
#[expect(dead_code)] #[expect(dead_code)]
private_route: BarePublicKey, private_route: PublicKey,
}, },
Expired, Expired,
Cancelled, Cancelled,
@ -28,7 +28,7 @@ pub(super) enum ReceiptReturned {
OutOfBand, OutOfBand,
InBand { inbound_noderef: FilteredNodeRef }, InBand { inbound_noderef: FilteredNodeRef },
Safety, Safety,
Private { private_route: BarePublicKey }, Private { private_route: PublicKey },
} }
pub trait ReceiptCallback: Send + 'static { pub trait ReceiptCallback: Send + 'static {
@ -86,7 +86,6 @@ struct ReceiptRecord {
} }
impl ReceiptRecord { impl ReceiptRecord {
#[expect(dead_code)]
pub fn new( pub fn new(
receipt: Receipt, receipt: Receipt,
expiration_ts: Timestamp, expiration_ts: Timestamp,
@ -145,7 +144,7 @@ impl PartialOrd for ReceiptRecordTimestampSort {
/////////////////////////////////// ///////////////////////////////////
struct ReceiptManagerInner { struct ReceiptManagerInner {
records_by_nonce: BTreeMap<BareNonce, Arc<Mutex<ReceiptRecord>>>, records_by_nonce: BTreeMap<Nonce, Arc<Mutex<ReceiptRecord>>>,
next_oldest_ts: Option<Timestamp>, next_oldest_ts: Option<Timestamp>,
stop_source: Option<StopSource>, stop_source: Option<StopSource>,
timeout_task: MustJoinSingleFuture<()>, timeout_task: MustJoinSingleFuture<()>,
@ -405,7 +404,7 @@ impl ReceiptManager {
} }
#[expect(dead_code)] #[expect(dead_code)]
pub async fn cancel_receipt(&self, nonce: &BareNonce) -> EyreResult<()> { pub async fn cancel_receipt(&self, nonce: &Nonce) -> EyreResult<()> {
event!(target: "receipt", Level::DEBUG, "== Cancel Receipt {}", nonce.encode()); event!(target: "receipt", Level::DEBUG, "== Cancel Receipt {}", nonce.encode());
let _guard = self.unlocked_inner.startup_lock.enter()?; let _guard = self.unlocked_inner.startup_lock.enter()?;


@ -691,13 +691,13 @@ impl NetworkManager {
relay_nr.set_sequencing(sequencing); relay_nr.set_sequencing(sequencing);
// Tighten sequencing for the target to the best reverse connection flow we can get // Tighten sequencing for the target to the best reverse connection flow we can get
let tighten = peer_a let max_ordering = peer_a
.node_info() .node_info()
.filtered_dial_info_details(DialInfoDetail::NO_SORT, &|did| { .filtered_dial_info_details(DialInfoDetail::NO_SORT, &|did| {
did.matches_filter(&dial_info_filter) did.matches_filter(&dial_info_filter)
}) })
.iter() .iter()
.find_map(|did| { .fold(SequenceOrdering::Unordered, |ord, did| {
if peer_b if peer_b
.node_info() .node_info()
.address_types() .address_types()
@ -706,21 +706,17 @@ impl NetworkManager {
.node_info() .node_info()
.outbound_protocols() .outbound_protocols()
.contains(did.dial_info.protocol_type()) .contains(did.dial_info.protocol_type())
&& did.dial_info.protocol_type().is_ordered()
{ {
Some(true) cmp::max(ord, did.dial_info.protocol_type().sequence_ordering())
} else { } else {
None ord
} }
}) });
.unwrap_or(false);
let mut target_node_ref = target_node_ref.filtered_clone( let mut target_node_ref = target_node_ref.filtered_clone(
NodeRefFilter::from(dial_info_filter).with_routing_domain(routing_domain), NodeRefFilter::from(dial_info_filter).with_routing_domain(routing_domain),
); );
if tighten { target_node_ref.set_sequencing(max_ordering.strict_sequencing());
target_node_ref.set_sequencing(Sequencing::EnsureOrdered);
}
Some(NodeContactMethodKind::SignalReverse( Some(NodeContactMethodKind::SignalReverse(
relay_nr, relay_nr,
target_node_ref, target_node_ref,
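
The `fold` above starts from the weakest ordering and keeps the strongest one seen across the matching dial info, then tightens the target's sequencing to match. Assuming `SequenceOrdering` derives `Ord` with `Unordered < Ordered` (an inference from this usage, not confirmed by the diff), the core of that reduction looks like:

```rust
use std::cmp;

// Assumed stand-in for the crate's SequenceOrdering; the variant order makes
// Unordered compare less than Ordered so cmp::max picks the stricter one.
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord)]
enum SequenceOrdering {
    Unordered,
    Ordered,
}

fn main() {
    let per_dial_info = [
        SequenceOrdering::Unordered,
        SequenceOrdering::Ordered,
        SequenceOrdering::Unordered,
    ];
    let max_ordering = per_dial_info
        .iter()
        .fold(SequenceOrdering::Unordered, |ord, o| cmp::max(ord, *o));
    assert_eq!(max_ordering, SequenceOrdering::Ordered);
}
```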


@ -15,14 +15,14 @@ pub async fn test_signed_node_info() {
for ck in VALID_CRYPTO_KINDS { for ck in VALID_CRYPTO_KINDS {
let vcrypto = crypto.get(ck).unwrap(); let vcrypto = crypto.get(ck).unwrap();
let keypair = vcrypto.generate_keypair(); let keypair = vcrypto.generate_keypair();
let secret_key_group = SecretKeyGroup::from(SecretKey::new(ck, keypair.secret())); let secret_key_group = SecretKeyGroup::from(keypair.secret());
// Build test node info // Build test node info
let node_info = NodeInfo::new( let node_info = NodeInfo::new(
Timestamp::now(), Timestamp::now(),
VALID_ENVELOPE_VERSIONS.to_vec(), VALID_ENVELOPE_VERSIONS.to_vec(),
vec![CryptoInfo::VLD0 { vec![CryptoInfo::VLD0 {
public_key: keypair.key(), public_key: keypair.key().value(),
}], }],
PUBLIC_INTERNET_CAPABILITIES.to_vec(), PUBLIC_INTERNET_CAPABILITIES.to_vec(),
ProtocolTypeSet::all(), ProtocolTypeSet::all(),
@ -82,7 +82,7 @@ pub async fn test_signed_node_info() {
.expect_err("should not validate"); .expect_err("should not validate");
let invalid_crypto_kind = SignatureGroup::from(Signature::new( let invalid_crypto_kind = SignatureGroup::from(Signature::new(
CryptoKind(*b"FOOO"), CryptoKind::new(*b"FOOO"),
BareSignature::default(), BareSignature::default(),
)); ));


@ -62,7 +62,7 @@ impl DialInfoFilter {
} }
pub fn is_ordered_only(&self) -> bool { pub fn is_ordered_only(&self) -> bool {
for pt in self.protocol_type_set { for pt in self.protocol_type_set {
if !pt.is_ordered() { if !matches!(pt.sequence_ordering(), SequenceOrdering::Ordered) {
return false; return false;
} }
} }


@ -29,7 +29,12 @@ impl fmt::Display for Flow {
impl Flow { impl Flow {
pub fn new(remote: PeerAddress, local: SocketAddress) -> Self { pub fn new(remote: PeerAddress, local: SocketAddress) -> Self {
assert!(!remote.protocol_type().is_ordered() || !local.address().is_unspecified()); assert!(
!matches!(
remote.protocol_type().sequence_ordering(),
SequenceOrdering::Ordered
) || !local.address().is_unspecified()
);
Self { Self {
remote, remote,


@@ -14,28 +14,12 @@ pub(crate) enum ProtocolType {
 }
 
 impl ProtocolType {
-    pub fn is_ordered(&self) -> bool {
-        matches!(
-            self,
-            ProtocolType::TCP | ProtocolType::WS | ProtocolType::WSS
-        )
-    }
-
-    #[expect(dead_code)]
-    pub fn minimum_sequencing(&self) -> Sequencing {
-        match self {
-            ProtocolType::UDP => Sequencing::NoPreference,
-            ProtocolType::TCP => Sequencing::PreferOrdered,
-            ProtocolType::WS => Sequencing::PreferOrdered,
-            ProtocolType::WSS => Sequencing::PreferOrdered,
-        }
-    }
-
-    #[expect(dead_code)]
-    pub fn maximum_sequencing(&self) -> Sequencing {
-        match self {
-            ProtocolType::UDP => Sequencing::PreferOrdered,
-            ProtocolType::TCP => Sequencing::EnsureOrdered,
-            ProtocolType::WS => Sequencing::EnsureOrdered,
-            ProtocolType::WSS => Sequencing::EnsureOrdered,
+    pub fn sequence_ordering(&self) -> SequenceOrdering {
+        match self {
+            ProtocolType::UDP => SequenceOrdering::Unordered,
+            ProtocolType::TCP => SequenceOrdering::Ordered,
+            ProtocolType::WS => SequenceOrdering::Ordered,
+            ProtocolType::WSS => SequenceOrdering::Ordered,
         }
     }
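
So UDP is the only transport reported as unordered, while TCP and both WebSocket variants are stream protocols and therefore ordered. A standalone sketch of that mapping (the enums are redeclared here purely for illustration):

```rust
// Illustrative redeclarations for the sketch; not the crate's actual types.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
enum ProtocolType {
    UDP,
    TCP,
    WS,
    WSS,
}

#[derive(Clone, Copy, Debug, PartialEq, Eq)]
enum SequenceOrdering {
    Unordered,
    Ordered,
}

fn sequence_ordering(pt: ProtocolType) -> SequenceOrdering {
    match pt {
        ProtocolType::UDP => SequenceOrdering::Unordered,
        // Stream-based transports deliver bytes in order.
        ProtocolType::TCP | ProtocolType::WS | ProtocolType::WSS => SequenceOrdering::Ordered,
    }
}

fn main() {
    assert_eq!(sequence_ordering(ProtocolType::UDP), SequenceOrdering::Unordered);
    assert_eq!(sequence_ordering(ProtocolType::WSS), SequenceOrdering::Ordered);
}
```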


@ -27,10 +27,10 @@ impl SignalInfo {
receipt, receipt,
peer_info: _, peer_info: _,
} => { } => {
if receipt.len() < MIN_RECEIPT_SIZE { if receipt.len() < RCP0_MIN_RECEIPT_SIZE {
return Err(RPCError::protocol("SignalInfo HolePunch receipt too short")); return Err(RPCError::protocol("SignalInfo HolePunch receipt too short"));
} }
if receipt.len() > MAX_RECEIPT_SIZE { if receipt.len() > RCP0_MAX_RECEIPT_SIZE {
return Err(RPCError::protocol("SignalInfo HolePunch receipt too long")); return Err(RPCError::protocol("SignalInfo HolePunch receipt too long"));
} }
Ok(()) Ok(())
@ -39,12 +39,12 @@ impl SignalInfo {
receipt, receipt,
peer_info: _, peer_info: _,
} => { } => {
if receipt.len() < MIN_RECEIPT_SIZE { if receipt.len() < RCP0_MIN_RECEIPT_SIZE {
return Err(RPCError::protocol( return Err(RPCError::protocol(
"SignalInfo ReverseConnect receipt too short", "SignalInfo ReverseConnect receipt too short",
)); ));
} }
if receipt.len() > MAX_RECEIPT_SIZE { if receipt.len() > RCP0_MAX_RECEIPT_SIZE {
return Err(RPCError::protocol( return Err(RPCError::protocol(
"SignalInfo ReverseConnect receipt too long", "SignalInfo ReverseConnect receipt too long",
)); ));


@ -5,7 +5,7 @@ impl_veilid_log_facility!("rtab");
/// Routing Table Bucket /// Routing Table Bucket
/// Stores map of public keys to entries, which may be in multiple routing tables per crypto kind /// Stores map of public keys to entries, which may be in multiple routing tables per crypto kind
/// Keeps entries at a particular 'dht distance' from this cryptokind's node id /// Keeps entries at a particular 'hash coordinate distance' from this cryptokind's node id
/// Helps to keep managed lists at particular distances so we can evict nodes by priority /// Helps to keep managed lists at particular distances so we can evict nodes by priority
/// where the priority comes from liveness and age of the entry (older is better) /// where the priority comes from liveness and age of the entry (older is better)
pub struct Bucket { pub struct Bucket {


@ -162,10 +162,8 @@ impl fmt::Display for BucketEntryLocalNetwork {
/// The data associated with each bucket entry /// The data associated with each bucket entry
#[derive(Debug, Serialize, Deserialize)] #[derive(Debug, Serialize, Deserialize)]
pub(crate) struct BucketEntryInner { pub(crate) struct BucketEntryInner {
/// The node ids matching this bucket entry, with the cryptography versions supported by this node as the 'kind' field /// The node ids matching this bucket entry
validated_node_ids: NodeIdGroup, node_ids: NodeIdGroup,
/// The node ids claimed by the remote node that use cryptography versions we do not support
unsupported_node_ids: NodeIdGroup,
/// The set of envelope versions supported by the node inclusive of the requirements of any relay the node may be using /// The set of envelope versions supported by the node inclusive of the requirements of any relay the node may be using
envelope_support: Vec<EnvelopeVersion>, envelope_support: Vec<EnvelopeVersion>,
/// If this node has updated it's SignedNodeInfo since our network /// If this node has updated it's SignedNodeInfo since our network
@ -220,8 +218,7 @@ pub(crate) struct BucketEntryInner {
impl fmt::Display for BucketEntryInner { impl fmt::Display for BucketEntryInner {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
writeln!(f, "validated_node_ids: {}", self.validated_node_ids)?; writeln!(f, "node_ids: {}", self.node_ids)?;
writeln!(f, "unsupported_node_ids: {}", self.unsupported_node_ids)?;
writeln!(f, "envelope_support: {:?}", self.envelope_support)?; writeln!(f, "envelope_support: {:?}", self.envelope_support)?;
writeln!( writeln!(
f, f,
@ -281,9 +278,46 @@ impl BucketEntryInner {
/// Get all node ids /// Get all node ids
pub fn node_ids(&self) -> NodeIdGroup { pub fn node_ids(&self) -> NodeIdGroup {
let mut node_ids = self.validated_node_ids.clone(); self.node_ids.clone()
node_ids.add_all(&self.unsupported_node_ids); }
node_ids
/// Get public keys
pub fn public_keys(&self, routing_domain: RoutingDomain) -> PublicKeyGroup {
match routing_domain {
RoutingDomain::LocalNetwork => self
.local_network
.peer_info
.as_ref()
.map(|x| x.node_info().public_keys())
.unwrap_or_default(),
RoutingDomain::PublicInternet => self
.public_internet
.peer_info
.as_ref()
.map(|x| x.node_info().public_keys())
.unwrap_or_default(),
}
}
/// Get best node id
pub fn best_node_id(&self) -> Option<NodeId> {
self.node_ids.first().cloned()
}
/// Get best public key
pub fn best_public_key(&self, routing_domain: RoutingDomain) -> Option<PublicKey> {
match routing_domain {
RoutingDomain::LocalNetwork => self
.local_network
.peer_info
.as_ref()
.and_then(|x| x.node_info().public_keys().first().cloned()),
RoutingDomain::PublicInternet => self
.public_internet
.peer_info
.as_ref()
.and_then(|x| x.node_info().public_keys().first().cloned()),
}
} }
/// Add a node id for a particular crypto kind. /// Add a node id for a particular crypto kind.
@ -291,28 +325,17 @@ impl BucketEntryInner {
/// Returns Ok(None) if no previous existing node id was associated with that crypto kind, or one existed but nothing changed. /// Returns Ok(None) if no previous existing node id was associated with that crypto kind, or one existed but nothing changed.
/// Results Err() if this operation would add more crypto kinds than we support /// Results Err() if this operation would add more crypto kinds than we support
pub fn add_node_id(&mut self, node_id: NodeId) -> EyreResult<Option<NodeId>> { pub fn add_node_id(&mut self, node_id: NodeId) -> EyreResult<Option<NodeId>> {
let total_node_id_count = self.validated_node_ids.len() + self.unsupported_node_ids.len(); if let Some(old_node_id) = self.node_ids.get(node_id.kind()) {
let node_ids = if VALID_CRYPTO_KINDS.contains(&node_id.kind()) {
&mut self.validated_node_ids
} else {
&mut self.unsupported_node_ids
};
if let Some(old_node_id) = node_ids.get(node_id.kind()) {
// If this was already there we do nothing // If this was already there we do nothing
if old_node_id == node_id { if old_node_id == node_id {
return Ok(None); return Ok(None);
} }
// Won't change number of crypto kinds, but the node id changed // Won't change number of crypto kinds, but the node id changed
node_ids.add(node_id); self.node_ids.add(node_id);
return Ok(Some(old_node_id)); return Ok(Some(old_node_id));
} }
// Check to ensure we aren't adding more crypto kinds than we support self.node_ids.add(node_id);
if total_node_id_count == MAX_CRYPTO_KINDS {
bail!("too many crypto kinds for this node");
}
node_ids.add(node_id);
Ok(None) Ok(None)
} }
@ -321,17 +344,7 @@ impl BucketEntryInner {
/// Returns Some(node) any previous existing node id associated with that crypto kind /// Returns Some(node) any previous existing node id associated with that crypto kind
/// Returns None if no previous existing node id was associated with that crypto kind /// Returns None if no previous existing node id was associated with that crypto kind
pub fn remove_node_id(&mut self, crypto_kind: CryptoKind) -> Option<NodeId> { pub fn remove_node_id(&mut self, crypto_kind: CryptoKind) -> Option<NodeId> {
let node_ids = if VALID_CRYPTO_KINDS.contains(&crypto_kind) { self.node_ids.remove(crypto_kind)
&mut self.validated_node_ids
} else {
&mut self.unsupported_node_ids
};
node_ids.remove(crypto_kind)
}
pub fn best_node_id(&self) -> Option<NodeId> {
self.validated_node_ids.best()
} }
pub fn relay_ids(&self, routing_domain: RoutingDomain) -> Vec<NodeIdGroup> { pub fn relay_ids(&self, routing_domain: RoutingDomain) -> Vec<NodeIdGroup> {
@ -354,11 +367,11 @@ impl BucketEntryInner {
/// Get crypto kinds /// Get crypto kinds
pub fn crypto_kinds(&self) -> Vec<CryptoKind> { pub fn crypto_kinds(&self) -> Vec<CryptoKind> {
self.validated_node_ids.kinds() self.node_ids.kinds()
} }
/// Compare sets of crypto kinds /// Compare sets of crypto kinds
pub fn common_crypto_kinds(&self, other: &[CryptoKind]) -> Vec<CryptoKind> { pub fn common_crypto_kinds(&self, other: &[CryptoKind]) -> Vec<CryptoKind> {
common_crypto_kinds(&self.validated_node_ids.kinds(), other) common_crypto_kinds(&self.node_ids.kinds(), other)
} }
/// All-of capability check /// All-of capability check
@ -699,7 +712,7 @@ impl BucketEntryInner {
// Check if the connection is still considered live // Check if the connection is still considered live
let alive = let alive =
// Should we check the connection table? // Should we check the connection table?
if v.0.protocol_type().is_ordered() { if matches!(v.0.protocol_type().sequence_ordering(), SequenceOrdering::Ordered) {
// Look the connection up in the connection manager and see if it's still there // Look the connection up in the connection manager and see if it's still there
if let Some(connection_manager) = &opt_connection_manager { if let Some(connection_manager) = &opt_connection_manager {
connection_manager.get_connection(v.0).is_some() connection_manager.get_connection(v.0).is_some()
@ -1095,13 +1108,16 @@ impl BucketEntryInner {
ts: Timestamp, ts: Timestamp,
bytes: ByteCount, bytes: ByteCount,
expects_answer: bool, expects_answer: bool,
ordered: bool, ordering: SequenceOrdering,
) { ) {
self.transfer_stats_accounting.add_up(bytes); self.transfer_stats_accounting.add_up(bytes);
if ordered { match ordering {
self.answer_stats_accounting_ordered.record_question(ts); SequenceOrdering::Ordered => {
} else { self.answer_stats_accounting_ordered.record_question(ts);
self.answer_stats_accounting_unordered.record_question(ts); }
SequenceOrdering::Unordered => {
self.answer_stats_accounting_unordered.record_question(ts);
}
} }
self.peer_stats.rpc_stats.messages_sent += 1; self.peer_stats.rpc_stats.messages_sent += 1;
self.peer_stats.rpc_stats.failed_to_send = 0; self.peer_stats.rpc_stats.failed_to_send = 0;
@ -1125,43 +1141,53 @@ impl BucketEntryInner {
send_ts: Timestamp, send_ts: Timestamp,
recv_ts: Timestamp, recv_ts: Timestamp,
bytes: ByteCount, bytes: ByteCount,
ordered: bool, ordering: SequenceOrdering,
) { ) {
self.transfer_stats_accounting.add_down(bytes); self.transfer_stats_accounting.add_down(bytes);
if ordered {
self.answer_stats_accounting_ordered.record_answer(recv_ts); match ordering {
self.peer_stats.rpc_stats.recent_lost_answers_ordered = 0; SequenceOrdering::Ordered => {
} else { self.answer_stats_accounting_ordered.record_answer(recv_ts);
self.answer_stats_accounting_unordered self.peer_stats.rpc_stats.recent_lost_answers_ordered = 0;
.record_answer(recv_ts); }
self.peer_stats.rpc_stats.recent_lost_answers_unordered = 0; SequenceOrdering::Unordered => {
self.answer_stats_accounting_unordered
.record_answer(recv_ts);
self.peer_stats.rpc_stats.recent_lost_answers_unordered = 0;
}
} }
self.peer_stats.rpc_stats.messages_rcvd += 1; self.peer_stats.rpc_stats.messages_rcvd += 1;
self.peer_stats.rpc_stats.questions_in_flight -= 1; self.peer_stats.rpc_stats.questions_in_flight -= 1;
self.record_latency(recv_ts.saturating_sub(send_ts)); self.record_latency(recv_ts.saturating_sub(send_ts));
self.touch_last_seen(recv_ts); self.touch_last_seen(recv_ts);
} }
pub(super) fn lost_answer(&mut self, ordered: bool) { pub(super) fn lost_answer(&mut self, ordering: SequenceOrdering) {
let cur_ts = Timestamp::now(); let cur_ts = Timestamp::now();
if ordered {
self.answer_stats_accounting_ordered match ordering {
.record_lost_answer(cur_ts); SequenceOrdering::Ordered => {
self.peer_stats.rpc_stats.recent_lost_answers_ordered += 1; self.answer_stats_accounting_ordered
if self.peer_stats.rpc_stats.recent_lost_answers_ordered .record_lost_answer(cur_ts);
> UNRELIABLE_LOST_ANSWERS_ORDERED self.peer_stats.rpc_stats.recent_lost_answers_ordered += 1;
{ if self.peer_stats.rpc_stats.recent_lost_answers_ordered
self.peer_stats.rpc_stats.first_consecutive_seen_ts = None; > UNRELIABLE_LOST_ANSWERS_ORDERED
{
self.peer_stats.rpc_stats.first_consecutive_seen_ts = None;
}
} }
} else { SequenceOrdering::Unordered => {
self.answer_stats_accounting_unordered self.answer_stats_accounting_unordered
.record_lost_answer(cur_ts); .record_lost_answer(cur_ts);
self.peer_stats.rpc_stats.recent_lost_answers_unordered += 1; self.peer_stats.rpc_stats.recent_lost_answers_unordered += 1;
if self.peer_stats.rpc_stats.recent_lost_answers_unordered if self.peer_stats.rpc_stats.recent_lost_answers_unordered
> UNRELIABLE_LOST_ANSWERS_UNORDERED > UNRELIABLE_LOST_ANSWERS_UNORDERED
{ {
self.peer_stats.rpc_stats.first_consecutive_seen_ts = None; self.peer_stats.rpc_stats.first_consecutive_seen_ts = None;
}
} }
} }
self.peer_stats.rpc_stats.questions_in_flight -= 1; self.peer_stats.rpc_stats.questions_in_flight -= 1;
} }
pub(super) fn failed_to_send(&mut self, ts: Timestamp, expects_answer: bool) { pub(super) fn failed_to_send(&mut self, ts: Timestamp, expects_answer: bool) {
@ -1199,8 +1225,7 @@ impl BucketEntry {
let now = Timestamp::now(); let now = Timestamp::now();
let inner = BucketEntryInner { let inner = BucketEntryInner {
validated_node_ids: NodeIdGroup::from(first_node_id), node_ids: NodeIdGroup::from(first_node_id),
unsupported_node_ids: NodeIdGroup::new(),
envelope_support: Vec::new(), envelope_support: Vec::new(),
updated_since_last_network_change: false, updated_since_last_network_change: false,
last_flows: BTreeMap::new(), last_flows: BTreeMap::new(),
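
Throughout this entry's stats methods, the former `ordered: bool` flag has become a `SequenceOrdering` match, so ordered and unordered traffic keep separate counters. A small self-contained sketch of that per-ordering bookkeeping (types are illustrative, not the crate's accounting structs):

```rust
// Illustrative per-ordering counters; not the crate's real stats types.
#[derive(Clone, Copy, Debug)]
enum SequenceOrdering {
    Unordered,
    Ordered,
}

#[derive(Default, Debug)]
struct AnswerStats {
    questions: u64,
}

#[derive(Default, Debug)]
struct PeerStats {
    ordered: AnswerStats,
    unordered: AnswerStats,
}

impl PeerStats {
    fn record_question(&mut self, ordering: SequenceOrdering) {
        // Branch on the ordering class instead of a bare bool.
        match ordering {
            SequenceOrdering::Ordered => self.ordered.questions += 1,
            SequenceOrdering::Unordered => self.unordered.questions += 1,
        }
    }
}

fn main() {
    let mut stats = PeerStats::default();
    stats.record_question(SequenceOrdering::Ordered);
    stats.record_question(SequenceOrdering::Unordered);
    assert_eq!(stats.ordered.questions, 1);
    assert_eq!(stats.unordered.questions, 1);
}
```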


@ -339,7 +339,7 @@ impl RoutingTable {
let is_relaying = node let is_relaying = node
.operate(|_rti, e| { .operate(|_rti, e| {
e.node_info(RoutingDomain::PublicInternet) e.node_info(RoutingDomain::PublicInternet)
.map(|ni| our_node_ids.contains_any(&ni.relay_ids())) .map(|ni| our_node_ids.contains_any_from_slice(&ni.relay_ids()))
}) })
.unwrap_or(false); .unwrap_or(false);


@ -8,7 +8,7 @@ impl RoutingTable {
pub fn find_preferred_closest_peers( pub fn find_preferred_closest_peers(
&self, &self,
routing_domain: RoutingDomain, routing_domain: RoutingDomain,
hash_coordinate: &HashDigest, hash_coordinate: HashCoordinate,
capabilities: &[VeilidCapability], capabilities: &[VeilidCapability],
) -> NetworkResult<Vec<Arc<PeerInfo>>> { ) -> NetworkResult<Vec<Arc<PeerInfo>>> {
if Crypto::validate_crypto_kind(hash_coordinate.kind()).is_err() { if Crypto::validate_crypto_kind(hash_coordinate.kind()).is_err() {
@ -71,7 +71,7 @@ impl RoutingTable {
pub fn find_preferred_peers_closer_to_key( pub fn find_preferred_peers_closer_to_key(
&self, &self,
routing_domain: RoutingDomain, routing_domain: RoutingDomain,
hash_coordinate: &HashDigest, hash_coordinate: HashCoordinate,
required_capabilities: Vec<VeilidCapability>, required_capabilities: Vec<VeilidCapability>,
) -> NetworkResult<Vec<Arc<PeerInfo>>> { ) -> NetworkResult<Vec<Arc<PeerInfo>>> {
// add node information for the requesting node to our routing table // add node information for the requesting node to our routing table
@ -80,18 +80,9 @@ impl RoutingTable {
// find N nodes closest to the target node in our routing table // find N nodes closest to the target node in our routing table
// ensure the nodes returned are only the ones closer to the target node than ourself // ensure the nodes returned are only the ones closer to the target node than ourself
let crypto = self.crypto(); let own_distance = own_node_id.to_hash_coordinate().distance(&hash_coordinate);
let Some(vcrypto) = crypto.get(crypto_kind) else {
return NetworkResult::invalid_message("unsupported cryptosystem");
};
let vcrypto = &vcrypto;
let own_distance = vcrypto.distance( let hash_coordinate2 = hash_coordinate.clone();
&BareHashDigest::from(own_node_id.value()),
&hash_coordinate.value(),
);
let value = hash_coordinate.value();
let filter = Box::new( let filter = Box::new(
move |_rti: &RoutingTableInner, opt_entry: Option<Arc<BucketEntry>>| { move |_rti: &RoutingTableInner, opt_entry: Option<Arc<BucketEntry>>| {
// Exclude our own node // Exclude our own node
@ -112,8 +103,9 @@ impl RoutingTable {
let Some(entry_node_id) = e.node_ids().get(crypto_kind) else { let Some(entry_node_id) = e.node_ids().get(crypto_kind) else {
return false; return false;
}; };
let entry_distance = vcrypto let entry_distance = entry_node_id
.distance(&BareHashDigest::from(entry_node_id.value()), &value.clone()); .to_hash_coordinate()
.distance(&hash_coordinate2);
if entry_distance >= own_distance { if entry_distance >= own_distance {
return false; return false;
} }
@ -151,10 +143,9 @@ impl RoutingTable {
// Validate peers returned are, in fact, closer to the key than the node we sent this to // Validate peers returned are, in fact, closer to the key than the node we sent this to
// This same test is used on the other side so we vet things here // This same test is used on the other side so we vet things here
let valid = match Self::verify_peers_closer( let valid = match self.verify_peers_closer(
vcrypto, own_node_id.to_hash_coordinate(),
&own_node_id.clone().into(), hash_coordinate.clone(),
&hash_coordinate.clone(),
&closest_nodes, &closest_nodes,
) { ) {
Ok(v) => v, Ok(v) => v,
@ -175,24 +166,22 @@ impl RoutingTable {
/// Determine if set of peers is closer to key_near than key_far is to key_near /// Determine if set of peers is closer to key_near than key_far is to key_near
#[instrument(level = "trace", target = "rtab", skip_all, err)] #[instrument(level = "trace", target = "rtab", skip_all, err)]
pub fn verify_peers_closer( pub fn verify_peers_closer(
vcrypto: &crypto::CryptoSystemGuard<'_>, &self,
key_far: &HashDigest, hash_coordinate_far: HashCoordinate,
key_near: &HashDigest, hash_coordinate_near: HashCoordinate,
peers: &[Arc<PeerInfo>], peers: &[Arc<PeerInfo>],
) -> EyreResult<bool> { ) -> EyreResult<bool> {
let kind = vcrypto.kind(); if hash_coordinate_far.kind() != hash_coordinate_near.kind() {
if key_far.kind() != kind || key_near.kind() != kind {
bail!("keys all need the same cryptosystem"); bail!("keys all need the same cryptosystem");
} }
let mut closer = true; let mut closer = true;
let d_far = vcrypto.distance(key_far.ref_value(), key_near.ref_value()); let d_far = hash_coordinate_far.distance(&hash_coordinate_near);
for peer in peers { for peer in peers {
let Some(key_peer) = peer.node_ids().get(kind) else { let Some(key_peer) = peer.node_ids().get(hash_coordinate_far.kind()) else {
bail!("peers need to have a key with the same cryptosystem"); bail!("peers need to have a key with the same cryptosystem");
}; };
let d_near = vcrypto.distance(key_near.ref_value(), &key_peer.value().into()); let d_near = hash_coordinate_near.distance(&key_peer.to_hash_coordinate());
if d_far < d_near { if d_far < d_near {
let warning = format!( let warning = format!(
r#"peer: {} r#"peer: {}
@ -202,14 +191,13 @@ far (self): {}
d_far: {} d_far: {}
cmp: {:?}"#, cmp: {:?}"#,
key_peer, key_peer,
key_near, hash_coordinate_near,
key_far, hash_coordinate_far,
d_near, d_near,
d_far, d_far,
d_near.cmp(&d_far) d_near.cmp(&d_far)
); );
let crypto = vcrypto.crypto(); veilid_log!(self warn "{}", warning);
veilid_log!(crypto warn "{}", warning);
closer = false; closer = false;
break; break;
} }
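
The closer-peer checks above all reduce to comparing hash-coordinate distances: a returned peer is rejected if it sits farther from the near key than the far key (ourselves) does. A self-contained sketch using an XOR-style metric (whether that is exactly the crate's `HashCoordinate::distance` is an assumption made only for illustration):

```rust
// Illustrative XOR-style distance over 32-byte coordinates; treating this as
// the crate's actual metric is an assumption made only for the sketch.
type Coord = [u8; 32];

fn distance(a: &Coord, b: &Coord) -> Coord {
    let mut d = [0u8; 32];
    for i in 0..32 {
        d[i] = a[i] ^ b[i];
    }
    d
}

fn main() {
    let target = [0u8; 32]; // the "near" key
    let own = [0xffu8; 32]; // the "far" key (ourselves)
    let peer = [0x0fu8; 32];

    let d_far = distance(&own, &target);
    let d_near = distance(&peer, &target);
    // Byte arrays compare lexicographically, which orders distances from the
    // most significant byte down; this peer passes because it is no farther
    // from the target than we are.
    assert!(d_near < d_far);
}
```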


@ -36,9 +36,7 @@ impl_veilid_log_facility!("rtab");
////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////
/// Routing table bucket count (one per bit per 32 byte node id) /// Routing table bucket count (one per bit per 32 byte node id)
pub const BUCKET_COUNT: usize = 256; pub const BUCKET_COUNT: usize = HASH_COORDINATE_LENGTH * 8;
/// Fixed length for NodeId in bytes
pub const NODE_ID_LENGTH: usize = 32;
/// Minimum number of nodes we need, per crypto kind, per routing domain, or we trigger a bootstrap /// Minimum number of nodes we need, per crypto kind, per routing domain, or we trigger a bootstrap
pub const MIN_BOOTSTRAP_CONNECTIVITY_PEERS: usize = 4; pub const MIN_BOOTSTRAP_CONNECTIVITY_PEERS: usize = 4;
@ -375,18 +373,9 @@ impl RoutingTable {
false false
} }
pub fn matches_own_node_id_key(&self, node_id_key: &BareNodeId) -> bool {
for tk in self.node_ids().iter() {
if tk.ref_value() == node_id_key {
return true;
}
}
false
}
/// Produce node id from public key /// Produce node id from public key
pub fn generate_node_id(&self, public_key: &PublicKey) -> VeilidAPIResult<NodeId> { pub fn generate_node_id(&self, public_key: &PublicKey) -> VeilidAPIResult<NodeId> {
if public_key.ref_value().len() == NODE_ID_LENGTH { if public_key.ref_value().len() == HASH_COORDINATE_LENGTH {
return Ok(NodeId::new( return Ok(NodeId::new(
public_key.kind(), public_key.kind(),
BareNodeId::new(public_key.ref_value()), BareNodeId::new(public_key.ref_value()),
@ -399,13 +388,13 @@ impl RoutingTable {
let idhash = vcrypto.generate_hash(public_key.ref_value()); let idhash = vcrypto.generate_hash(public_key.ref_value());
assert!( assert!(
idhash.len() >= NODE_ID_LENGTH, idhash.ref_value().len() >= HASH_COORDINATE_LENGTH,
"generate_hash needs to produce at least {} bytes", "generate_hash needs to produce at least {} bytes",
NODE_ID_LENGTH HASH_COORDINATE_LENGTH
); );
Ok(NodeId::new( Ok(NodeId::new(
public_key.kind(), public_key.kind(),
BareNodeId::new(&idhash[0..NODE_ID_LENGTH]), BareNodeId::new(&idhash.ref_value()[0..HASH_COORDINATE_LENGTH]),
)) ))
} }
@ -413,16 +402,12 @@ impl RoutingTable {
if node_id.ref_value().len() * 8 != BUCKET_COUNT { if node_id.ref_value().len() * 8 != BUCKET_COUNT {
bail!("NodeId should be hashed down to BUCKET_COUNT bits"); bail!("NodeId should be hashed down to BUCKET_COUNT bits");
} }
let crypto = self.crypto(); let self_hash_coordinate = self.node_id(node_id.kind()).to_hash_coordinate();
let self_node_id_key = self.node_id(node_id.kind()).value();
let vcrypto = crypto.get(node_id.kind()).unwrap();
Ok(( Ok((
node_id.kind(), node_id.kind(),
vcrypto node_id
.distance( .to_hash_coordinate()
&BareHashDigest::from(node_id.value()), .distance(&self_hash_coordinate)
&BareHashDigest::from(self_node_id_key),
)
.first_nonzero_bit() .first_nonzero_bit()
.unwrap(), .unwrap(),
)) ))
@ -615,6 +600,7 @@ impl RoutingTable {
self.inner.read().dial_info_details(domain) self.inner.read().dial_info_details(domain)
} }
#[expect(dead_code)]
pub fn all_filtered_dial_info_details( pub fn all_filtered_dial_info_details(
&self, &self,
routing_domain_set: RoutingDomainSet, routing_domain_set: RoutingDomainSet,
@ -676,6 +662,7 @@ impl RoutingTable {
} }
/// Return a list of the current valid bootstrap peers in a particular routing domain /// Return a list of the current valid bootstrap peers in a particular routing domain
#[expect(dead_code)]
pub fn get_bootstrap_peers(&self, routing_domain: RoutingDomain) -> Vec<NodeRef> { pub fn get_bootstrap_peers(&self, routing_domain: RoutingDomain) -> Vec<NodeRef> {
self.inner.read().get_bootstrap_peers(routing_domain) self.inner.read().get_bootstrap_peers(routing_domain)
} }
@ -932,26 +919,30 @@ impl RoutingTable {
pub fn find_preferred_closest_nodes<'a, T, O>( pub fn find_preferred_closest_nodes<'a, T, O>(
&self, &self,
node_count: usize, node_count: usize,
node_id: HashDigest, hash_coordinate: HashCoordinate,
filters: VecDeque<RoutingTableEntryFilter>, filters: VecDeque<RoutingTableEntryFilter>,
transform: T, transform: T,
) -> VeilidAPIResult<Vec<O>> ) -> VeilidAPIResult<Vec<O>>
where where
T: for<'r> FnMut(&'r RoutingTableInner, Option<Arc<BucketEntry>>) -> O + Send, T: for<'r> FnMut(&'r RoutingTableInner, Option<Arc<BucketEntry>>) -> O + Send,
{ {
self.inner self.inner.read().find_preferred_closest_nodes(
.read() node_count,
.find_preferred_closest_nodes(node_count, node_id, filters, transform) hash_coordinate,
filters,
transform,
)
} }
#[expect(dead_code)]
pub fn sort_and_clean_closest_noderefs( pub fn sort_and_clean_closest_noderefs(
&self, &self,
node_id: HashDigest, hash_coordinate: HashCoordinate,
closest_nodes: &[NodeRef], closest_nodes: &[NodeRef],
) -> Vec<NodeRef> { ) -> Vec<NodeRef> {
self.inner self.inner
.read() .read()
.sort_and_clean_closest_noderefs(node_id, closest_nodes) .sort_and_clean_closest_noderefs(hash_coordinate, closest_nodes)
} }
#[instrument(level = "trace", skip(self, peer_info_list))] #[instrument(level = "trace", skip(self, peer_info_list))]
@ -1076,6 +1067,7 @@ impl RoutingTable {
} }
#[instrument(level = "trace", skip(self, filter, metric), ret)] #[instrument(level = "trace", skip(self, filter, metric), ret)]
#[expect(dead_code)]
pub fn find_fastest_node( pub fn find_fastest_node(
&self, &self,
cur_ts: Timestamp, cur_ts: Timestamp,
@ -1099,6 +1091,7 @@ impl RoutingTable {
} }
#[instrument(level = "trace", skip(self, filter, metric), ret)] #[instrument(level = "trace", skip(self, filter, metric), ret)]
#[expect(dead_code)]
pub fn get_node_speed_percentile( pub fn get_node_speed_percentile(
&self, &self,
node_id: NodeId, node_id: NodeId,
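
Earlier in this file's diff, the bucket index for a node id is computed from the first nonzero bit of the hash-coordinate distance to our own id, one bucket per bit (`BUCKET_COUNT = HASH_COORDINATE_LENGTH * 8`). A standalone sketch of that calculation (again assuming an XOR-style distance; the helper names are hypothetical):

```rust
// Hypothetical helper names; illustrates bucket index = index of the first
// nonzero bit of the XOR distance between two 32-byte node ids.
type Coord = [u8; 32];

fn first_nonzero_bit(d: &Coord) -> Option<usize> {
    for (byte_idx, byte) in d.iter().enumerate() {
        if *byte != 0 {
            return Some(byte_idx * 8 + byte.leading_zeros() as usize);
        }
    }
    None // identical ids have no nonzero bit
}

fn main() {
    let mut a = [0u8; 32];
    let b = [0u8; 32];
    a[1] = 0b0001_0000; // differs from b in bit 11 (byte 1, bit offset 3)
    let mut d = [0u8; 32];
    for i in 0..32 {
        d[i] = a[i] ^ b[i];
    }
    assert_eq!(first_nonzero_bit(&d), Some(11));
}
```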


@ -39,7 +39,6 @@ impl<'a, N: NodeRefAccessorsTrait + NodeRefOperateTrait + fmt::Debug + fmt::Disp
} }
} }
#[expect(dead_code)]
pub fn unlocked(&self) -> N { pub fn unlocked(&self) -> N {
self.nr.clone() self.nr.clone()
} }


@ -50,9 +50,15 @@ pub(crate) trait NodeRefCommonTrait: NodeRefAccessorsTrait + NodeRefOperateTrait
fn node_ids(&self) -> NodeIdGroup { fn node_ids(&self) -> NodeIdGroup {
self.operate(|_rti, e| e.node_ids()) self.operate(|_rti, e| e.node_ids())
} }
fn public_keys(&self, routing_domain: RoutingDomain) -> PublicKeyGroup {
self.operate(|_rti, e| e.public_keys(routing_domain))
}
fn best_node_id(&self) -> Option<NodeId> { fn best_node_id(&self) -> Option<NodeId> {
self.operate(|_rti, e| e.best_node_id()) self.operate(|_rti, e| e.best_node_id())
} }
fn best_public_key(&self, routing_domain: RoutingDomain) -> Option<PublicKey> {
self.operate(|_rti, e| e.best_public_key(routing_domain))
}
fn relay_ids(&self, routing_domain: RoutingDomain) -> Vec<NodeIdGroup> { fn relay_ids(&self, routing_domain: RoutingDomain) -> Vec<NodeIdGroup> {
self.operate(|_rti, e| e.relay_ids(routing_domain)) self.operate(|_rti, e| e.relay_ids(routing_domain))
@ -254,7 +260,7 @@ pub(crate) trait NodeRefCommonTrait: NodeRefAccessorsTrait + NodeRefOperateTrait
return false; return false;
}; };
let our_node_ids = rti.routing_table().node_ids(); let our_node_ids = rti.routing_table().node_ids();
our_node_ids.contains_any(relay_ids.as_slice()) our_node_ids.contains_any_from_slice(relay_ids.as_slice())
}) })
} }
@ -284,11 +290,11 @@ pub(crate) trait NodeRefCommonTrait: NodeRefAccessorsTrait + NodeRefOperateTrait
ts: Timestamp, ts: Timestamp,
bytes: ByteCount, bytes: ByteCount,
expects_answer: bool, expects_answer: bool,
ordered: bool, ordering: SequenceOrdering,
) { ) {
self.operate_mut(|rti, e| { self.operate_mut(|rti, e| {
rti.transfer_stats_accounting().add_up(bytes); rti.transfer_stats_accounting().add_up(bytes);
e.question_sent(ts, bytes, expects_answer, ordered); e.question_sent(ts, bytes, expects_answer, ordering);
}) })
} }
fn stats_question_rcvd(&self, ts: Timestamp, bytes: ByteCount) { fn stats_question_rcvd(&self, ts: Timestamp, bytes: ByteCount) {
@ -308,18 +314,18 @@ pub(crate) trait NodeRefCommonTrait: NodeRefAccessorsTrait + NodeRefOperateTrait
send_ts: Timestamp, send_ts: Timestamp,
recv_ts: Timestamp, recv_ts: Timestamp,
bytes: ByteCount, bytes: ByteCount,
ordered: bool, ordering: SequenceOrdering,
) { ) {
self.operate_mut(|rti, e| { self.operate_mut(|rti, e| {
rti.transfer_stats_accounting().add_down(bytes); rti.transfer_stats_accounting().add_down(bytes);
rti.latency_stats_accounting() rti.latency_stats_accounting()
.record_latency(recv_ts.saturating_sub(send_ts)); .record_latency(recv_ts.saturating_sub(send_ts));
e.answer_rcvd(send_ts, recv_ts, bytes, ordered); e.answer_rcvd(send_ts, recv_ts, bytes, ordering);
}) })
} }
fn stats_lost_answer(&self, ordered: bool) { fn stats_lost_answer(&self, ordering: SequenceOrdering) {
self.operate_mut(|_rti, e| { self.operate_mut(|_rti, e| {
e.lost_answer(ordered); e.lost_answer(ordering);
}) })
} }
fn stats_failed_to_send(&self, ts: Timestamp, expects_answer: bool) { fn stats_failed_to_send(&self, ts: Timestamp, expects_answer: bool) {


@ -8,7 +8,7 @@ impl_veilid_log_facility!("rtab");
#[derive(Clone)] #[derive(Clone)]
pub(crate) struct RouteHopData { pub(crate) struct RouteHopData {
/// The nonce used in the encryption ENC(Xn,DH(PKn,SKapr)) /// The nonce used in the encryption ENC(Xn,DH(PKn,SKapr))
pub nonce: BareNonce, pub nonce: Nonce,
/// The encrypted blob /// The encrypted blob
pub blob: Vec<u8>, pub blob: Vec<u8>,
} }
@ -62,7 +62,7 @@ impl RouteNode {
RouteNode::NodeId(id) => { RouteNode::NodeId(id) => {
format!("{}", id) format!("{}", id)
} }
RouteNode::PeerInfo(pi) => match pi.node_ids().best() { RouteNode::PeerInfo(pi) => match pi.node_ids().first() {
Some(id) => format!("{}", id), Some(id) => format!("{}", id),
None => { None => {
format!("?({})", pi.node_ids()) format!("?({})", pi.node_ids())


@ -272,7 +272,7 @@ impl RouteSpecStore {
} }
// Exclude nodes we have specifically chosen to avoid // Exclude nodes we have specifically chosen to avoid
if e.node_ids().contains_any(avoid_nodes) { if e.node_ids().contains_any_from_slice(avoid_nodes) {
return false; return false;
} }
@ -375,11 +375,11 @@ impl RouteSpecStore {
// Relay check // Relay check
for their_relay_info in their_ni.relay_info_list() { for their_relay_info in their_ni.relay_info_list() {
// Exclude nodes whose relays we have chosen to avoid // Exclude nodes whose relays we have chosen to avoid
if their_relay_info.node_ids().contains_any(avoid_nodes) { if their_relay_info.node_ids().contains_any_from_slice(avoid_nodes) {
return false; return false;
} }
// Exclude nodes whose relay is our own relay if we have one // Exclude nodes whose relay is our own relay if we have one
if their_relay_info.node_ids().contains_any(&own_relay_ids) { if their_relay_info.node_ids().contains_any_from_slice(&own_relay_ids) {
return false; return false;
} }
} }
@ -535,7 +535,7 @@ impl RouteSpecStore {
} }
// Ensure this route is viable by checking that each node can contact the next one // Ensure this route is viable by checking that each node can contact the next one
let mut can_do_sequenced = true; let mut orderings = SequenceOrderingSet::all();
if directions.contains(Direction::Outbound) { if directions.contains(Direction::Outbound) {
let mut previous_node = published_peer_info.clone(); let mut previous_node = published_peer_info.clone();
let mut reachable = true; let mut reachable = true;
@ -554,18 +554,18 @@ impl RouteSpecStore {
break; break;
} }
// Check if we can do sequenced specifically // Check if we can do each ordering strictly
if can_do_sequenced { for ordering in orderings {
let cm = rti.get_contact_method( let cm = rti.get_contact_method(
RoutingDomain::PublicInternet, RoutingDomain::PublicInternet,
previous_node.clone(), previous_node.clone(),
current_node.clone(), current_node.clone(),
DialInfoFilter::all(), DialInfoFilter::all(),
Sequencing::EnsureOrdered, ordering.strict_sequencing(),
None, None,
); );
if matches!(cm, ContactMethod::Unreachable) { if matches!(cm, ContactMethod::Unreachable) {
can_do_sequenced = false; orderings.remove(ordering);
} }
} }
@ -593,18 +593,18 @@ impl RouteSpecStore {
break; break;
} }
// Check if we can do sequenced specifically // Check if we can do each ordering strictly
if can_do_sequenced { for ordering in orderings {
let cm = rti.get_contact_method( let cm = rti.get_contact_method(
RoutingDomain::PublicInternet, RoutingDomain::PublicInternet,
next_node.clone(), next_node.clone(),
current_node.clone(), current_node.clone(),
DialInfoFilter::all(), DialInfoFilter::all(),
Sequencing::EnsureOrdered, ordering.strict_sequencing(),
None, None,
); );
if matches!(cm, ContactMethod::Unreachable) { if matches!(cm, ContactMethod::Unreachable) {
can_do_sequenced = false; orderings.remove(ordering);
} }
} }
next_node = current_node; next_node = current_node;
@ -615,19 +615,19 @@ impl RouteSpecStore {
} }
// Keep this route // Keep this route
let route_nodes = permutation.to_vec(); let route_nodes = permutation.to_vec();
Some((route_nodes, can_do_sequenced)) Some((route_nodes, orderings))
}) as PermFunc; }) as PermFunc;
let mut route_nodes: Vec<usize> = Vec::new(); let mut route_nodes: Vec<usize> = Vec::new();
let mut can_do_sequenced: bool = true; let mut orderings = SequenceOrderingSet::new();
for start in 0..(nodes.len() - safety_spec.hop_count) { for start in 0..(nodes.len() - safety_spec.hop_count) {
// Try the permutations available starting with 'start' // Try the permutations available starting with 'start'
if let Some((rn, cds)) = if let Some((rn, ord)) =
with_route_permutations(safety_spec.hop_count, start, &mut perm_func) with_route_permutations(safety_spec.hop_count, start, &mut perm_func)
{ {
route_nodes = rn; route_nodes = rn;
can_do_sequenced = cds; orderings = ord;
break; break;
} }
} }
@ -639,7 +639,7 @@ impl RouteSpecStore {
// Got a unique route, lets build the details, register it, and return it // Got a unique route, lets build the details, register it, and return it
let hop_node_refs: Vec<NodeRef> = route_nodes.iter().map(|k| nodes[*k].clone()).collect(); let hop_node_refs: Vec<NodeRef> = route_nodes.iter().map(|k| nodes[*k].clone()).collect();
let mut route_set = BTreeMap::<BarePublicKey, RouteSpecDetail>::new(); let mut route_set = BTreeMap::<PublicKey, RouteSpecDetail>::new();
let crypto = self.crypto(); let crypto = self.crypto();
for crypto_kind in crypto_kinds.iter().copied() { for crypto_kind in crypto_kinds.iter().copied() {
let vcrypto = crypto.get(crypto_kind).unwrap(); let vcrypto = crypto.get(crypto_kind).unwrap();
@ -652,7 +652,6 @@ impl RouteSpecStore {
route_set.insert( route_set.insert(
keypair.key(), keypair.key(),
RouteSpecDetail { RouteSpecDetail {
crypto_kind,
secret_key: keypair.secret(), secret_key: keypair.secret(),
hops, hops,
}, },
@ -665,7 +664,7 @@ impl RouteSpecStore {
hop_node_refs, hop_node_refs,
directions, directions,
safety_spec.stability, safety_spec.stability,
can_do_sequenced, orderings,
automatic, automatic,
); );
@ -697,12 +696,8 @@ impl RouteSpecStore {
{ {
let inner = &*self.inner.lock(); let inner = &*self.inner.lock();
let crypto = self.crypto(); let crypto = self.crypto();
let Some(vcrypto) = crypto.get(public_key.kind()) else {
veilid_log!(self debug "can't handle route with public key: {:?}", public_key);
return None;
};
let Some(rsid) = inner.content.get_id_by_key(public_key.ref_value()) else { let Some(rsid) = inner.content.get_id_by_key(public_key) else {
veilid_log!(self debug target: "network_result", "route id does not exist: {:?}", public_key.ref_value()); veilid_log!(self debug target: "network_result", "route id does not exist: {:?}", public_key.ref_value());
return None; return None;
}; };
@ -710,7 +705,7 @@ impl RouteSpecStore {
veilid_log!(self debug "route detail does not exist: {:?}", rsid); veilid_log!(self debug "route detail does not exist: {:?}", rsid);
return None; return None;
}; };
let Some(rsd) = rssd.get_route_by_key(public_key.ref_value()) else { let Some(rsd) = rssd.get_route_by_key(public_key) else {
veilid_log!(self debug "route set {:?} does not have key: {:?}", rsid, public_key.ref_value()); veilid_log!(self debug "route set {:?} does not have key: {:?}", rsid, public_key.ref_value());
return None; return None;
}; };
@ -723,28 +718,35 @@ impl RouteSpecStore {
} }
// Validate signatures to ensure the route was handled by the nodes and not messed with // Validate signatures to ensure the route was handled by the nodes and not messed with
// This is in private route (reverse) order as we are receiving over the route // This is in private route (reverse) order as we are receiving over the route
for (hop_n, hop_node_id) in rsd.hops.iter().rev().enumerate() { for (hop_n, hop_node_ref) in rssd.hop_node_refs().iter().rev().enumerate() {
// The last hop is not signed, as the whole packet is signed // The last hop is not signed, as the whole packet is signed
if hop_n == signatures.len() { if hop_n == signatures.len() {
// Verify the node we received the routed operation from is the last hop in our route // Verify the node we received the routed operation from is the last hop in our route
if hop_node_id != last_hop_id { if !hop_node_ref.node_ids().contains(last_hop_id) {
veilid_log!(self debug "received routed operation from the wrong hop ({} should be {}) on private route {}", hop_node_id, last_hop_id, public_key); veilid_log!(self debug "received routed operation from the wrong hop ({} should be {}) on private route {}", hop_node_ref, last_hop_id, public_key);
return None; return None;
} }
} else { } else {
let Some(hop_public_key) = hop_node_ref
.public_keys(RoutingDomain::PublicInternet)
.get(signatures[hop_n].kind())
else {
veilid_log!(self debug "no hop public key matching signature kind {} at hop {} for routed operation on private route {}", signatures[hop_n].kind(), hop_n, public_key);
return None;
};
// Verify a signature for a hop node along the route // Verify a signature for a hop node along the route
match vcrypto.verify( let Some(vcrypto) = crypto.get(hop_public_key.kind()) else {
&hop_node_id.ref_value().clone().into(), veilid_log!(self debug "can't handle route hop with public key: {:?}", hop_public_key.kind());
data, return None;
signatures[hop_n].ref_value(), };
) { match vcrypto.verify(&hop_public_key, data, &signatures[hop_n]) {
Ok(true) => {} Ok(true) => {}
Ok(false) => { Ok(false) => {
veilid_log!(self debug "invalid signature for hop {} at {} on private route {}", hop_n, hop_node_id, public_key); veilid_log!(self debug "invalid signature for hop {} at {} on private route {}", hop_n, hop_node_ref, public_key);
return None; return None;
} }
Err(e) => { Err(e) => {
veilid_log!(self debug "error verifying signature for hop {} at {} on private route {}: {}", hop_n, hop_node_id, public_key, e); veilid_log!(self debug "error verifying signature for hop {} at {} on private route {}: {}", hop_n, hop_node_ref, public_key, e);
return None; return None;
} }
} }
@ -773,7 +775,7 @@ impl RouteSpecStore {
}; };
// Get the hops so we can match the route's hop length for safety // Get the hops so we can match the route's hop length for safety
// route length as well as marking nodes as unreliable if this fails // route length as well as marking nodes as unreliable if this fails
let hops = rssd.hops_node_refs(); let hops = rssd.hop_node_refs();
(key, hops) (key, hops)
}; };
@ -952,7 +954,10 @@ impl RouteSpecStore {
&& rssd.hop_count() >= min_hop_count && rssd.hop_count() >= min_hop_count
&& rssd.hop_count() <= max_hop_count && rssd.hop_count() <= max_hop_count
&& rssd.get_directions().is_superset(directions) && rssd.get_directions().is_superset(directions)
&& rssd.get_route_set_keys().kinds().contains(&crypto_kind) && rssd
.get_route_set_keys()
.iter()
.any(|x| x.kind() == crypto_kind)
&& !rssd.is_published() && !rssd.is_published()
&& !rssd.contains_nodes(avoid_nodes) && !rssd.contains_nodes(avoid_nodes)
{ {
@ -1072,7 +1077,7 @@ impl RouteSpecStore {
let Some(vcrypto) = crypto.get(crypto_kind) else { let Some(vcrypto) = crypto.get(crypto_kind) else {
apibail_generic!("crypto not supported for route"); apibail_generic!("crypto not supported for route");
}; };
let pr_pubkey = private_route.public_key.value(); let pr_pubkey = private_route.public_key.clone();
// See if we are using a safety route, if not, short circuit this operation // See if we are using a safety route, if not, short circuit this operation
let safety_spec = match safety_selection { let safety_spec = match safety_selection {
@ -1114,7 +1119,7 @@ impl RouteSpecStore {
routing_table.public_key(crypto_kind), routing_table.public_key(crypto_kind),
private_route, private_route,
), ),
secret: routing_table.secret_key(crypto_kind).value(), secret: routing_table.secret_key(crypto_kind),
first_hop, first_hop,
}); });
} }
@ -1209,12 +1214,42 @@ impl RouteSpecStore {
// Each loop mutates 'nonce', and 'blob_data' // Each loop mutates 'nonce', and 'blob_data'
let mut nonce = vcrypto.random_nonce(); let mut nonce = vcrypto.random_nonce();
// Forward order (safety route), but inside-out // Forward order (safety route), but inside-out
for h in (1..safety_rsd.hops.len()).rev() { for h in (1..safety_rssd.hop_node_refs().len()).rev() {
let hop_node_ref = safety_rssd.hop_node_ref(h).unwrap();
let Some(hop_node_id) = hop_node_ref.locked(rti).node_ids().get(crypto_kind) else {
apibail_invalid_argument!(
"no hop node id for route hop",
"crypto_kind",
crypto_kind
);
};
let Some(hop_public_key) = hop_node_ref
.locked(rti)
.public_keys(RoutingDomain::PublicInternet)
.get(crypto_kind)
else {
apibail_invalid_argument!(
"no hop public key for route hop",
"crypto_kind",
crypto_kind
);
};
let Some(hop_peer_info) = hop_node_ref
.locked(rti)
.get_peer_info(RoutingDomain::PublicInternet)
else {
apibail_invalid_argument!(
"no hop peer info for route hop",
"crypto_kind",
crypto_kind
);
};
// Get blob to encrypt for next hop // Get blob to encrypt for next hop
blob_data = { blob_data = {
// Encrypt the previous blob ENC(nonce, DH(PKhop,SKsr)) // Encrypt the previous blob ENC(nonce, DH(PKhop,SKsr))
let dh_secret = vcrypto let dh_secret = vcrypto
.cached_dh(&safety_rsd.hops[h].value().into(), &safety_rsd.secret_key) .cached_dh(&hop_public_key, &safety_rsd.secret_key)
.map_err(VeilidAPIError::internal)?; .map_err(VeilidAPIError::internal)?;
let enc_msg_data = vcrypto let enc_msg_data = vcrypto
.encrypt_aead(blob_data.as_slice(), &nonce, &dh_secret, None) .encrypt_aead(blob_data.as_slice(), &nonce, &dh_secret, None)
@ -1230,21 +1265,10 @@ impl RouteSpecStore {
let route_hop = RouteHop { let route_hop = RouteHop {
node: if optimize { node: if optimize {
// Optimized, no peer info, just the dht key // Optimized, no peer info, just the dht key
RouteNode::NodeId(safety_rsd.hops[h].clone()) RouteNode::NodeId(hop_node_id)
} else { } else {
// Full peer info, required until we are sure the route has been fully established // Full peer info, required until we are sure the route has been fully established
let node_id = safety_rsd.hops[h].clone(); RouteNode::PeerInfo(hop_peer_info)
let pi = rti
.with_node_entry(node_id, |entry| {
entry.with(rti, |_rti, e| {
e.get_peer_info(RoutingDomain::PublicInternet)
})
})
.flatten();
if pi.is_none() {
apibail_internal!("peer info should exist for route but doesn't");
}
RouteNode::PeerInfo(pi.unwrap())
}, },
next_hop: Some(route_hop_data), next_hop: Some(route_hop_data),
}; };
@ -1265,8 +1289,21 @@ impl RouteSpecStore {
} }
// Encode first RouteHopData // Encode first RouteHopData
let hop_node_ref = safety_rssd.hop_node_ref(0).unwrap();
let Some(hop_public_key) = hop_node_ref
.locked(rti)
.public_keys(RoutingDomain::PublicInternet)
.get(crypto_kind)
else {
apibail_invalid_argument!(
"no hop public key for route hop",
"crypto_kind",
crypto_kind
);
};
let dh_secret = vcrypto let dh_secret = vcrypto
.cached_dh(&safety_rsd.hops[0].value().into(), &safety_rsd.secret_key) .cached_dh(&hop_public_key, &safety_rsd.secret_key)
.map_err(VeilidAPIError::internal)?; .map_err(VeilidAPIError::internal)?;
let enc_msg_data = vcrypto let enc_msg_data = vcrypto
.encrypt_aead(blob_data.as_slice(), &nonce, &dh_secret, None) .encrypt_aead(blob_data.as_slice(), &nonce, &dh_secret, None)
@ -1282,7 +1319,7 @@ impl RouteSpecStore {
// Build safety route // Build safety route
let safety_route = SafetyRoute { let safety_route = SafetyRoute {
public_key: PublicKey::new(crypto_kind, sr_pubkey.clone()), public_key: sr_pubkey,
hops, hops,
}; };
@ -1315,7 +1352,7 @@ impl RouteSpecStore {
safety_spec: &SafetySpec, safety_spec: &SafetySpec,
direction: DirectionSet, direction: DirectionSet,
avoid_nodes: &[NodeId], avoid_nodes: &[NodeId],
) -> VeilidAPIResult<BarePublicKey> { ) -> VeilidAPIResult<PublicKey> {
// Ensure the total hop count isn't too long for our config // Ensure the total hop count isn't too long for our config
let max_route_hop_count = self.max_route_hop_count; let max_route_hop_count = self.max_route_hop_count;
if safety_spec.hop_count == 0 { if safety_spec.hop_count == 0 {
@ -1340,7 +1377,7 @@ impl RouteSpecStore {
if let Some(preferred_key) = preferred_rssd.get_route_set_keys().get(crypto_kind) { if let Some(preferred_key) = preferred_rssd.get_route_set_keys().get(crypto_kind) {
// Only use the preferred route if it doesn't contain the avoid nodes // Only use the preferred route if it doesn't contain the avoid nodes
if !preferred_rssd.contains_nodes(avoid_nodes) { if !preferred_rssd.contains_nodes(avoid_nodes) {
return Ok(preferred_key.value()); return Ok(preferred_key);
} }
} }
} }
@ -1378,8 +1415,7 @@ impl RouteSpecStore {
.unwrap() .unwrap()
.get_route_set_keys() .get_route_set_keys()
.get(crypto_kind) .get(crypto_kind)
.unwrap() .unwrap();
.value();
Ok(sr_pubkey) Ok(sr_pubkey)
} }
@ -1391,7 +1427,7 @@ impl RouteSpecStore {
crypto_kind: CryptoKind, crypto_kind: CryptoKind,
safety_spec: &SafetySpec, safety_spec: &SafetySpec,
avoid_nodes: &[NodeId], avoid_nodes: &[NodeId],
) -> VeilidAPIResult<BarePublicKey> { ) -> VeilidAPIResult<PublicKey> {
let inner = &mut *self.inner.lock(); let inner = &mut *self.inner.lock();
let routing_table = self.routing_table(); let routing_table = self.routing_table();
let rti = &mut *routing_table.inner.write(); let rti = &mut *routing_table.inner.write();
@ -1408,8 +1444,9 @@ impl RouteSpecStore {
fn assemble_private_route_inner( fn assemble_private_route_inner(
&self, &self,
key: &BarePublicKey, key: &PublicKey,
rsd: &RouteSpecDetail, rsd: &RouteSpecDetail,
rssd: &RouteSetSpecDetail,
optimized: bool, optimized: bool,
) -> VeilidAPIResult<PrivateRoute> { ) -> VeilidAPIResult<PrivateRoute> {
let routing_table = self.routing_table(); let routing_table = self.routing_table();
@ -1417,12 +1454,9 @@ impl RouteSpecStore {
// Ensure we get the crypto for it // Ensure we get the crypto for it
let crypto = routing_table.network_manager().crypto(); let crypto = routing_table.network_manager().crypto();
let Some(vcrypto) = crypto.get(rsd.crypto_kind) else { let crypto_kind = key.kind();
apibail_invalid_argument!( let Some(vcrypto) = crypto.get(crypto_kind) else {
"crypto not supported for route", apibail_invalid_argument!("crypto not supported for route", "crypto_kind", crypto_kind);
"rsd.crypto_kind",
rsd.crypto_kind
);
}; };
// Ensure our network class is valid before attempting to assemble any routes // Ensure our network class is valid before attempting to assemble any routes
@ -1434,11 +1468,11 @@ impl RouteSpecStore {
// Make innermost route hop to our own node // Make innermost route hop to our own node
let mut route_hop = RouteHop { let mut route_hop = RouteHop {
node: if optimized { node: if optimized {
let Some(node_id) = routing_table.node_ids().get(rsd.crypto_kind) else { let Some(node_id) = routing_table.node_ids().get(crypto_kind) else {
apibail_invalid_argument!( apibail_invalid_argument!(
"missing node id for crypto kind", "missing node id for crypto kind",
"rsd.crypto_kind", "crypto_kind",
rsd.crypto_kind crypto_kind
); );
}; };
RouteNode::NodeId(node_id) RouteNode::NodeId(node_id)
@ -1448,10 +1482,37 @@ impl RouteSpecStore {
next_hop: None, next_hop: None,
}; };
// Loop for each hop // Iterate hops in private route order (reverse, but inside out)
let hop_count = rsd.hops.len(); for hop_node_ref in rssd.hop_node_refs() {
// iterate hops in private route order (reverse, but inside out) let hop_node_ref = hop_node_ref.locked(rti);
for h in 0..hop_count {
let Some(hop_node_id) = hop_node_ref.node_ids().get(crypto_kind) else {
apibail_invalid_argument!(
"no hop node id for route hop",
"crypto_kind",
crypto_kind
);
};
let Some(hop_public_key) = hop_node_ref
.public_keys(RoutingDomain::PublicInternet)
.get(crypto_kind)
else {
apibail_invalid_argument!(
"no hop public key for route hop",
"crypto_kind",
crypto_kind
);
};
let Some(hop_peer_info) = hop_node_ref.get_peer_info(RoutingDomain::PublicInternet)
else {
apibail_invalid_argument!(
"no hop peer info for route hop",
"crypto_kind",
crypto_kind
);
};
// Encrypt the previous blob ENC(nonce, DH(PKhop,SKpr))
let nonce = vcrypto.random_nonce(); let nonce = vcrypto.random_nonce();
let blob_data = { let blob_data = {
@ -1461,8 +1522,7 @@ impl RouteSpecStore {
message_builder_to_vec(rh_message)? message_builder_to_vec(rh_message)?
}; };
// Encrypt the previous blob ENC(nonce, DH(PKhop,SKpr)) let dh_secret = vcrypto.cached_dh(&hop_public_key, &rsd.secret_key)?;
let dh_secret = vcrypto.cached_dh(&rsd.hops[h].value().into(), &rsd.secret_key)?;
let enc_msg_data = let enc_msg_data =
vcrypto.encrypt_aead(blob_data.as_slice(), &nonce, &dh_secret, None)?; vcrypto.encrypt_aead(blob_data.as_slice(), &nonce, &dh_secret, None)?;
let route_hop_data = RouteHopData { let route_hop_data = RouteHopData {
@ -1473,28 +1533,17 @@ impl RouteSpecStore {
route_hop = RouteHop { route_hop = RouteHop {
node: if optimized { node: if optimized {
// Optimized, no peer info, just the dht key // Optimized, no peer info, just the dht key
RouteNode::NodeId(rsd.hops[h].clone()) RouteNode::NodeId(hop_node_id)
} else { } else {
// Full peer info, required until we are sure the route has been fully established // Full peer info, required until we are sure the route has been fully established
let node_id = rsd.hops[h].clone(); RouteNode::PeerInfo(hop_peer_info)
let pi = rti
.with_node_entry(node_id, |entry| {
entry.with(rti, |_rti, e| {
e.get_peer_info(RoutingDomain::PublicInternet)
})
})
.flatten();
if pi.is_none() {
apibail_internal!("peer info should exist for route but doesn't");
}
RouteNode::PeerInfo(pi.unwrap())
}, },
next_hop: Some(route_hop_data), next_hop: Some(route_hop_data),
} }
} }
let private_route = PrivateRoute { let private_route = PrivateRoute {
public_key: PublicKey::new(rsd.crypto_kind, key.clone()), public_key: key.clone(),
hops: PrivateRouteHops::FirstHop(Box::new(route_hop)), hops: PrivateRouteHops::FirstHop(Box::new(route_hop)),
}; };
Ok(private_route) Ok(private_route)
@ -1505,7 +1554,7 @@ impl RouteSpecStore {
#[instrument(level = "trace", target = "route", skip_all)] #[instrument(level = "trace", target = "route", skip_all)]
pub fn assemble_private_route( pub fn assemble_private_route(
&self, &self,
key: &BarePublicKey, key: &PublicKey,
optimized: Option<bool>, optimized: Option<bool>,
) -> VeilidAPIResult<PrivateRoute> { ) -> VeilidAPIResult<PrivateRoute> {
let inner: &RouteSpecStoreInner = &self.inner.lock(); let inner: &RouteSpecStoreInner = &self.inner.lock();
@ -1525,7 +1574,7 @@ impl RouteSpecStore {
.get_route_by_key(key) .get_route_by_key(key)
.expect("route key index is broken"); .expect("route key index is broken");
self.assemble_private_route_inner(key, rsd, optimized) self.assemble_private_route_inner(key, rsd, rssd, optimized)
} }
/// Assemble private route set for publication /// Assemble private route set for publication
@ -1547,7 +1596,7 @@ impl RouteSpecStore {
let mut out = Vec::new(); let mut out = Vec::new();
for (key, rsd) in rssd.iter_route_set() { for (key, rsd) in rssd.iter_route_set() {
out.push(self.assemble_private_route_inner(key, rsd, optimized)?); out.push(self.assemble_private_route_inner(key, rsd, rssd, optimized)?);
} }
Ok(out) Ok(out)
} }
@ -1631,7 +1680,7 @@ impl RouteSpecStore {
} }
/// Get a route id for a route's public key /// Get a route id for a route's public key
pub fn get_route_id_for_key(&self, key: &BarePublicKey) -> Option<RouteId> { pub fn get_route_id_for_key(&self, key: &PublicKey) -> Option<RouteId> {
let inner = &mut *self.inner.lock(); let inner = &mut *self.inner.lock();
// Check for local route // Check for local route
if let Some(id) = inner.content.get_id_by_key(key) { if let Some(id) = inner.content.get_id_by_key(key) {
@ -1650,7 +1699,7 @@ impl RouteSpecStore {
/// This happens when you communicate with a private route without a safety route /// This happens when you communicate with a private route without a safety route
pub fn has_remote_private_route_seen_our_node_info( pub fn has_remote_private_route_seen_our_node_info(
&self, &self,
key: &BarePublicKey, key: &PublicKey,
published_peer_info: &PeerInfo, published_peer_info: &PeerInfo,
) -> bool { ) -> bool {
let inner = &*self.inner.lock(); let inner = &*self.inner.lock();
@ -1680,7 +1729,7 @@ impl RouteSpecStore {
/// was that node that had the private route. /// was that node that had the private route.
pub fn mark_remote_private_route_seen_our_node_info( pub fn mark_remote_private_route_seen_our_node_info(
&self, &self,
key: &BarePublicKey, key: &PublicKey,
cur_ts: Timestamp, cur_ts: Timestamp,
) -> VeilidAPIResult<()> { ) -> VeilidAPIResult<()> {
let Some(our_node_info_ts) = self let Some(our_node_info_ts) = self
@ -1711,22 +1760,14 @@ impl RouteSpecStore {
} }
/// Get the route statistics for any route we know about, local or remote /// Get the route statistics for any route we know about, local or remote
pub fn with_route_stats_mut<F, R>( pub fn with_route_stats_mut<F, R>(&self, cur_ts: Timestamp, key: &PublicKey, f: F) -> Option<R>
&self,
cur_ts: Timestamp,
key: &BarePublicKey,
f: F,
) -> Option<R>
where where
F: FnOnce(&mut RouteStats) -> R, F: FnOnce(&mut RouteStats) -> R,
{ {
let inner = &mut *self.inner.lock(); let inner = &mut *self.inner.lock();
// Check for stub route // Check for stub route
if self if self.routing_table().public_keys().contains(key) {
.routing_table()
.matches_own_node_id_key(&key.clone().into())
{
return None; return None;
} }
@ -1893,7 +1934,7 @@ impl RouteSpecStore {
Ok(RouteId::new( Ok(RouteId::new(
vcrypto.kind(), vcrypto.kind(),
BareRouteId::new(vcrypto.generate_hash(&pkbytes).bytes()), BareRouteId::new(vcrypto.generate_hash(&pkbytes).ref_value()),
)) ))
} }
@ -1927,7 +1968,7 @@ impl RouteSpecStore {
Ok(RouteId::new( Ok(RouteId::new(
vcrypto.kind(), vcrypto.kind(),
BareRouteId::new(vcrypto.generate_hash(&pkbytes).bytes()), BareRouteId::new(vcrypto.generate_hash(&pkbytes).ref_value()),
)) ))
} }
} }
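
The compile loop above layers encryption inside-out: each hop's blob wraps the previous ciphertext using a secret derived from that hop's public key (ENC(nonce, DH(PKhop, SKsr))). For readers unfamiliar with the shape of that loop, here is a minimal standalone sketch. It is not Veilid's API: `HopKey`, the toy `dh`, and the XOR `encrypt` are placeholders standing in for `cached_dh` and `encrypt_aead`, and the per-layer nonce handling is simplified.

```rust
// Illustrative sketch only, not veilid-core code.
#[derive(Clone, Copy)]
struct HopKey(u8); // stand-in for a hop's public key

fn dh(hop_key: HopKey, our_secret: u8) -> u8 {
    // Placeholder for DH(PKhop, SKsr): just mixes the two bytes.
    hop_key.0 ^ our_secret
}

fn encrypt(data: &[u8], nonce: u8, secret: u8) -> Vec<u8> {
    // Placeholder for ENC(nonce, ...): XOR keystream, not a real AEAD.
    data.iter().map(|b| b ^ nonce ^ secret).collect()
}

/// Build the layered blob inside-out: the innermost layer is encrypted to the
/// last hop, then each earlier hop's layer wraps the previous ciphertext, so
/// every hop can only peel off its own layer.
fn compile_route(hops: &[HopKey], our_secret: u8, payload: &[u8]) -> Vec<u8> {
    let mut blob = payload.to_vec();
    // Walk the hops in reverse (forward route order, but built inside-out).
    for (i, hop) in hops.iter().enumerate().rev() {
        let nonce = i as u8; // sketch only; real code draws a random nonce per layer
        let secret = dh(*hop, our_secret);
        blob = encrypt(&blob, nonce, secret);
    }
    blob
}

fn main() {
    let hops = [HopKey(0x11), HopKey(0x22), HopKey(0x33)];
    let blob = compile_route(&hops, 0x7f, b"routed operation");
    println!("layered blob: {} bytes", blob.len());
}
```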

View file

@ -15,7 +15,7 @@ fn _get_route_permutation_count(hop_count: usize) -> usize {
// hop_count = 4 -> 3! -> 6 // hop_count = 4 -> 3! -> 6
(3..hop_count).fold(2usize, |acc, x| acc * x) (3..hop_count).fold(2usize, |acc, x| acc * x)
} }
pub type PermReturnType = (Vec<usize>, bool); pub type PermReturnType = (Vec<usize>, SequenceOrderingSet);
pub type PermFunc<'t> = Box<dyn FnMut(&[usize]) -> Option<PermReturnType> + Send + 't>; pub type PermFunc<'t> = Box<dyn FnMut(&[usize]) -> Option<PermReturnType> + Send + 't>;
/// get the route permutation at particular 'perm' index, starting at the 'start' index /// get the route permutation at particular 'perm' index, starting at the 'start' index
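
The permutation callback now returns a `SequenceOrderingSet` instead of a single `can_do_sequenced` bool, so a candidate route can report every ordering mode it supports. The real type lives in veilid-core; the sketch below is a hypothetical stand-in showing the operations the diff relies on (`all`, `new`, `insert`, `remove`, `iter`, `is_empty`).

```rust
// Hypothetical sketch, not the actual veilid-core types.
use std::collections::BTreeSet;

#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord)]
enum SequenceOrdering {
    Ordered,   // connection-oriented protocols (e.g. TCP/WS)
    Unordered, // datagram protocols (e.g. UDP)
}

#[derive(Clone, Debug, Default)]
struct SequenceOrderingSet(BTreeSet<SequenceOrdering>);

impl SequenceOrderingSet {
    fn new() -> Self {
        Self(BTreeSet::new())
    }
    fn all() -> Self {
        Self([SequenceOrdering::Ordered, SequenceOrdering::Unordered]
            .into_iter()
            .collect())
    }
    fn insert(&mut self, o: SequenceOrdering) -> bool {
        self.0.insert(o)
    }
    fn remove(&mut self, o: SequenceOrdering) -> bool {
        self.0.remove(&o)
    }
    fn iter(&self) -> impl Iterator<Item = SequenceOrdering> + '_ {
        self.0.iter().copied()
    }
    fn is_empty(&self) -> bool {
        self.0.is_empty()
    }
}

fn main() {
    // Start assuming every ordering works, then drop the ones a hop cannot satisfy,
    // mirroring the allocation loop in the route spec store diff above.
    let mut orderings = SequenceOrderingSet::all();
    orderings.remove(SequenceOrdering::Ordered); // e.g. no ordered path to a hop

    let mut possible = SequenceOrderingSet::new();
    possible.insert(SequenceOrdering::Unordered);

    for o in orderings.iter() {
        println!("route still supports {:?}", o);
    }
    println!("possible is empty: {}", possible.is_empty());
}
```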

View file

@ -2,18 +2,16 @@ use super::*;
#[derive(Clone, Debug, Serialize, Deserialize)] #[derive(Clone, Debug, Serialize, Deserialize)]
pub struct RouteSpecDetail { pub struct RouteSpecDetail {
/// Crypto kind
pub crypto_kind: CryptoKind,
/// Secret key /// Secret key
pub secret_key: BareSecretKey, pub secret_key: SecretKey,
/// Route hops (node id keys) /// Route hop node ids
pub hops: Vec<NodeId>, pub hops: Vec<NodeId>,
} }
#[derive(Clone, Debug, Serialize, Deserialize)] #[derive(Clone, Debug, Serialize, Deserialize)]
pub struct RouteSetSpecDetail { pub struct RouteSetSpecDetail {
/// Route set per crypto kind /// Routes in the set
route_set: BTreeMap<BarePublicKey, RouteSpecDetail>, route_set: BTreeMap<PublicKey, RouteSpecDetail>,
/// Route noderefs /// Route noderefs
#[serde(skip)] #[serde(skip)]
hop_node_refs: Vec<NodeRef>, hop_node_refs: Vec<NodeRef>,
@ -26,7 +24,7 @@ pub struct RouteSetSpecDetail {
/// Stability preference (prefer reliable nodes over faster) /// Stability preference (prefer reliable nodes over faster)
stability: Stability, stability: Stability,
/// Sequencing capability (connection oriented protocols vs datagram) /// Sequencing capability (connection oriented protocols vs datagram)
can_do_sequenced: bool, orderings: SequenceOrderingSet,
/// Stats /// Stats
stats: RouteStats, stats: RouteStats,
/// Automatically allocated route vs manually allocated route /// Automatically allocated route vs manually allocated route
@ -36,11 +34,11 @@ pub struct RouteSetSpecDetail {
impl RouteSetSpecDetail { impl RouteSetSpecDetail {
pub fn new( pub fn new(
cur_ts: Timestamp, cur_ts: Timestamp,
route_set: BTreeMap<BarePublicKey, RouteSpecDetail>, route_set: BTreeMap<PublicKey, RouteSpecDetail>,
hop_node_refs: Vec<NodeRef>, hop_node_refs: Vec<NodeRef>,
directions: DirectionSet, directions: DirectionSet,
stability: Stability, stability: Stability,
can_do_sequenced: bool, orderings: SequenceOrderingSet,
automatic: bool, automatic: bool,
) -> Self { ) -> Self {
Self { Self {
@ -49,32 +47,50 @@ impl RouteSetSpecDetail {
published: false, published: false,
directions, directions,
stability, stability,
can_do_sequenced, orderings,
stats: RouteStats::new(cur_ts), stats: RouteStats::new(cur_ts),
automatic, automatic,
} }
} }
pub fn get_route_by_key(&self, key: &BarePublicKey) -> Option<&RouteSpecDetail> { #[expect(dead_code)]
pub fn len(&self) -> usize {
self.route_set.len()
}
#[expect(dead_code)]
pub fn is_empty(&self) -> bool {
self.route_set.is_empty()
}
pub fn get_route_by_key(&self, key: &PublicKey) -> Option<&RouteSpecDetail> {
self.route_set.get(key) self.route_set.get(key)
} }
pub fn get_route_set_keys(&self) -> PublicKeyGroup { pub fn get_route_set_keys(&self) -> PublicKeyGroup {
let mut tks = PublicKeyGroup::new(); let mut tks = PublicKeyGroup::new();
for (k, v) in &self.route_set { for k in self.route_set.keys() {
tks.add(PublicKey::new(v.crypto_kind, k.clone())); tks.add(k.clone());
} }
tks tks
} }
pub fn get_best_route_set_key(&self) -> Option<BarePublicKey> { pub fn get_best_route_set_key(&self) -> Option<PublicKey> {
self.get_route_set_keys().best().map(|k| k.value()) self.get_route_set_keys().first().cloned()
} }
pub fn set_hop_node_refs(&mut self, node_refs: Vec<NodeRef>) { pub fn set_hop_node_refs(&mut self, node_refs: Vec<NodeRef>) {
self.hop_node_refs = node_refs; self.hop_node_refs = node_refs;
} }
pub fn iter_route_set( pub fn iter_route_set(
&self, &self,
) -> alloc::collections::btree_map::Iter<'_, BarePublicKey, RouteSpecDetail> { ) -> alloc::collections::btree_map::Iter<'_, PublicKey, RouteSpecDetail> {
self.route_set.iter() self.route_set.iter()
} }
#[expect(dead_code)]
pub fn iter_route_set_mut(
&mut self,
) -> alloc::collections::btree_map::IterMut<'_, PublicKey, RouteSpecDetail> {
self.route_set.iter_mut()
}
#[expect(dead_code)]
pub fn remove_route(&mut self, key: &PublicKey) {
self.route_set.remove(key);
}
pub fn get_stats(&self) -> &RouteStats { pub fn get_stats(&self) -> &RouteStats {
&self.stats &self.stats
} }
@ -90,7 +106,7 @@ impl RouteSetSpecDetail {
pub fn hop_count(&self) -> usize { pub fn hop_count(&self) -> usize {
self.hop_node_refs.len() self.hop_node_refs.len()
} }
pub fn hops_node_refs(&self) -> Vec<NodeRef> { pub fn hop_node_refs(&self) -> Vec<NodeRef> {
self.hop_node_refs.clone() self.hop_node_refs.clone()
} }
pub fn hop_node_ref(&self, idx: usize) -> Option<NodeRef> { pub fn hop_node_ref(&self, idx: usize) -> Option<NodeRef> {
@ -103,16 +119,17 @@ impl RouteSetSpecDetail {
self.directions self.directions
} }
pub fn is_sequencing_match(&self, sequencing: Sequencing) -> bool { pub fn is_sequencing_match(&self, sequencing: Sequencing) -> bool {
match sequencing { for ordering in self.orderings.iter() {
Sequencing::NoPreference => true, if sequencing.matches_ordering(ordering) {
Sequencing::PreferOrdered => true, return true;
Sequencing::EnsureOrdered => self.can_do_sequenced, }
} }
false
} }
pub fn contains_nodes(&self, nodes: &[NodeId]) -> bool { pub fn contains_nodes(&self, nodes: &[NodeId]) -> bool {
for tk in nodes { for tk in nodes {
for rsd in self.route_set.values() { for rsd in self.route_set.values() {
if rsd.crypto_kind == tk.kind() && rsd.hops.contains(tk) { if rsd.hops.contains(tk) {
return true; return true;
} }
} }
@ -122,23 +139,15 @@ impl RouteSetSpecDetail {
pub fn is_automatic(&self) -> bool { pub fn is_automatic(&self) -> bool {
self.automatic self.automatic
} }
/// Generate a key for the cache that can be used to uniquely identify this route's contents /// Generate a key for the cache that can be used to uniquely identify this route's contents
pub fn make_cache_key(&self, rti: &RoutingTableInner) -> Option<Vec<u8>> { pub fn make_cache_key(&self, rti: &RoutingTableInner) -> Vec<u8> {
let hops = &self.hop_node_refs; let hops = &self.hop_node_refs;
let mut cache: Vec<u8> = Vec::with_capacity(hops.len() * 32); // xxx hack: this code is going away soon anyway
let mut cachelen = 0usize;
let mut nodebytes = Vec::<BareNodeId>::with_capacity(hops.len());
for hop in hops { for hop in hops {
let b = hop.locked(rti).best_node_id()?.value(); if let Some(b) = hop.locked(rti).best_node_id() {
cachelen += b.len(); cache.extend_from_slice(b.ref_value());
nodebytes.push(b); }
} }
let mut cache: Vec<u8> = Vec::with_capacity(cachelen); cache
for b in nodebytes {
cache.extend_from_slice(&b);
}
Some(cache)
} }
} }
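
The dropped `crypto_kind` field follows from the commit's typed-key change: once the route set is keyed by a `PublicKey` that carries its `CryptoKind`, storing the kind again in `RouteSpecDetail` would be redundant. A minimal sketch of that idea, with simplified stand-in types (the field names and byte sizes here are assumptions, not the veilid-core definitions):

```rust
// Illustrative stand-in for the "typed key" idea, not the real API.
use std::collections::BTreeMap;

#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord)]
struct CryptoKind([u8; 4]); // e.g. b"VLD0"

#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord)]
struct BarePublicKey([u8; 32]);

/// A public key that always carries its crypto kind, so callers can no longer
/// pair a bare key with the wrong kind (or forget the kind entirely).
#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord)]
struct PublicKey {
    kind: CryptoKind,
    value: BarePublicKey,
}

impl PublicKey {
    fn kind(&self) -> CryptoKind {
        self.kind
    }
    fn ref_value(&self) -> &BarePublicKey {
        &self.value
    }
}

#[derive(Debug)]
struct RouteSpecDetail {
    // No separate `crypto_kind` field: the typed map key already carries it.
    secret_key: [u8; 32],
}

fn main() {
    let key = PublicKey {
        kind: CryptoKind(*b"VLD0"),
        value: BarePublicKey([0u8; 32]),
    };
    let mut route_set: BTreeMap<PublicKey, RouteSpecDetail> = BTreeMap::new();
    route_set.insert(key, RouteSpecDetail { secret_key: [0u8; 32] });
    // The kind can be recovered from the key itself when iterating.
    for (k, detail) in &route_set {
        println!(
            "kind {:?}, key byte {}, secret byte {}",
            k.kind(),
            k.ref_value().0[0],
            detail.secret_key[0]
        );
    }
}
```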

View file

@ -4,8 +4,8 @@ impl_veilid_log_facility!("rtab");
// Compiled route key for caching // Compiled route key for caching
#[derive(Clone, Debug, Hash, PartialEq, Eq, PartialOrd, Ord)] #[derive(Clone, Debug, Hash, PartialEq, Eq, PartialOrd, Ord)]
struct CompiledRouteCacheKey { struct CompiledRouteCacheKey {
sr_pubkey: BarePublicKey, sr_pubkey: PublicKey,
pr_pubkey: BarePublicKey, pr_pubkey: PublicKey,
} }
/// Compiled route (safety route + private route) /// Compiled route (safety route + private route)
@ -14,7 +14,7 @@ pub struct CompiledRoute {
/// The safety route attached to the private route /// The safety route attached to the private route
pub safety_route: SafetyRoute, pub safety_route: SafetyRoute,
/// The secret used to encrypt the message payload /// The secret used to encrypt the message payload
pub secret: BareSecretKey, pub secret: SecretKey,
/// The node ref to the first hop in the compiled route /// The node ref to the first hop in the compiled route
/// filtered to the safetyselection it was compiled with /// filtered to the safetyselection it was compiled with
pub first_hop: FilteredNodeRef, pub first_hop: FilteredNodeRef,
@ -34,7 +34,7 @@ pub struct RouteSpecStoreCache {
/// Remote private routes we've imported and statistics /// Remote private routes we've imported and statistics
remote_private_route_set_cache: LruCache<RouteId, RemotePrivateRouteInfo>, remote_private_route_set_cache: LruCache<RouteId, RemotePrivateRouteInfo>,
/// Remote private route ids indexed by route's public key /// Remote private route ids indexed by route's public key
remote_private_routes_by_key: HashMap<BarePublicKey, RouteId>, remote_private_routes_by_key: HashMap<PublicKey, RouteId>,
/// Compiled route cache /// Compiled route cache
compiled_route_cache: LruCache<CompiledRouteCacheKey, SafetyRoute>, compiled_route_cache: LruCache<CompiledRouteCacheKey, SafetyRoute>,
/// List of dead allocated routes /// List of dead allocated routes
@ -62,9 +62,7 @@ impl RouteSpecStoreCache {
/// add an allocated route set to our cache via its cache key /// add an allocated route set to our cache via its cache key
pub fn add_to_cache(&mut self, rti: &RoutingTableInner, rssd: &RouteSetSpecDetail) { pub fn add_to_cache(&mut self, rti: &RoutingTableInner, rssd: &RouteSetSpecDetail) {
let Some(cache_key) = rssd.make_cache_key(rti) else { let cache_key = rssd.make_cache_key(rti);
panic!("all routes should have a cache key");
};
if !self.hop_cache.insert(cache_key) { if !self.hop_cache.insert(cache_key) {
panic!("route should never be inserted twice"); panic!("route should never be inserted twice");
} }
@ -94,9 +92,7 @@ impl RouteSpecStoreCache {
id: RouteId, id: RouteId,
rssd: &RouteSetSpecDetail, rssd: &RouteSetSpecDetail,
) -> bool { ) -> bool {
let Some(cache_key) = rssd.make_cache_key(rti) else { let cache_key = rssd.make_cache_key(rti);
panic!("all routes should have a cache key");
};
// Remove from hop cache // Remove from hop cache
if !self.hop_cache.remove(&cache_key) { if !self.hop_cache.remove(&cache_key) {
@ -161,7 +157,7 @@ impl RouteSpecStoreCache {
// also store in id by key table // also store in id by key table
for private_route in rprinfo.get_private_routes() { for private_route in rprinfo.get_private_routes() {
self.remote_private_routes_by_key self.remote_private_routes_by_key
.insert(private_route.public_key.value(), id.clone()); .insert(private_route.public_key.clone(), id.clone());
} }
let mut dead = None; let mut dead = None;
@ -179,9 +175,9 @@ impl RouteSpecStoreCache {
for dead_private_route in dead_rpri.get_private_routes() { for dead_private_route in dead_rpri.get_private_routes() {
let _ = self let _ = self
.remote_private_routes_by_key .remote_private_routes_by_key
.remove(dead_private_route.public_key.ref_value()) .remove(&dead_private_route.public_key)
.unwrap(); .unwrap();
self.invalidate_compiled_route_cache(dead_private_route.public_key.ref_value()); self.invalidate_compiled_route_cache(&dead_private_route.public_key);
} }
self.dead_remote_routes.push(dead_id); self.dead_remote_routes.push(dead_id);
} }
@ -265,7 +261,7 @@ impl RouteSpecStoreCache {
} }
/// look up a remote private route id by one of the route public keys /// look up a remote private route id by one of the route public keys
pub fn get_remote_private_route_id_by_key(&self, key: &BarePublicKey) -> Option<RouteId> { pub fn get_remote_private_route_id_by_key(&self, key: &PublicKey) -> Option<RouteId> {
self.remote_private_routes_by_key.get(key).cloned() self.remote_private_routes_by_key.get(key).cloned()
} }
@ -307,22 +303,18 @@ impl RouteSpecStoreCache {
for private_route in rprinfo.get_private_routes() { for private_route in rprinfo.get_private_routes() {
let _ = self let _ = self
.remote_private_routes_by_key .remote_private_routes_by_key
.remove(private_route.public_key.ref_value()) .remove(&private_route.public_key)
.unwrap(); .unwrap();
self.invalidate_compiled_route_cache(private_route.public_key.ref_value()); self.invalidate_compiled_route_cache(&private_route.public_key);
} }
self.dead_remote_routes.push(id); self.dead_remote_routes.push(id);
true true
} }
/// Stores a compiled 'safety + private' route so we don't have to compile it again later /// Stores a compiled 'safety + private' route so we don't have to compile it again later
pub fn add_to_compiled_route_cache( pub fn add_to_compiled_route_cache(&mut self, pr_pubkey: PublicKey, safety_route: SafetyRoute) {
&mut self,
pr_pubkey: BarePublicKey,
safety_route: SafetyRoute,
) {
let key = CompiledRouteCacheKey { let key = CompiledRouteCacheKey {
sr_pubkey: safety_route.public_key.value(), sr_pubkey: safety_route.public_key.clone(),
pr_pubkey: pr_pubkey.clone(), pr_pubkey: pr_pubkey.clone(),
}; };
@ -334,8 +326,8 @@ impl RouteSpecStoreCache {
/// Looks up an existing compiled route from the safety and private route components /// Looks up an existing compiled route from the safety and private route components
pub fn lookup_compiled_route_cache( pub fn lookup_compiled_route_cache(
&mut self, &mut self,
sr_pubkey: BarePublicKey, sr_pubkey: PublicKey,
pr_pubkey: BarePublicKey, pr_pubkey: PublicKey,
) -> Option<SafetyRoute> { ) -> Option<SafetyRoute> {
let key = CompiledRouteCacheKey { let key = CompiledRouteCacheKey {
sr_pubkey, sr_pubkey,
@ -345,7 +337,7 @@ impl RouteSpecStoreCache {
} }
/// When routes are dropped, they should be removed from the compiled route cache /// When routes are dropped, they should be removed from the compiled route cache
fn invalidate_compiled_route_cache(&mut self, dead_key: &BarePublicKey) { fn invalidate_compiled_route_cache(&mut self, dead_key: &PublicKey) {
let mut dead_entries = Vec::new(); let mut dead_entries = Vec::new();
for (k, _v) in self.compiled_route_cache.iter() { for (k, _v) in self.compiled_route_cache.iter() {
if k.sr_pubkey == *dead_key || k.pr_pubkey == *dead_key { if k.sr_pubkey == *dead_key || k.pr_pubkey == *dead_key {
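
The compiled-route cache is keyed by the pair of safety-route and private-route public keys, and invalidation removes every entry that references a dead key on either side. A small sketch of that shape, with a plain `HashMap` standing in for the `LruCache` used in the real code and simplified key types (all names here are assumptions):

```rust
// Sketch of the compiled-route cache shape, not the veilid-core implementation.
use std::collections::HashMap;

#[derive(Clone, Debug, Hash, PartialEq, Eq)]
struct PublicKey(String); // stand-in for a typed public key

#[derive(Clone, Debug, Hash, PartialEq, Eq)]
struct CompiledRouteCacheKey {
    sr_pubkey: PublicKey,
    pr_pubkey: PublicKey,
}

#[derive(Clone, Debug)]
struct SafetyRoute; // stand-in for the compiled safety route

#[derive(Default)]
struct Cache {
    compiled_route_cache: HashMap<CompiledRouteCacheKey, SafetyRoute>,
}

impl Cache {
    fn add(&mut self, sr_pubkey: PublicKey, pr_pubkey: PublicKey, sr: SafetyRoute) {
        self.compiled_route_cache
            .insert(CompiledRouteCacheKey { sr_pubkey, pr_pubkey }, sr);
    }
    /// When a route dies, drop every compiled entry that referenced it on
    /// either the safety-route or private-route side.
    fn invalidate(&mut self, dead_key: &PublicKey) {
        self.compiled_route_cache
            .retain(|k, _| k.sr_pubkey != *dead_key && k.pr_pubkey != *dead_key);
    }
}

fn main() {
    let mut cache = Cache::default();
    cache.add(PublicKey("sr1".into()), PublicKey("pr1".into()), SafetyRoute);
    cache.add(PublicKey("sr2".into()), PublicKey("pr1".into()), SafetyRoute);
    cache.invalidate(&PublicKey("pr1".into()));
    println!("entries left: {}", cache.compiled_route_cache.len());
}
```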

View file

@ -4,7 +4,7 @@ use super::*;
#[derive(Debug, Clone, Default, Serialize, Deserialize)] #[derive(Debug, Clone, Default, Serialize, Deserialize)]
pub(super) struct RouteSpecStoreContent { pub(super) struct RouteSpecStoreContent {
/// All of the route sets we have allocated so far indexed by key (many to one) /// All of the route sets we have allocated so far indexed by key (many to one)
id_by_key: HashMap<BarePublicKey, RouteId>, id_by_key: HashMap<PublicKey, RouteId>,
/// All of the route sets we have allocated so far /// All of the route sets we have allocated so far
details: HashMap<RouteId, RouteSetSpecDetail>, details: HashMap<RouteId, RouteSetSpecDetail>,
} }
@ -44,8 +44,9 @@ impl RouteSpecStoreContent {
// Apply noderefs // Apply noderefs
rssd.set_hop_node_refs(hop_node_refs); rssd.set_hop_node_refs(hop_node_refs);
} }
for id in dead_ids { for id in dead_ids {
veilid_log!(table_store trace "no entry, killing off private route: {}", id); veilid_log!(table_store trace "no entry, killing route set: {}", id);
content.remove_detail(&id); content.remove_detail(&id);
} }
@ -86,7 +87,7 @@ impl RouteSpecStoreContent {
pub fn get_detail_mut(&mut self, id: &RouteId) -> Option<&mut RouteSetSpecDetail> { pub fn get_detail_mut(&mut self, id: &RouteId) -> Option<&mut RouteSetSpecDetail> {
self.details.get_mut(id) self.details.get_mut(id)
} }
pub fn get_id_by_key(&self, key: &BarePublicKey) -> Option<RouteId> { pub fn get_id_by_key(&self, key: &PublicKey) -> Option<RouteId> {
self.id_by_key.get(key).cloned() self.id_by_key.get(key).cloned()
} }
// pub fn iter_ids(&self) -> std::collections::hash_map::Keys<RouteId, RouteSetSpecDetail> { // pub fn iter_ids(&self) -> std::collections::hash_map::Keys<RouteId, RouteSetSpecDetail> {

View file

@ -840,6 +840,7 @@ impl RoutingTableInner {
} }
/// Resolve an existing routing table entry and call a function on its entry without using a noderef /// Resolve an existing routing table entry and call a function on its entry without using a noderef
#[expect(dead_code)]
pub fn with_node_entry<F, R>(&self, node_id: NodeId, f: F) -> Option<R> pub fn with_node_entry<F, R>(&self, node_id: NodeId, f: F) -> Option<R>
where where
F: FnOnce(Arc<BucketEntry>) -> R, F: FnOnce(Arc<BucketEntry>) -> R,
@ -886,7 +887,7 @@ impl RoutingTableInner {
let node_info = peer_info.node_info(); let node_info = peer_info.node_info();
let relay_ids = node_info.relay_ids(); let relay_ids = node_info.relay_ids();
let node_ids = peer_info.node_ids().clone(); let node_ids = peer_info.node_ids().clone();
if node_ids.contains_any(&relay_ids) { if node_ids.contains_any_from_slice(&relay_ids) {
bail!("node can not be its own relay"); bail!("node can not be its own relay");
} }
@ -972,7 +973,7 @@ impl RoutingTableInner {
"routing domains should be the same here", "routing domains should be the same here",
); );
let mut node_ids = old_pi.node_ids().clone(); let mut node_ids = old_pi.node_ids().clone();
node_ids.add_all(new_pi.node_ids()); node_ids.add_all_from_slice(new_pi.node_ids());
(new_pi.routing_domain(), node_ids) (new_pi.routing_domain(), node_ids)
} }
}; };
@ -986,7 +987,7 @@ impl RoutingTableInner {
.iter() .iter()
.flat_map(|rdr| rdr.relay_node.locked(rti).node_ids().to_vec()) .flat_map(|rdr| rdr.relay_node.locked(rti).node_ids().to_vec())
.collect::<Vec<_>>(); .collect::<Vec<_>>();
if node_ids.contains_any(&our_relay_node_ids) { if node_ids.contains_any_from_slice(&our_relay_node_ids) {
rd.refresh(); rd.refresh();
rd.publish_peer_info(rti); rd.publish_peer_info(rti);
} }
@ -1102,6 +1103,7 @@ impl RoutingTableInner {
} }
#[instrument(level = "trace", skip_all)] #[instrument(level = "trace", skip_all)]
#[expect(dead_code)]
pub fn transform_to_peer_info( pub fn transform_to_peer_info(
&self, &self,
routing_domain: RoutingDomain, routing_domain: RoutingDomain,
@ -1260,7 +1262,7 @@ impl RoutingTableInner {
pub fn find_preferred_closest_nodes<T, O>( pub fn find_preferred_closest_nodes<T, O>(
&self, &self,
node_count: usize, node_count: usize,
hash_coordinate: HashDigest, hash_coordinate: HashCoordinate,
mut filters: VecDeque<RoutingTableEntryFilter>, mut filters: VecDeque<RoutingTableEntryFilter>,
transform: T, transform: T,
) -> VeilidAPIResult<Vec<O>> ) -> VeilidAPIResult<Vec<O>>
@ -1272,10 +1274,6 @@ impl RoutingTableInner {
// Get the crypto kind // Get the crypto kind
let crypto_kind = hash_coordinate.kind(); let crypto_kind = hash_coordinate.kind();
let crypto = self.crypto();
let Some(vcrypto) = crypto.get(crypto_kind) else {
apibail_generic!("invalid crypto kind");
};
// Filter to ensure entries support the crypto kind in use // Filter to ensure entries support the crypto kind in use
// always filter out dead and punished nodes // always filter out dead and punished nodes
@ -1334,14 +1332,14 @@ impl RoutingTableInner {
}; };
// distance is the next metric, closer nodes first // distance is the next metric, closer nodes first
let da = vcrypto.distance( let da = a_key
&BareHashDigest::from(a_key.value()), .ref_value()
hash_coordinate.ref_value(), .to_bare_hash_coordinate()
); .distance(hash_coordinate.ref_value());
let db = vcrypto.distance( let db = b_key
&BareHashDigest::from(b_key.value()), .ref_value()
hash_coordinate.ref_value(), .to_bare_hash_coordinate()
); .distance(hash_coordinate.ref_value());
da.cmp(&db) da.cmp(&db)
}; };
@ -1354,11 +1352,11 @@ impl RoutingTableInner {
#[instrument(level = "trace", skip_all)] #[instrument(level = "trace", skip_all)]
pub fn sort_and_clean_closest_noderefs( pub fn sort_and_clean_closest_noderefs(
&self, &self,
node_id: HashDigest, hash_coordinate: HashCoordinate,
closest_nodes: &[NodeRef], closest_nodes: &[NodeRef],
) -> Vec<NodeRef> { ) -> Vec<NodeRef> {
// Lock all noderefs // Lock all noderefs
let kind = node_id.kind(); let kind = hash_coordinate.kind();
let mut closest_nodes_locked: Vec<LockedNodeRef> = closest_nodes let mut closest_nodes_locked: Vec<LockedNodeRef> = closest_nodes
.iter() .iter()
.filter_map(|nr| { .filter_map(|nr| {
@ -1372,8 +1370,7 @@ impl RoutingTableInner {
.collect(); .collect();
// Sort closest // Sort closest
let crypto = self.crypto(); let sort = make_closest_noderef_sort(hash_coordinate);
let sort = make_closest_noderef_sort(&crypto, node_id);
closest_nodes_locked.sort_by(sort); closest_nodes_locked.sort_by(sort);
// Unlock noderefs // Unlock noderefs
@ -1530,14 +1527,10 @@ impl RoutingTableInner {
} }
} }
#[instrument(level = "trace", skip_all)] pub fn make_closest_noderef_sort(
pub fn make_closest_noderef_sort<'a>( hash_coordinate: HashCoordinate,
crypto: &'a Crypto, ) -> impl Fn(&LockedNodeRef, &LockedNodeRef) -> core::cmp::Ordering {
hash_coordinate: HashDigest,
) -> impl Fn(&LockedNodeRef, &LockedNodeRef) -> core::cmp::Ordering + 'a {
let kind = hash_coordinate.kind(); let kind = hash_coordinate.kind();
// Get cryptoversion to check distance with
let vcrypto = crypto.get(kind).unwrap();
move |a: &LockedNodeRef, b: &LockedNodeRef| -> core::cmp::Ordering { move |a: &LockedNodeRef, b: &LockedNodeRef| -> core::cmp::Ordering {
// same nodes are always the same // same nodes are always the same
@ -1552,38 +1545,42 @@ pub fn make_closest_noderef_sort<'a>(
let b_key = b_entry.node_ids().get(kind).unwrap(); let b_key = b_entry.node_ids().get(kind).unwrap();
// distance is the next metric, closer nodes first // distance is the next metric, closer nodes first
let da = vcrypto.distance( let da = a_key
&BareHashDigest::from(a_key.value()), .ref_value()
hash_coordinate.ref_value(), .to_bare_hash_coordinate()
); .distance(hash_coordinate.ref_value());
let db = vcrypto.distance( let db = b_key
&BareHashDigest::from(b_key.value()), .ref_value()
hash_coordinate.ref_value(), .to_bare_hash_coordinate()
); .distance(hash_coordinate.ref_value());
da.cmp(&db) da.cmp(&db)
}) })
}) })
} }
} }
pub fn make_closest_node_id_sort( pub fn make_closest_bare_node_id_sort(
crypto: &Crypto, bare_hash_coordinate: BareHashCoordinate,
hash_coordinate: HashDigest, ) -> impl Fn(&BareNodeId, &BareNodeId) -> core::cmp::Ordering {
) -> impl Fn(&BareNodeId, &BareNodeId) -> core::cmp::Ordering + '_ {
let kind = hash_coordinate.kind();
// Get cryptoversion to check distance with
let vcrypto = crypto.get(kind).unwrap();
move |a: &BareNodeId, b: &BareNodeId| -> core::cmp::Ordering { move |a: &BareNodeId, b: &BareNodeId| -> core::cmp::Ordering {
// distance is the next metric, closer nodes first let da = a.to_bare_hash_coordinate().distance(&bare_hash_coordinate);
let da = vcrypto.distance( let db = b.to_bare_hash_coordinate().distance(&bare_hash_coordinate);
&BareHashDigest::from(a.bytes()), da.cmp(&db)
hash_coordinate.ref_value(), }
); }
let db = vcrypto.distance(
&BareHashDigest::from(b.bytes()), pub fn make_closest_node_id_sort(
hash_coordinate.ref_value(), hash_coordinate: HashCoordinate,
); ) -> impl Fn(&NodeId, &NodeId) -> core::cmp::Ordering {
move |a: &NodeId, b: &NodeId| -> core::cmp::Ordering {
let da = a
.ref_value()
.to_bare_hash_coordinate()
.distance(hash_coordinate.ref_value());
let db = b
.ref_value()
.to_bare_hash_coordinate()
.distance(hash_coordinate.ref_value());
da.cmp(&db) da.cmp(&db)
} }
} }
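
The closest-node sorts above no longer go through a `CryptoSystem`; they compare XOR distances on the hash coordinates directly. A minimal sketch of that Kademlia-style comparison, assuming 32-byte identifiers (the helper names and types below are illustrative, not the veilid-core API):

```rust
// Illustrative sketch of XOR-distance sorting.
type Bytes32 = [u8; 32];

/// XOR distance, compared lexicographically (Kademlia-style closeness).
fn distance(a: &Bytes32, b: &Bytes32) -> Bytes32 {
    let mut out = [0u8; 32];
    for i in 0..32 {
        out[i] = a[i] ^ b[i];
    }
    out
}

fn sort_closest(target: &Bytes32, node_ids: &mut [Bytes32]) {
    node_ids.sort_by(|a, b| distance(a, target).cmp(&distance(b, target)));
}

fn main() {
    let target = [0u8; 32];
    let mut ids = [[0xffu8; 32], [0x01u8; 32], [0x0fu8; 32]];
    sort_closest(&target, &mut ids);
    // Closest (smallest XOR distance) first: 0x01.., then 0x0f.., then 0xff..
    println!("closest starts with 0x{:02x}", ids[0][0]);
}
```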

View file

@ -58,7 +58,6 @@ pub trait RoutingDomainDetail {
) -> ContactMethod; ) -> ContactMethod;
// Bootstrap peers // Bootstrap peers
#[expect(dead_code)]
fn get_bootstrap_peers(&self) -> Vec<NodeRef>; fn get_bootstrap_peers(&self) -> Vec<NodeRef>;
fn clear_bootstrap_peers(&self); fn clear_bootstrap_peers(&self);
fn add_bootstrap_peer(&self, bootstrap_peer: NodeRef); fn add_bootstrap_peer(&self, bootstrap_peer: NodeRef);

View file

@ -69,7 +69,10 @@ impl PublicInternetRoutingDomainDetail {
// Note that relay_peer_info could be node_a, in which case a connection already exists // Note that relay_peer_info could be node_a, in which case a connection already exists
// and we only get here if the connection had dropped, in which case node_a is unreachable until // and we only get here if the connection had dropped, in which case node_a is unreachable until
// it gets a new relay connection up // it gets a new relay connection up
if node_b_relay.node_ids().contains_any(ctx.peer_a.node_ids()) { if node_b_relay
.node_ids()
.contains_any_from_slice(ctx.peer_a.node_ids())
{
return Some(ContactMethod::Existing); return Some(ContactMethod::Existing);
} }
@ -163,7 +166,10 @@ impl PublicInternetRoutingDomainDetail {
// Note that relay_peer_info could be node_a, in which case a connection already exists // Note that relay_peer_info could be node_a, in which case a connection already exists
// and we only get here if the connection had dropped, in which case node_b is unreachable until // and we only get here if the connection had dropped, in which case node_b is unreachable until
// it gets a new relay connection up // it gets a new relay connection up
if node_b_relay.node_ids().contains_any(ctx.peer_a.node_ids()) { if node_b_relay
.node_ids()
.contains_any_from_slice(ctx.peer_a.node_ids())
{
return Some(ContactMethod::Existing); return Some(ContactMethod::Existing);
} }
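
Several hunks in this commit rename `contains_any` to `contains_any_from_slice` for checking whether any id in a slice appears in a node-id group. The behavior is the simple set-overlap test sketched below; the `NodeId` and `NodeIdGroup` types here are simplified assumptions, not the real group API.

```rust
// Minimal sketch of the contains_any_from_slice idea.
#[derive(Clone, Debug, PartialEq, Eq)]
struct NodeId(u64); // stand-in for a typed node id

struct NodeIdGroup(Vec<NodeId>);

impl NodeIdGroup {
    fn contains(&self, id: &NodeId) -> bool {
        self.0.contains(id)
    }
    /// True if any id in `others` is also in this group.
    fn contains_any_from_slice(&self, others: &[NodeId]) -> bool {
        others.iter().any(|id| self.contains(id))
    }
}

fn main() {
    let ours = NodeIdGroup(vec![NodeId(1), NodeId(2)]);
    let relay_ids = [NodeId(2), NodeId(9)];
    // e.g. a "node can not be its own relay" style check
    println!("overlap: {}", ours.contains_any_from_slice(&relay_ids));
}
```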

View file

@ -27,7 +27,7 @@ pub struct RelayStatus {
/// All protocol/address types requiring inbound relays for this node /// All protocol/address types requiring inbound relays for this node
pub need_relay_protocols: HashSet<(ProtocolType, AddressType)>, pub need_relay_protocols: HashSet<(ProtocolType, AddressType)>,
/// Ordering modes we still need for relaying, per address type /// Ordering modes we still need for relaying, per address type
pub need_relay_orderings: HashSet<(bool, AddressType)>, pub need_relay_orderings: HashSet<(SequenceOrdering, AddressType)>,
/// All protocol/address types we can offer inbound relaying for from this node /// All protocol/address types we can offer inbound relaying for from this node
#[expect(dead_code)] #[expect(dead_code)]
pub can_relay_protocols: HashSet<(ProtocolType, AddressType)>, pub can_relay_protocols: HashSet<(ProtocolType, AddressType)>,
@ -71,7 +71,7 @@ impl RelayStatus {
let ordering_modes = node_info let ordering_modes = node_info
.outbound_protocols() .outbound_protocols()
.iter() .iter()
.map(|x| x.is_ordered()) .map(|x| x.sequence_ordering())
.collect::<HashSet<_>>(); .collect::<HashSet<_>>();
// Get the dial info list in preferred deterministic order // Get the dial info list in preferred deterministic order
@ -108,7 +108,7 @@ impl RelayStatus {
for at in AddressTypeSet::all() { for at in AddressTypeSet::all() {
for pt in ProtocolTypeSet::all() { for pt in ProtocolTypeSet::all() {
// if we can't use this protocol because we don't have its ordering mode enabled at all, then we should exclude it // if we can't use this protocol because we don't have its ordering mode enabled at all, then we should exclude it
if !ordering_modes.contains(&pt.is_ordered()) { if !ordering_modes.contains(&pt.sequence_ordering()) {
continue; continue;
} }
@ -117,7 +117,7 @@ impl RelayStatus {
can_relay_protocols.insert((pt, at)); can_relay_protocols.insert((pt, at));
// Note the relay ordering // Note the relay ordering
need_relay_orderings.remove(&(pt.is_ordered(), at)); need_relay_orderings.remove(&(pt.sequence_ordering(), at));
} }
if needs_hairpin_nat_support || !direct_did_protoaddrs.contains(&(pt, at)) { if needs_hairpin_nat_support || !direct_did_protoaddrs.contains(&(pt, at)) {
@ -228,7 +228,7 @@ impl RelayStatus {
// Determine for this relay, if there are dialinfo that are reachable with our node's // Determine for this relay, if there are dialinfo that are reachable with our node's
// dialinfo filter, and which ordering modes can be satisfied by those flows // dialinfo filter, and which ordering modes can be satisfied by those flows
let mut possible_ordering_modes = HashSet::<bool>::new(); let mut possible_ordering_modes = SequenceOrderingSet::new();
for did in &dial_info_list { for did in &dial_info_list {
if did.class.requires_signal() { if did.class.requires_signal() {
continue; continue;
@ -238,7 +238,7 @@ impl RelayStatus {
// If this dial info can be contacted directly, then it can be used for receiving // If this dial info can be contacted directly, then it can be used for receiving
// relaying and satisfying an ordering mode // relaying and satisfying an ordering mode
if did.dial_info.matches_filter(&self.dial_info_filter) { if did.dial_info.matches_filter(&self.dial_info_filter) {
possible_ordering_modes.insert(didpa.0.is_ordered()); possible_ordering_modes.insert(didpa.0.sequence_ordering());
} }
} }
@ -267,7 +267,7 @@ impl RelayStatus {
// Mark this ordering mode as satisfied // Mark this ordering mode as satisfied
self.need_relay_orderings self.need_relay_orderings
.remove(&(didpa.0.is_ordered(), didpa.1)); .remove(&(didpa.0.sequence_ordering(), didpa.1));
} }
} }
@ -284,8 +284,8 @@ impl RelayStatus {
let mut add_ping = false; let mut add_ping = false;
// See if we should add the ping for ordering mode coverage // See if we should add the ping for ordering mode coverage
let is_ordered = didpa.0.is_ordered(); let ordering = didpa.0.sequence_ordering();
add_ping |= possible_ordering_modes.remove(&is_ordered); add_ping |= possible_ordering_modes.remove(ordering);
// See if we should add the ping for low level port mapping coverage // See if we should add the ping for low level port mapping coverage
if let Some((llpt, port)) = self if let Some((llpt, port)) = self

View file

@ -1,318 +0,0 @@
use super::*;
#[derive(Debug, Clone)]
pub struct RelayPing {
pub node_ref: FilteredNodeRef,
}
impl PartialEq for RelayPing {
fn eq(&self, other: &Self) -> bool {
self.node_ref.equivalent(&other.node_ref)
}
}
impl Eq for RelayPing {}
/// The current node's relaying capabilities and requirements
#[derive(Debug, Clone)]
pub struct RelayStatus {
/// Routing domain this is for
pub routing_domain: RoutingDomain,
/// Low level port info for this node
/// This is which ports are mapped externally that we may need keepalive pings for
pub low_level_port_info: LowLevelPortInfo,
/// This node's outbound dial info filter
/// Used to determine if a relay's dialinfo is directly reachable
pub dial_info_filter: DialInfoFilter,
/// All protocol/address types requiring inbound relays for this node
pub need_relay_protocols: HashSet<(ProtocolType, AddressType)>,
/// Ordering modes we still need for relaying, per address type
pub need_relay_orderings: HashSet<(bool, AddressType)>,
/// All protocol/address types we can offer inbound relaying for from this node
#[expect(dead_code)]
pub can_relay_protocols: HashSet<(ProtocolType, AddressType)>,
/// All the low level protocols and ports that require nat keepalive pings
pub wants_nat_keepalives: LowLevelProtocolPorts,
/// All of the relays and their configuration currently included in our status
pub relays: Vec<RoutingDomainRelay>,
}
impl RelayStatus {
pub fn new_from_routing_domain_detail(
rdd: &dyn RoutingDomainDetail,
needs_hairpin_nat_support: bool,
) -> Self {
// Make temporary nodeinfo without relay info or keys
let node_info = NodeInfo::new(
Timestamp::now(),
VALID_ENVELOPE_VERSIONS.to_vec(),
vec![],
rdd.capabilities(),
rdd.outbound_protocols(),
rdd.address_types(),
rdd.dial_info_details().clone(),
vec![],
);
Self::new_from_node_info(rdd.routing_domain(), &node_info, needs_hairpin_nat_support)
}
fn new_from_node_info(
routing_domain: RoutingDomain,
node_info: &NodeInfo,
needs_hairpin_nat_support: bool,
) -> Self {
let low_level_port_info = node_info.get_low_level_port_info();
let dial_info_filter = DialInfoFilter::all()
.with_protocol_type_set(node_info.outbound_protocols())
.with_address_type_set(node_info.address_types());
// Determine ordering modes we need relaying for
let ordering_modes = node_info
.outbound_protocols()
.iter()
.map(|x| x.is_ordered())
.collect::<HashSet<_>>();
// Get the dial info list in preferred deterministic order
let mut dial_info_list = node_info.dial_info_detail_list().to_vec();
dial_info_list.sort_by(DialInfoDetail::ordered_sequencing_sort);
// Figure out which dial info combinations we have that are direct-capable
let mut direct_did_protoaddrs = HashSet::<(ProtocolType, AddressType)>::new();
let mut wants_nat_keepalives = LowLevelProtocolPorts::new();
for did in dial_info_list {
if !did.class.requires_signal() {
direct_did_protoaddrs
.insert((did.dial_info.protocol_type(), did.dial_info.address_type()));
}
if did.class.wants_nat_keepalive() {
wants_nat_keepalives.insert((
did.dial_info.protocol_type().low_level_protocol_type(),
did.dial_info.address_type(),
did.dial_info.port(),
));
}
}
// Calculate which address/protocol type combinations we require
// relays for, and which we can offer relay support for
let mut need_relay_protocols = HashSet::<(ProtocolType, AddressType)>::new();
let mut can_relay_protocols = HashSet::<(ProtocolType, AddressType)>::new();
for at in AddressTypeSet::all() {
for pt in ProtocolTypeSet::all() {
// if we can't use this protocol because we don't have its ordering mode enabled at all, then we should exclude it
if !ordering_modes.contains(&pt.is_ordered()) {
continue;
}
if direct_did_protoaddrs.contains(&(pt, at)) {
// We can relay this combination
can_relay_protocols.insert((pt, at));
}
if needs_hairpin_nat_support || !direct_did_protoaddrs.contains(&(pt, at)) {
// We can't relay this, so we must need a relay for it ourselves
// Or we want to allocate a hairpin NAT relay
need_relay_protocols.insert((pt, at));
}
}
}
// Get the ordering modes per address type we need, at a minimum, to be able to publish peer info
let need_relay_orderings = AddressTypeSet::all()
.iter()
.flat_map(|at| ordering_modes.iter().map(move |om| (*om, at)))
.collect::<HashSet<_>>();
RelayStatus {
routing_domain,
low_level_port_info,
dial_info_filter,
need_relay_protocols,
need_relay_orderings,
can_relay_protocols,
wants_nat_keepalives,
relays: vec![],
}
}
/// Check if we would like more relays
pub fn wants_more_relays(&self) -> bool {
!self.need_relay_protocols.is_empty() || !self.wants_nat_keepalives.is_empty()
}
/// Check if we need more relays before publication
pub fn needs_more_relays(&self) -> bool {
!self.need_relay_orderings.is_empty() || !self.wants_nat_keepalives.is_empty()
}
/// Get the routing domain relays list when we're done
pub fn get_sorted_relays_list(&self) -> Vec<RoutingDomainRelay> {
let mut relays = self.relays.clone();
// Sort things in order of relay preference, using least-capable relays first
relays.sort_by(|ardr, brdr| {
// Get address types and protocol types to sort by
let mut aats = AddressTypeSet::new();
let mut apts = ProtocolTypeSet::new();
for did in &ardr.dial_info_details {
aats |= did.dial_info.address_type();
apts |= did.dial_info.protocol_type();
}
let mut bats = AddressTypeSet::new();
let mut bpts = ProtocolTypeSet::new();
for did in &brdr.dial_info_details {
bats |= did.dial_info.address_type();
bpts |= did.dial_info.protocol_type();
}
// Compare by address type set first (fewer address types is less)
let c = aats.len().cmp(&bats.len());
if c != cmp::Ordering::Equal {
return c;
}
for at in AddressTypeSet::all() {
let a = aats.contains(at);
let b = bats.contains(at);
let c = a.cmp(&b);
if c != cmp::Ordering::Equal {
return c;
}
}
// Compare by protocol types set second (fewer protocol types is less)
let c = apts.len().cmp(&bpts.len());
if c != cmp::Ordering::Equal {
return c;
}
for pt in ProtocolTypeSet::all() {
let a = apts.contains(pt);
let b = bpts.contains(pt);
let c = a.cmp(&b);
if c != cmp::Ordering::Equal {
return c;
}
}
// Then just compare by node id lists, so things are stable
let a_nodes = ardr.relay_node.node_ids().to_vec();
let b_nodes = brdr.relay_node.node_ids().to_vec();
a_nodes.cmp(&b_nodes)
});
relays
}
/// Remove a relay's capabilities from our current requirements and determine which
/// pings should be performed.
/// Returns true if the requirements changed, or false if applying the relay had no effect
pub fn apply_relay(&mut self, mut relay: RoutingDomainRelay) -> bool {
// Make sure this relay is the correct routing domain and has peer info
let Some(relay_peer_info) = relay.relay_node.get_peer_info(self.routing_domain) else {
return false;
};
// Clear out the dial info details and the pings because we'll add new ones
relay.dial_info_details.clear();
relay.pings.clear();
// For each of the relay's dial info entries, see if it matches a protocol+address type we need covered
let mut dial_info_list = relay_peer_info.node_info().dial_info_detail_list().to_vec();
dial_info_list.sort_by(DialInfoDetail::ordered_sequencing_sort);
// Determine for this relay, if there are dialinfo that are reachable with our node's
// dialinfo filter, and which ordering modes can be satisfied by those flows
let mut possible_ordering_modes = HashSet::<bool>::new();
for did in &dial_info_list {
if did.class.requires_signal() {
continue;
}
let didpa = (did.dial_info.protocol_type(), did.dial_info.address_type());
// If this dial info can be contacted directly, then it can be used to receive
// relayed traffic and to satisfy an ordering mode
if did.dial_info.matches_filter(&self.dial_info_filter) {
possible_ordering_modes.insert(didpa.0.is_ordered());
}
}
// If we did not get a single ordering mode we need for relaying, then this relay is disqualified
// because we can't connect to it with our outbound protocols/address types directly
if possible_ordering_modes.is_empty() {
return false;
}
let mut useful = false;
// Determine relay dial infos we can use from this relay out of our set of needed relay combinations
// Builds up a set of needed ordering modes to keep flows open for the dial infos we are getting relayed
for did in &dial_info_list {
if did.class.requires_signal() {
continue;
}
let didpa = (did.dial_info.protocol_type(), did.dial_info.address_type());
if self.need_relay_protocols.remove(&didpa) {
// Still needed this protocol+address type
useful = true;
// Mark this dial info as one we're using
relay.dial_info_details.push(did.clone());
// This ordering mode requirement is now covered
self.need_relay_orderings
.remove(&(didpa.0.is_ordered(), didpa.1));
}
}
// Collect pings we can use from this relay
for did in &dial_info_list {
if did.class.requires_signal() {
continue;
}
let didpa = (did.dial_info.protocol_type(), did.dial_info.address_type());
// If this dial info can be contacted directly, then it is a ping candidate
if did.dial_info.matches_filter(&self.dial_info_filter) {
// See if we should add this ping
let mut add_ping = false;
// See if we should add the ping for ordering mode coverage
let is_ordered = didpa.0.is_ordered();
add_ping |= possible_ordering_modes.remove(&is_ordered);
// See if we should add the ping for low level port mapping coverage
if let Some((llpt, port)) = self
.low_level_port_info
.protocol_to_port
.get(&didpa)
.copied()
{
let wnk = (llpt, didpa.1, port);
add_ping |= self.wants_nat_keepalives.remove(&wnk);
}
// Add the ping if we determined we could use it
if add_ping {
relay.pings.push(RelayPing {
node_ref: relay.relay_node.unfiltered().custom_filtered(
NodeRefFilter::new()
.with_routing_domain(self.routing_domain)
.with_dial_info_filter(did.dial_info.make_filter()),
),
});
}
}
}
// Add a relay info to our list if it turned out to be useful
if useful {
self.relays.push(relay);
}
useful
}
}
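A minimal sketch of how this requirement accumulator appears intended to be driven (the loop and the `select_relays` / `candidate_relays` names are assumptions for illustration, not part of this commit): candidates are applied in order until no protocol/address-type or keepalive requirements remain, and peer info publication proceeds once the minimum ordering coverage is met.

// Hypothetical driver loop over RelayStatus; illustrative only.
fn select_relays(
    mut status: RelayStatus,
    candidate_relays: Vec<RoutingDomainRelay>,
) -> Vec<RoutingDomainRelay> {
    for relay in candidate_relays {
        // Stop once every needed protocol/address type and NAT keepalive is covered
        if !status.wants_more_relays() {
            break;
        }
        // apply_relay() subtracts whatever this relay can cover from the
        // remaining requirements and keeps the relay only if it was useful
        status.apply_relay(relay);
    }
    // Publication can proceed once the minimum ordering coverage is met
    if !status.needs_more_relays() {
        status.get_sorted_relays_list()
    } else {
        vec![]
    }
}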


@@ -55,7 +55,7 @@ impl RoutingTable {
         let noderefs = self
             .find_preferred_closest_nodes(
                 CLOSEST_PEERS_REQUEST_COUNT,
-                self_node_id.into(),
+                self_node_id.to_hash_coordinate(),
                 filters,
                 |_rti, entry: Option<Arc<BucketEntry>>| {
                     NodeRef::new(self.registry(), entry.unwrap().clone())


@@ -16,7 +16,6 @@ impl RoutingTable {
         _last_ts: Timestamp,
         cur_ts: Timestamp,
     ) -> EyreResult<()> {
-        let crypto = self.crypto();
         let kick_queue: Vec<BucketIndex> = core::mem::take(&mut *self.kick_queue.lock())
             .into_iter()
             .collect();
@@ -30,7 +29,8 @@ impl RoutingTable {
             let Some(buckets) = inner.buckets.get(&kind) else {
                 continue;
             };
-            let sort = make_closest_node_id_sort(&crypto, our_node_id.into());
+            let sort =
+                make_closest_bare_node_id_sort(our_node_id.ref_value().to_bare_hash_coordinate());
             let mut closest_peers = BTreeSet::<BareNodeId>::new();
             let mut closest_unreliable_count = 0usize;


@@ -274,10 +274,7 @@ impl RoutingTable {
         let ordering_modes = self
             .get_current_peer_info(RoutingDomain::PublicInternet)
             .node_info()
-            .outbound_protocols()
-            .iter()
-            .map(|x| x.is_ordered())
-            .collect::<HashSet<_>>();
+            .outbound_sequence_orderings();

         // Just do a single ping with the best protocol for all the other nodes to check for liveness
         for nr in node_refs {
@@ -286,11 +283,8 @@ impl RoutingTable {
             let all_noderefs = if nr.operate(|_rti, e| !relay_node_filter(e)) {
                 let mut nrs = vec![];
-                if ordering_modes.contains(&false) {
-                    nrs.push(nr.sequencing_clone(Sequencing::NoPreference));
-                }
-                if ordering_modes.contains(&true) {
-                    nrs.push(nr.sequencing_clone(Sequencing::EnsureOrdered));
+                for ordering in ordering_modes {
+                    nrs.push(nr.sequencing_clone(ordering.strict_sequencing()));
                 }
                 nrs
             } else {


@@ -194,10 +194,7 @@ impl RoutingTable {
         let mut local_unpublished_route_count = 0usize;
         self.route_spec_store().list_allocated_routes(|_k, v| {
-            if !v.is_published()
-                && v.hop_count() == default_route_hop_count
-                && v.get_route_set_keys().kinds() == VALID_CRYPTO_KINDS
-            {
+            if !v.is_published() && v.hop_count() == default_route_hop_count {
                 local_unpublished_route_count += 1;
             }
             Option::<()>::None


@@ -144,7 +144,7 @@ impl RoutingTable {
                     if rdr
                         .relay_node
                         .node_ids()
-                        .contains_any(outbound_relay_peerinfo.node_ids())
+                        .contains_any_from_slice(outbound_relay_peerinfo.node_ids())
                     {
                         has_outbound_relay = true;
                         true
@@ -282,7 +282,7 @@ impl RoutingTable {
             // Exclude any nodes that are relaying directly through us
             if own_peer_info
                 .node_ids()
-                .contains_any(&peer_info.node_info().relay_ids())
+                .contains_any_from_slice(&peer_info.node_info().relay_ids())
             {
                 return false;
             }


@@ -53,7 +53,7 @@ pub fn fix_typed_node_id_group(valid_kinds: bool, unknown: bool) -> NodeIdGroup
         });
     }
     if unknown {
-        tks.add(fix_typed_node_id(CryptoKind([1, 2, 3, 4]), 0));
+        tks.add(fix_typed_node_id(CryptoKind::new([1, 2, 3, 4]), 0));
     }
     tks
 }
@@ -129,7 +129,7 @@ pub fn fix_peer_info(
 ) -> VeilidAPIResult<PeerInfo> {
     let node_info = NodeInfo::new(
         Timestamp::new(0),
-        vec![ENVELOPE_VERSION_VLD0],
+        vec![ENVELOPE_VERSION_ENV0],
         crypto_info_list,
         PUBLIC_INTERNET_CAPABILITIES.to_vec(),
         ProtocolTypeSet::new(),
@@ -152,7 +152,7 @@ pub fn fix_unsigned_peer_info(
 ) -> VeilidAPIResult<PeerInfo> {
     let node_info = NodeInfo::new(
         Timestamp::new(0),
-        vec![ENVELOPE_VERSION_VLD0],
+        vec![ENVELOPE_VERSION_ENV0],
         crypto_info_list,
         PUBLIC_INTERNET_CAPABILITIES.to_vec(),
         ProtocolTypeSet::new(),


@ -0,0 +1,60 @@
use super::*;
pub const HASH_COORDINATE_LENGTH: usize = 32;
// Internal types
impl HashCoordinate {
pub(crate) fn distance(&self, other: &HashCoordinate) -> HashDistance {
assert_eq!(self.kind(), other.kind());
self.ref_value().distance(other.ref_value())
}
}
impl NodeId {
pub(crate) fn to_hash_coordinate(&self) -> HashCoordinate {
HashCoordinate::new(self.kind(), self.ref_value().to_bare_hash_coordinate())
}
}
impl BareNodeId {
pub(crate) fn to_bare_hash_coordinate(&self) -> BareHashCoordinate {
BareHashCoordinate::new(self)
}
}
impl RecordKey {
pub(crate) fn to_hash_coordinate(&self) -> HashCoordinate {
HashCoordinate::new(self.kind(), self.ref_value().to_bare_hash_coordinate())
}
}
impl BareRecordKey {
pub(crate) fn to_bare_hash_coordinate(&self) -> BareHashCoordinate {
BareHashCoordinate::new(self)
}
}
impl HashDigest {
pub(crate) fn to_hash_coordinate(&self) -> HashCoordinate {
HashCoordinate::new(self.kind(), self.ref_value().to_bare_hash_coordinate())
}
}
impl BareHashDigest {
pub(crate) fn to_bare_hash_coordinate(&self) -> BareHashCoordinate {
BareHashCoordinate::new(self)
}
}
impl BareHashCoordinate {
pub(crate) fn distance(&self, other: &BareHashCoordinate) -> HashDistance {
assert_eq!(self.len(), HASH_COORDINATE_LENGTH);
assert_eq!(other.len(), HASH_COORDINATE_LENGTH);
let mut bytes = [0u8; HASH_COORDINATE_LENGTH];
(0..HASH_COORDINATE_LENGTH).for_each(|n| {
bytes[n] = self[n] ^ other[n];
});
HashDistance::new(&bytes)
}
}
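A small illustration of the XOR metric the distance above implements (how a BareHashCoordinate is built from raw bytes here is an assumption for the sketch): the distance is symmetric and zero between identical coordinates, which is what closest-node sorting relies on.

// Illustrative only; not part of this commit.
fn xor_distance_properties() {
    let a = BareHashCoordinate::new(&[0x11u8; HASH_COORDINATE_LENGTH]);
    let b = BareHashCoordinate::new(&[0x22u8; HASH_COORDINATE_LENGTH]);
    // d(a, b) == d(b, a)
    assert_eq!(a.distance(&b), b.distance(&a));
    // d(a, a) == 0
    assert_eq!(
        a.distance(&a),
        HashDistance::new(&[0u8; HASH_COORDINATE_LENGTH])
    );
}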


@@ -5,6 +5,7 @@ mod direction;
 mod events;
 #[cfg(feature = "geolocation")]
 mod geolocation_info;
+mod hash_coordinate;
 mod low_level_port_info;
 mod node_info;
 mod node_status;
@@ -22,6 +23,7 @@ pub use direction::*;
 pub use events::*;
 #[cfg(feature = "geolocation")]
 pub use geolocation_info::*;
+pub use hash_coordinate::*;
 pub use low_level_port_info::*;
 pub use node_info::*;
 pub use node_status::*;


@@ -80,6 +80,13 @@ impl NodeInfo {
     pub fn outbound_protocols(&self) -> ProtocolTypeSet {
         self.outbound_protocols
     }
+
+    pub fn outbound_sequence_orderings(&self) -> SequenceOrderingSet {
+        self.outbound_protocols
+            .iter()
+            .map(|x| x.sequence_ordering())
+            .collect::<SequenceOrderingSet>()
+    }
     pub fn address_types(&self) -> AddressTypeSet {
         self.address_types
     }
@@ -240,13 +247,8 @@ impl HasDialInfoDetailList for NodeInfo {
     fn has_sequencing_matched_dial_info(&self, sequencing: Sequencing) -> bool {
         // Check our dial info
         for did in self.dial_info_detail_list() {
-            match sequencing {
-                Sequencing::NoPreference | Sequencing::PreferOrdered => return true,
-                Sequencing::EnsureOrdered => {
-                    if did.dial_info.protocol_type().is_ordered() {
-                        return true;
-                    }
-                }
+            if sequencing.matches_ordering(did.dial_info.protocol_type().sequence_ordering()) {
+                return true;
             }
         }
         // Check our relays


@@ -76,7 +76,7 @@ impl PeerInfo {
         let signatures = SignatureGroup::from(crypto.generate_signatures(
             &node_info_message,
             &keypairs,
-            |kp, sig| Signature::new(kp.kind(), sig),
+            |_kp, sig| sig,
         )?);

         // Extract node ids for convenience


@@ -89,13 +89,8 @@ impl HasDialInfoDetailList for RelayInfo {
     fn has_sequencing_matched_dial_info(&self, sequencing: Sequencing) -> bool {
         // Check our dial info
         for did in self.dial_info_detail_list() {
-            match sequencing {
-                Sequencing::NoPreference | Sequencing::PreferOrdered => return true,
-                Sequencing::EnsureOrdered => {
-                    if did.dial_info.protocol_type().is_ordered() {
-                        return true;
-                    }
-                }
+            if sequencing.matches_ordering(did.dial_info.protocol_type().sequence_ordering()) {
+                return true;
             }
         }


@@ -1,16 +1,16 @@
 use super::*;

 fourcc_type!(VeilidCapability);

-pub const CAP_ROUTE: VeilidCapability = VeilidCapability(*b"ROUT");
+pub const CAP_ROUTE: VeilidCapability = VeilidCapability::new(*b"ROUT");
 #[cfg(feature = "unstable-tunnels")]
-pub const CAP_TUNNEL: VeilidCapability = VeilidCapability(*b"TUNL");
-pub const CAP_SIGNAL: VeilidCapability = VeilidCapability(*b"SGNL");
-pub const CAP_RELAY: VeilidCapability = VeilidCapability(*b"RLAY");
-pub const CAP_VALIDATE_DIAL_INFO: VeilidCapability = VeilidCapability(*b"DIAL");
-pub const CAP_DHT: VeilidCapability = VeilidCapability(*b"DHTV");
-pub const CAP_APPMESSAGE: VeilidCapability = VeilidCapability(*b"APPM");
+pub const CAP_TUNNEL: VeilidCapability = VeilidCapability::new(*b"TUNL");
+pub const CAP_SIGNAL: VeilidCapability = VeilidCapability::new(*b"SGNL");
+pub const CAP_RELAY: VeilidCapability = VeilidCapability::new(*b"RLAY");
+pub const CAP_VALIDATE_DIAL_INFO: VeilidCapability = VeilidCapability::new(*b"DIAL");
+pub const CAP_DHT: VeilidCapability = VeilidCapability::new(*b"DHTV");
+pub const CAP_APPMESSAGE: VeilidCapability = VeilidCapability::new(*b"APPM");
 #[cfg(feature = "unstable-blockstore")]
-pub const CAP_BLOCKSTORE: VeilidCapability = VeilidCapability(*b"BLOC");
+pub const CAP_BLOCKSTORE: VeilidCapability = VeilidCapability::new(*b"BLOC");

 pub const DISTANCE_METRIC_CAPABILITIES: &[VeilidCapability] = &[CAP_DHT];

 pub const CONNECTIVITY_CAPABILITIES: &[VeilidCapability] =
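These constants now go through a const `new` constructor instead of constructing the tuple struct directly. A rough sketch of the kind of FourCC wrapper this implies (the names and exact API of the real `fourcc_type!` macro are assumptions, not taken from this commit):

// Hypothetical shape of a fourcc-style newtype with a const constructor;
// it also shows why `u32::from(...)` can replace `u32::from_be_bytes(x.0)`
// in the capability encoders further down.
#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
pub struct ExampleFourCC([u8; 4]);

impl ExampleFourCC {
    pub const fn new(b: [u8; 4]) -> Self {
        Self(b)
    }
    pub const fn bytes(&self) -> &[u8; 4] {
        &self.0
    }
}

impl From<ExampleFourCC> for u32 {
    fn from(f: ExampleFourCC) -> Self {
        u32::from_be_bytes(f.0)
    }
}

pub const CAP_EXAMPLE: ExampleFourCC = ExampleFourCC::new(*b"EXMP");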


@@ -5,14 +5,14 @@ pub struct Answer<T> {
     /// Hpw long it took to get this answer
     pub _latency: TimestampDuration,
     /// The private route requested to receive the reply
-    pub reply_private_route: Option<BarePublicKey>,
+    pub reply_private_route: Option<PublicKey>,
     /// The answer itself
     pub answer: T,
 }
 impl<T> Answer<T> {
     pub fn new(
         latency: TimestampDuration,
-        reply_private_route: Option<BarePublicKey>,
+        reply_private_route: Option<PublicKey>,
         answer: T,
     ) -> Self {
         Self {


@@ -22,7 +22,7 @@ macro_rules! define_typed_byte_data_coder {
                 $capnp_name: &$rust_name,
                 builder: &mut veilid_capnp::$capnp_name::Builder,
             ) {
-                builder.set_kind(u32::from_be_bytes($capnp_name.kind().0));
+                builder.set_kind(u32::from($capnp_name.kind()));
                 builder.set_value($capnp_name.ref_value());
             }
         }
@@ -69,4 +69,4 @@ define_typed_byte_data_coder!(route_id, RouteId);
 define_typed_byte_data_coder!(signature, Signature);

 // Nonce
-define_untyped_byte_data_coder!(nonce, BareNonce);
+define_untyped_byte_data_coder!(nonce, Nonce);


@@ -146,7 +146,8 @@ pub fn encode_node_info(
         let capvec: Vec<u32> = node_info
             .capabilities()
             .iter()
-            .map(|x| u32::from_be_bytes(x.0))
+            .copied()
+            .map(u32::from)
             .collect();

         s.clone_from_slice(&capvec);


@@ -66,11 +66,7 @@ impl RPCOperationFindNodeQ {
             .reborrow()
             .init_capabilities(self.capabilities.len() as u32);
         if let Some(s) = cap_builder.as_slice() {
-            let capvec: Vec<u32> = self
-                .capabilities
-                .iter()
-                .map(|x| u32::from_be_bytes(x.0))
-                .collect();
+            let capvec: Vec<u32> = self.capabilities.iter().copied().map(u32::from).collect();
             s.clone_from_slice(&capvec);
         }


@@ -7,10 +7,10 @@ pub(in crate::rpc_processor) struct RPCOperationReturnReceipt {

 impl RPCOperationReturnReceipt {
     pub fn new(receipt: Vec<u8>) -> Result<Self, RPCError> {
-        if receipt.len() < MIN_RECEIPT_SIZE {
+        if receipt.len() < RCP0_MIN_RECEIPT_SIZE {
             return Err(RPCError::protocol("ReturnReceipt receipt too short to set"));
         }
-        if receipt.len() > MAX_RECEIPT_SIZE {
+        if receipt.len() > RCP0_MAX_RECEIPT_SIZE {
             return Err(RPCError::protocol("ReturnReceipt receipt too long to set"));
         }
@@ -34,7 +34,7 @@ impl RPCOperationReturnReceipt {
     ) -> Result<Self, RPCError> {
         rpc_ignore_missing_property!(reader, receipt);
         let rr = reader.get_receipt()?;
-        rpc_ignore_min_max_len!(rr, MIN_RECEIPT_SIZE, MAX_RECEIPT_SIZE);
+        rpc_ignore_min_max_len!(rr, RCP0_MIN_RECEIPT_SIZE, RCP0_MAX_RECEIPT_SIZE);

         Ok(Self {
             receipt: rr.to_vec(),


@@ -5,7 +5,7 @@ pub(in crate::rpc_processor) struct RoutedOperation {
     routing_domain: RoutingDomain,
     sequencing: Sequencing,
     signatures: Vec<Signature>,
-    nonce: BareNonce,
+    nonce: Nonce,
     data: Vec<u8>,
 }
@@ -25,7 +25,7 @@ impl RoutedOperation {
     pub fn new(
         routing_domain: RoutingDomain,
         sequencing: Sequencing,
-        nonce: BareNonce,
+        nonce: Nonce,
         data: Vec<u8>,
     ) -> Self {
         Self {
@@ -54,7 +54,7 @@ impl RoutedOperation {
         self.signatures.push(signature);
     }
-    pub fn nonce(&self) -> &BareNonce {
+    pub fn nonce(&self) -> &Nonce {
         &self.nonce
     }
     pub fn data(&self) -> &[u8] {


@@ -9,12 +9,12 @@ pub(in crate::rpc_processor) struct RPCOperationValidateDialInfo {

 impl RPCOperationValidateDialInfo {
     pub fn new(dial_info: DialInfo, receipt: Vec<u8>, redirect: bool) -> Result<Self, RPCError> {
-        if receipt.len() < MIN_RECEIPT_SIZE {
+        if receipt.len() < RCP0_MIN_RECEIPT_SIZE {
             return Err(RPCError::protocol(
                 "ValidateDialInfo receipt too short to set",
             ));
         }
-        if receipt.len() > MAX_RECEIPT_SIZE {
+        if receipt.len() > RCP0_MAX_RECEIPT_SIZE {
             return Err(RPCError::protocol(
                 "ValidateDialInfo receipt too long to set",
             ));
@@ -53,7 +53,7 @@ impl RPCOperationValidateDialInfo {
         rpc_ignore_missing_property!(reader, receipt);
         let rcpt_reader = reader.get_receipt()?;
-        rpc_ignore_min_max_len!(rcpt_reader, MIN_RECEIPT_SIZE, MAX_RECEIPT_SIZE);
+        rpc_ignore_min_max_len!(rcpt_reader, RCP0_MIN_RECEIPT_SIZE, RCP0_MAX_RECEIPT_SIZE);

         let receipt = rcpt_reader.to_vec();
         let redirect = reader.get_redirect();


@@ -34,16 +34,9 @@ impl RPCOperationWatchValueQ {
         }

         let signature_data = Self::make_signature_data(&key, &subkeys, expiration, count, watch_id);
-        let signature = Signature::new(
-            vcrypto.kind(),
-            vcrypto
-                .sign(
-                    watcher.ref_value().ref_key(),
-                    watcher.ref_value().ref_secret(),
-                    &signature_data,
-                )
-                .map_err(RPCError::protocol)?,
-        );
+        let signature = vcrypto
+            .sign(&watcher.key(), &watcher.secret(), &signature_data)
+            .map_err(RPCError::protocol)?;

         Ok(Self {
             key,
@@ -51,7 +44,7 @@ impl RPCOperationWatchValueQ {
             expiration,
             count,
             watch_id,
-            watcher: PublicKey::new(watcher.kind(), watcher.ref_value().key()),
+            watcher: watcher.key(),
             signature,
         })
     }
@@ -68,7 +61,7 @@ impl RPCOperationWatchValueQ {
         let mut sig_data =
             Vec::with_capacity(key.ref_value().len() + 4 + (subkeys_ranges_len * 8) + 8 + 8);
-        sig_data.extend_from_slice(&key.kind().0);
+        sig_data.extend_from_slice(key.kind().bytes());
         sig_data.extend_from_slice(key.ref_value());
         for sk in subkeys.ranges() {
             sig_data.extend_from_slice(&sk.start().to_le_bytes());
@@ -96,11 +89,7 @@ impl RPCOperationWatchValueQ {
             self.watch_id,
         );
         if !vcrypto
-            .verify(
-                self.watcher.ref_value(),
-                &sig_data,
-                self.signature.ref_value(),
-            )
+            .verify(&self.watcher, &sig_data, &self.signature)
             .map_err(RPCError::protocol)?
         {
             return Err(RPCError::protocol("failed to validate watcher signature"));


@@ -391,7 +391,7 @@ impl RPCProcessor {
                     .kind();

                 let mut avoid_nodes = relay.node_ids();
-                avoid_nodes.add_all(&target.node_ids());
+                avoid_nodes.add_all_from_slice(&target.node_ids());
                 let pr_key = network_result_try!(rss
                     .get_private_route_for_safety_spec(crypto_kind, safety_spec, &avoid_nodes,)
                     .to_rpc_network_result()?);
@@ -430,7 +430,7 @@ impl RPCProcessor {
                 // Determine if we can use optimized nodeinfo
                 let route_node = if rss.has_remote_private_route_seen_our_node_info(
-                    private_route.public_key.ref_value(),
+                    &private_route.public_key,
                     &published_peer_info,
                 ) {
                     RouteNode::NodeId(routing_table.node_id(crypto_kind))
@@ -440,7 +440,7 @@ impl RPCProcessor {
                 Ok(NetworkResult::value(RespondTo::PrivateRoute(
                     PrivateRoute::new_stub(
-                        routing_table.node_id(crypto_kind).into(),
+                        routing_table.public_key(crypto_kind),
                         route_node,
                     ),
                 )))
@@ -450,12 +450,12 @@ impl RPCProcessor {
                 // Check for loopback test
                 let opt_private_route_id =
-                    rss.get_route_id_for_key(private_route.public_key.ref_value());
+                    rss.get_route_id_for_key(&private_route.public_key);
                 let pr_key = if opt_private_route_id.is_some()
                     && safety_spec.preferred_route == opt_private_route_id
                 {
                     // Private route is also safety route during loopback test
-                    private_route.public_key.value()
+                    private_route.public_key.clone()
                 } else {
                     // Get the private route to respond to that matches the safety route spec we sent the request with
                     network_result_try!(rss


@ -52,7 +52,6 @@ impl RPCError {
pub fn map_network<M: ToString, X: ToString>(message: M) -> impl FnOnce(X) -> Self { pub fn map_network<M: ToString, X: ToString>(message: M) -> impl FnOnce(X) -> Self {
move |x| Self::Network(format!("{}: {}", message.to_string(), x.to_string())) move |x| Self::Network(format!("{}: {}", message.to_string(), x.to_string()))
} }
#[cfg_attr(all(target_arch = "wasm32", target_os = "unknown"), expect(dead_code))]
pub fn try_again<X: ToString>(x: X) -> Self { pub fn try_again<X: ToString>(x: X) -> Self {
Self::TryAgain(x.to_string()) Self::TryAgain(x.to_string())
} }


@@ -146,7 +146,7 @@ pub fn capability_fanout_peer_info_filter(caps: Vec<VeilidCapability>) -> Fanout
 /// in the given time
 pub(crate) struct FanoutCall<'a> {
     routing_table: &'a RoutingTable,
-    hash_coordinate: HashDigest,
+    hash_coordinate: HashCoordinate,
     node_count: usize,
     fanout_tasks: usize,
     consensus_count: usize,
@@ -166,7 +166,7 @@ impl<'a> FanoutCall<'a> {
     #[allow(clippy::too_many_arguments)]
     pub fn new(
         routing_table: &'a RoutingTable,
-        hash_coordinate: HashDigest,
+        hash_coordinate: HashCoordinate,
         node_count: usize,
         fanout_tasks: usize,
         consensus_count: usize,
@@ -259,10 +259,7 @@ impl<'a> FanoutCall<'a> {
     }

     #[instrument(level = "trace", target = "fanout", skip_all)]
-    async fn fanout_processor<'b>(
-        &self,
-        context: &Mutex<FanoutContext<'b>>,
-    ) -> Result<bool, RPCError> {
+    async fn fanout_processor(&self, context: &Mutex<FanoutContext<'_>>) -> Result<bool, RPCError> {
         // Make a work request channel
         let (work_sender, work_receiver) = flume::bounded(1);
@@ -407,27 +404,7 @@ impl<'a> FanoutCall<'a> {
     #[instrument(level = "trace", target = "fanout", skip_all)]
     pub async fn run(&self, init_fanout_queue: Vec<NodeRef>) -> Result<FanoutResult, RPCError> {
         // Create context for this run
-        let crypto = self.routing_table.crypto();
-        let Some(vcrypto) = crypto.get(self.hash_coordinate.kind()) else {
-            return Err(RPCError::internal(
-                "should not try this on crypto we don't support",
-            ));
-        };
-        let node_sort = Box::new(
-            |a_key: &CryptoTyped<BareNodeId>,
-             b_key: &CryptoTyped<BareNodeId>|
-             -> core::cmp::Ordering {
-                let da = vcrypto.distance(
-                    &BareHashDigest::from(a_key.value()),
-                    self.hash_coordinate.ref_value(),
-                );
-                let db = vcrypto.distance(
-                    &BareHashDigest::from(b_key.value()),
-                    self.hash_coordinate.ref_value(),
-                );
-                da.cmp(&db)
-            },
-        );
+        let node_sort = Box::new(make_closest_node_id_sort(self.hash_coordinate.clone()));
         let context = Arc::new(Mutex::new(FanoutContext {
             fanout_queue: FanoutQueue::new(
                 self.routing_table.registry(),


@@ -20,7 +20,7 @@ pub(in crate::rpc_processor) struct RPCMessageHeaderDetailSafetyRouted {
     /// Direct header
     pub direct: RPCMessageHeaderDetailDirect,
     /// Remote safety route used
-    pub remote_safety_route: BarePublicKey,
+    pub remote_safety_route: PublicKey,
     /// The sequencing used for this route
     pub sequencing: Sequencing,
 }
@@ -31,9 +31,9 @@ pub(in crate::rpc_processor) struct RPCMessageHeaderDetailPrivateRouted {
     /// Direct header
     pub direct: RPCMessageHeaderDetailDirect,
     /// Remote safety route used (or possibly node id the case of no safety route)
-    pub remote_safety_route: BarePublicKey,
+    pub remote_safety_route: PublicKey,
     /// The private route we received the rpc over
-    pub private_route: BarePublicKey,
+    pub private_route: PublicKey,
     // The safety spec for replying to this private routed rpc
     pub safety_spec: SafetySpec,
 }


@@ -80,9 +80,9 @@ struct WaitableReplyContext {
     node_ref: NodeRef,
     send_ts: Timestamp,
     send_data_result: SendDataResult,
-    safety_route: Option<BarePublicKey>,
-    remote_private_route: Option<BarePublicKey>,
-    reply_private_route: Option<BarePublicKey>,
+    safety_route: Option<PublicKey>,
+    remote_private_route: Option<PublicKey>,
+    reply_private_route: Option<PublicKey>,
 }

 #[derive(Debug)]
@@ -434,7 +434,7 @@ impl RPCProcessor {
         let routing_table = self.routing_table();
         let fanout_call = FanoutCall::new(
             &routing_table,
-            node_id.into(),
+            node_id.to_hash_coordinate(),
             count,
             fanout,
             0,
@@ -570,10 +570,10 @@ impl RPCProcessor {
                 ));
             }
             RPCMessageHeaderDetail::SafetyRouted(sr) => {
-                let node_id = self
+                let public_key = self
                     .routing_table()
-                    .node_id(sr.direct.envelope.get_crypto_kind());
-                if node_id.value() != reply_private_route.into() {
+                    .public_key(sr.direct.envelope.get_crypto_kind());
+                if public_key != reply_private_route {
                     return Err(RPCError::protocol(
                         "should have received reply from safety route to a stub",
                     ));
@@ -600,7 +600,7 @@ impl RPCProcessor {
         routing_domain: RoutingDomain,
         safety_selection: SafetySelection,
         remote_private_route: PrivateRoute,
-        reply_private_route: Option<BarePublicKey>,
+        reply_private_route: Option<PublicKey>,
         message_data: Vec<u8>,
     ) -> RPCNetworkResult<RenderedOperation> {
         let routing_table = self.routing_table();
@@ -609,7 +609,7 @@ impl RPCProcessor {
         // Get useful private route properties
         let pr_is_stub = remote_private_route.is_stub();
-        let pr_pubkey = remote_private_route.public_key.value();
+        let pr_pubkey = remote_private_route.public_key.clone();
         let crypto_kind = remote_private_route.crypto_kind();
         let Some(vcrypto) = crypto.get(crypto_kind) else {
             return Err(RPCError::internal(
@@ -623,7 +623,7 @@ impl RPCProcessor {
             .compile_safety_route(safety_selection, remote_private_route)
             .to_rpc_network_result()?);
         let sr_is_stub = compiled_route.safety_route.is_stub();
-        let sr_pubkey = compiled_route.safety_route.public_key.value();
+        let sr_pubkey = compiled_route.safety_route.public_key.clone();

         // Encrypt routed operation
         // Xmsg + ENC(Xmsg, DH(PKapr, SKbsr))
@@ -689,7 +689,7 @@ impl RPCProcessor {
         let reply_private_route = match operation.kind() {
             RPCOperationKind::Question(q) => match q.respond_to() {
                 RespondTo::Sender => None,
-                RespondTo::PrivateRoute(pr) => Some(pr.public_key.value()),
+                RespondTo::PrivateRoute(pr) => Some(pr.public_key.clone()),
             },
             RPCOperationKind::Statement(_) | RPCOperationKind::Answer(_) => None,
         };
@@ -759,8 +759,8 @@ impl RPCProcessor {
             Some(pi) => pi,
         };
         let private_route = PrivateRoute::new_stub(
-            match destination_node_ref.best_node_id() {
-                Some(nid) => nid.into(),
+            match destination_node_ref.best_public_key(routing_domain) {
+                Some(pk) => pk,
                 None => {
                     return Ok(NetworkResult::no_connection_other(
                         "No best node id for stub private route",
@@ -859,8 +859,8 @@ impl RPCProcessor {
         rpc_kind: RPCKind,
         send_ts: Timestamp,
         node_ref: NodeRef,
-        safety_route: Option<BarePublicKey>,
-        remote_private_route: Option<BarePublicKey>,
+        safety_route: Option<PublicKey>,
+        remote_private_route: Option<PublicKey>,
     ) {
         let wants_answer = matches!(rpc_kind, RPCKind::Question);
@@ -896,7 +896,7 @@ impl RPCProcessor {
         if context.safety_route.is_none() && context.remote_private_route.is_none() {
             context
                 .node_ref
-                .stats_lost_answer(context.send_data_result.is_ordered());
+                .stats_lost_answer(context.send_data_result.sequence_ordering());

             // Also clear the last_connections for the entry so we make a new connection next time
             context.node_ref.clear_last_flows();
@@ -936,9 +936,9 @@ impl RPCProcessor {
         send_ts: Timestamp,
         bytes: ByteCount,
         node_ref: NodeRef,
-        safety_route: Option<BarePublicKey>,
-        remote_private_route: Option<BarePublicKey>,
-        ordered: bool,
+        safety_route: Option<PublicKey>,
+        remote_private_route: Option<PublicKey>,
+        ordering: SequenceOrdering,
     ) {
         // Record for node if this was not sent via a route
         if safety_route.is_none() && remote_private_route.is_none() {
@@ -948,7 +948,7 @@ impl RPCProcessor {
             if is_answer {
                 node_ref.stats_answer_sent(bytes);
             } else {
-                node_ref.stats_question_sent(send_ts, bytes, wants_answer, ordered);
+                node_ref.stats_question_sent(send_ts, bytes, wants_answer, ordering);
             }
             return;
         }
@@ -990,7 +990,7 @@ impl RPCProcessor {
                 context.send_ts,
                 recv_ts,
                 bytes,
-                context.send_data_result.is_ordered(),
+                context.send_data_result.sequence_ordering(),
             );
             return;
         }
@@ -1193,7 +1193,7 @@ impl RPCProcessor {
             node_ref.unfiltered(),
             safety_route.clone(),
             remote_private_route.clone(),
-            send_data_result.is_ordered(),
+            send_data_result.sequence_ordering(),
         );

         // Ref the connection so it doesn't go away until we're done with the waitable reply
@@ -1284,7 +1284,7 @@ impl RPCProcessor {
             node_ref.unfiltered(),
             safety_route,
             remote_private_route,
-            send_data_result.is_ordered(),
+            send_data_result.sequence_ordering(),
         );

         Ok(NetworkResult::value(()))
@@ -1357,7 +1357,7 @@ impl RPCProcessor {
             node_ref.unfiltered(),
             safety_route,
             remote_private_route,
-            send_data_result.is_ordered(),
+            send_data_result.sequence_ordering(),
         );

         Ok(NetworkResult::value(()))


@@ -9,11 +9,11 @@ pub struct RenderedOperation {
     /// Node to send envelope to (may not be destination node in case of relay)
     pub node_ref: FilteredNodeRef,
     /// The safety route used to send the message
-    pub safety_route: Option<BarePublicKey>,
+    pub safety_route: Option<PublicKey>,
     /// The private route used to send the message
-    pub remote_private_route: Option<BarePublicKey>,
+    pub remote_private_route: Option<PublicKey>,
     /// The private route requested to receive the reply
-    pub reply_private_route: Option<BarePublicKey>,
+    pub reply_private_route: Option<PublicKey>,
 }

 impl fmt::Debug for RenderedOperation {


@@ -120,7 +120,7 @@ impl RPCProcessor {
         let closest_nodes = network_result_try!(routing_table.find_preferred_closest_peers(
             routing_domain,
-            &node_id.clone().into(),
+            node_id.to_hash_coordinate(),
             &capabilities
         ));


@@ -141,10 +141,9 @@ impl RPCProcessor {
         }

         // Validate peers returned are, in fact, closer to the key than the node we sent this to
-        let valid = match RoutingTable::verify_peers_closer(
-            &vcrypto,
-            &target_node_id.clone().into(),
-            &record_key.clone().into(),
+        let valid = match self.routing_table().verify_peers_closer(
+            target_node_id.to_hash_coordinate(),
+            record_key.to_hash_coordinate(),
             &peers,
         ) {
             Ok(v) => v,
@@ -226,7 +225,7 @@ impl RPCProcessor {
         let closer_to_key_peers = network_result_try!(routing_table
             .find_preferred_peers_closer_to_key(
                 routing_domain,
-                &record_key.clone().into(),
+                record_key.to_hash_coordinate(),
                 vec![CAP_DHT]
             ));


@@ -138,10 +138,9 @@ impl RPCProcessor {
         }

         // Validate peers returned are, in fact, closer to the key than the node we sent this to
-        let valid = match RoutingTable::verify_peers_closer(
-            &vcrypto,
-            &target_node_id.clone().into(),
-            &record_key.clone().into(),
+        let valid = match self.routing_table().verify_peers_closer(
+            target_node_id.to_hash_coordinate(),
+            record_key.to_hash_coordinate(),
             &peers,
         ) {
             Ok(v) => v,
@@ -214,7 +213,7 @@ impl RPCProcessor {
         let closer_to_key_peers = network_result_try!(routing_table
             .find_preferred_peers_closer_to_key(
                 routing_domain,
-                &record_key.clone().into(),
+                record_key.to_hash_coordinate(),
                 vec![CAP_DHT]
             ));


@@ -89,11 +89,8 @@ impl RPCProcessor {
     ) -> RPCNetworkResult<()> {
         // Now that things are valid, decrypt the routed operation with DEC(nonce, DH(the SR's public key, the PR's (or node's) secret)
         // xxx: punish nodes that send messages that fail to decrypt eventually? How to do this for safety routes?
-        let secret_key = self
-            .routing_table()
-            .secret_key(remote_sr_pubkey.kind())
-            .value();
-        let Ok(dh_secret) = vcrypto.cached_dh(remote_sr_pubkey.ref_value(), &secret_key) else {
+        let secret_key = self.routing_table().secret_key(remote_sr_pubkey.kind());
+        let Ok(dh_secret) = vcrypto.cached_dh(&remote_sr_pubkey, &secret_key) else {
             return Ok(NetworkResult::invalid_message(
                 "dh failed for remote safety route for safety routed operation",
             ));
@@ -116,7 +113,7 @@ impl RPCProcessor {
         // Pass message to RPC system
         self.enqueue_safety_routed_message(
             detail,
-            remote_sr_pubkey.value(),
+            remote_sr_pubkey,
             routed_operation.sequencing(),
             body,
         )
@@ -146,7 +143,7 @@ impl RPCProcessor {
         // Ensure the route is validated, and construct a return safetyspec that matches the inbound preferences
         let routing_table = self.routing_table();
         let rss = routing_table.route_spec_store();
-        let preferred_route = rss.get_route_id_for_key(pr_pubkey.ref_value());
+        let preferred_route = rss.get_route_id_for_key(&pr_pubkey);

         let Some((secret_key, safety_spec)) = rss.with_signature_validated_route(
             &pr_pubkey,
@@ -172,7 +169,7 @@ impl RPCProcessor {
         // Now that things are valid, decrypt the routed operation with DEC(nonce, DH(the SR's public key, the PR's (or node's) secret)
         // xxx: punish nodes that send messages that fail to decrypt eventually. How to do this for private routes?
-        let Ok(dh_secret) = vcrypto.cached_dh(remote_sr_pubkey.ref_value(), &secret_key) else {
+        let Ok(dh_secret) = vcrypto.cached_dh(&remote_sr_pubkey, &secret_key) else {
             return Ok(NetworkResult::invalid_message(
                 "dh failed for remote safety route for private routed operation",
             ));
@@ -189,14 +186,8 @@ impl RPCProcessor {
         };

         // Pass message to RPC system
-        self.enqueue_private_routed_message(
-            detail,
-            remote_sr_pubkey.value(),
-            pr_pubkey.value(),
-            safety_spec,
-            body,
-        )
-        .map_err(RPCError::internal)?;
+        self.enqueue_private_routed_message(detail, remote_sr_pubkey, pr_pubkey, safety_spec, body)
+            .map_err(RPCError::internal)?;

         Ok(NetworkResult::value(()))
     }
@@ -210,13 +201,9 @@ impl RPCProcessor {
         remote_sr_pubkey: PublicKey,
         pr_pubkey: PublicKey,
     ) -> RPCNetworkResult<()> {
-        // If the private route public key is our node id, then this was sent via safety route to our node directly
+        // If the private route public key is our public key, then this was sent via safety route to our node directly
         // so there will be no signatures to validate
-        if self
-            .routing_table()
-            .node_ids()
-            .contains(&pr_pubkey.clone().into())
-        {
+        if self.routing_table().public_keys().contains(&pr_pubkey) {
             // The private route was a stub
             self.process_safety_routed_operation(
                 detail,
@@ -310,9 +297,9 @@ impl RPCProcessor {
         };

         // Decrypt the blob with DEC(nonce, DH(the PR's public key, this hop's secret)
-        let secret_key = self.routing_table().secret_key(crypto_kind).value();
+        let secret_key = self.routing_table().secret_key(crypto_kind);
         let dh_secret = vcrypto
-            .cached_dh(pr_pubkey.ref_value(), &secret_key)
+            .cached_dh(pr_pubkey, &secret_key)
             .map_err(RPCError::protocol)?;
         let dec_blob_data = match vcrypto.decrypt_aead(
             &route_hop_data.blob,
@@ -343,18 +330,11 @@ impl RPCProcessor {
         // Sign the operation if this is not our last hop
         // as the last hop is already signed by the envelope
         if route_hop.next_hop.is_some() {
-            let node_id = self.routing_table().node_id(crypto_kind);
-            let secret_key = self.routing_table().secret_key(crypto_kind).value();
-            let sig = Signature::new(
-                vcrypto.kind(),
-                vcrypto
-                    .sign(
-                        &node_id.value().into(),
-                        &secret_key,
-                        routed_operation.data(),
-                    )
-                    .map_err(RPCError::internal)?,
-            );
+            let public_key = self.routing_table().public_key(crypto_kind);
+            let secret_key = self.routing_table().secret_key(crypto_kind);
+            let sig = vcrypto
+                .sign(&public_key, &secret_key, routed_operation.data())
+                .map_err(RPCError::internal)?;
             routed_operation.add_signature(sig);
         }
@@ -413,10 +393,8 @@ impl RPCProcessor {
             // There is a safety route hop
             SafetyRouteHops::Data(ref route_hop_data) => {
                 // Decrypt the blob with DEC(nonce, DH(the SR's public key, this hop's secret)
-                let secret_key = self.routing_table().secret_key(crypto_kind).value();
-                let Ok(dh_secret) =
-                    vcrypto.cached_dh(safety_route.public_key.ref_value(), &secret_key)
-                else {
+                let secret_key = self.routing_table().secret_key(crypto_kind);
+                let Ok(dh_secret) = vcrypto.cached_dh(&safety_route.public_key, &secret_key) else {
                     return Ok(NetworkResult::invalid_message(
                         "dh failed for safety route hop",
                     ));


@@ -154,10 +154,9 @@ impl RPCProcessor {
         }

         // Validate peers returned are, in fact, closer to the key than the node we sent this to
-        let valid = match RoutingTable::verify_peers_closer(
-            &vcrypto,
-            &target_node_id.clone().into(),
-            &record_key.clone().into(),
+        let valid = match self.routing_table().verify_peers_closer(
+            target_node_id.to_hash_coordinate(),
+            record_key.to_hash_coordinate(),
             &peers,
         ) {
             Ok(v) => v,
@@ -241,7 +240,7 @@ impl RPCProcessor {
         let closer_to_key_peers = network_result_try!(routing_table
             .find_preferred_peers_closer_to_key(
                 routing_domain,
-                &record_key.clone().into(),
+                record_key.to_hash_coordinate(),
                 vec![CAP_DHT]
             ));


@@ -5,6 +5,7 @@ impl_veilid_log_facility!("rpc");
 impl RPCProcessor {
     // Can only be sent directly, not via relays or routes
     #[instrument(level = "trace", target = "rpc", skip(self), ret, err(level=Level::DEBUG))]
+    #[cfg_attr(all(target_arch = "wasm32", target_os = "unknown"), expect(dead_code))]
     pub async fn rpc_call_validate_dial_info(
         &self,
         peer: NodeRef,


@@ -62,10 +62,10 @@ impl RPCProcessor {
                     "not processing value change over safety route",
                 ));
             }
-            RPCMessageHeaderDetail::PrivateRouted(p) => NodeId::new(
-                p.direct.envelope.get_crypto_kind(),
-                p.remote_safety_route.clone().into(),
-            ),
+            RPCMessageHeaderDetail::PrivateRouted(p) => self
+                .routing_table()
+                .generate_node_id(&p.remote_safety_route)
+                .map_err(RPCError::internal)?,
         };

         if debug_target_enabled!("dht") {


@@ -153,10 +153,9 @@ impl RPCProcessor {
         }

         // Validate peers returned are, in fact, closer to the key than the node we sent this to
-        let valid = match RoutingTable::verify_peers_closer(
-            &vcrypto,
-            &target_node_id.clone().into(),
-            &record_key.clone().into(),
+        let valid = match self.routing_table().verify_peers_closer(
+            target_node_id.to_hash_coordinate(),
+            record_key.to_hash_coordinate(),
             &peers,
         ) {
             Ok(v) => v,
@@ -266,7 +265,7 @@ impl RPCProcessor {
         let closer_to_key_peers = network_result_try!(routing_table
             .find_preferred_peers_closer_to_key(
                 routing_domain,
-                &record_key.clone().into(),
+                record_key.to_hash_coordinate(),
                 vec![CAP_DHT]
             ));


@@ -158,7 +158,7 @@ impl RPCProcessor {
     pub(super) fn enqueue_safety_routed_message(
         &self,
         direct: RPCMessageHeaderDetailDirect,
-        remote_safety_route: BarePublicKey,
+        remote_safety_route: PublicKey,
         sequencing: Sequencing,
         body: Vec<u8>,
     ) -> EyreResult<()> {
@@ -203,8 +203,8 @@ impl RPCProcessor {
     pub(super) fn enqueue_private_routed_message(
         &self,
         direct: RPCMessageHeaderDetailDirect,
-        remote_safety_route: BarePublicKey,
-        private_route: BarePublicKey,
+        remote_safety_route: PublicKey,
+        private_route: PublicKey,
         safety_spec: SafetySpec,
         body: Vec<u8>,
     ) -> EyreResult<()> {


@@ -259,7 +259,7 @@ impl StorageManager {
         let routing_table = registry.routing_table();
         let fanout_call = FanoutCall::new(
             &routing_table,
-            record_key.clone().into(),
+            record_key.to_hash_coordinate(),
             key_count,
             fanout,
             consensus_count,
@@ -374,18 +374,11 @@ impl StorageManager {
             return Ok(None);
         };

-        // Get cryptosystem
-        let crypto = self.crypto();
-        let Some(vcrypto) = crypto.get(record_key.kind()) else {
-            apibail_generic!("unsupported cryptosystem");
-        };
-
         // Keep the list of nodes that returned a value for later reference
         let mut inner = self.inner.lock().await;

         Self::process_fanout_results_inner(
             &mut inner,
-            &vcrypto,
             record_key.clone(),
             core::iter::once((ValueSubkeyRangeSet::single(subkey), result.fanout_result)),
             false,


@@ -294,7 +294,7 @@ impl StorageManager {
         let routing_table = self.routing_table();
         let fanout_call = FanoutCall::new(
             &routing_table,
-            record_key.into(),
+            record_key.to_hash_coordinate(),
             key_count,
             fanout,
             consensus_count,


@@ -198,7 +198,7 @@ impl StorageManager {
         for ck in VALID_CRYPTO_KINDS {
             let vcrypto = crypto.get(ck).unwrap();
             let kp = vcrypto.generate_keypair();
-            anonymous_watch_keys.add(KeyPair::new(ck, kp));
+            anonymous_watch_keys.add(kp);
         }

         let inner = Self::new_inner();
@@ -530,11 +530,11 @@ impl StorageManager {
         schema_data: &[u8],
     ) -> RecordKey {
         let mut hash_data = Vec::<u8>::with_capacity(owner_key.len() + 4 + schema_data.len());
-        hash_data.extend_from_slice(&vcrypto.kind().0);
+        hash_data.extend_from_slice(vcrypto.kind().bytes());
         hash_data.extend_from_slice(owner_key);
         hash_data.extend_from_slice(schema_data);
         let hash = vcrypto.generate_hash(&hash_data);
-        RecordKey::new(vcrypto.kind(), BareRecordKey::from(hash))
+        RecordKey::new(vcrypto.kind(), BareRecordKey::new(hash.ref_value()))
     }

     /// Create a local record from scratch with a new owner key, open it, and return the opened descriptor
@@ -894,7 +894,7 @@ impl StorageManager {
             &descriptor.owner(),
             subkey,
             &vcrypto,
-            &writer.bare_secret(),
+            &writer.secret(),
         )?);

         // Check if we are offline
@@ -1293,12 +1293,6 @@ impl StorageManager {
             subkeys
         };

-        // Get cryptosystem
-        let crypto = self.crypto();
-        let Some(vcrypto) = crypto.get(record_key.kind()) else {
-            apibail_generic!("unsupported cryptosystem");
-        };
-
         let mut inner = self.inner.lock().await;
         let safety_selection = {
             let Some(opened_record) = inner.opened_records.get(&record_key) else {
@@ -1390,7 +1384,6 @@ impl StorageManager {
         Self::process_fanout_results_inner(
             &mut inner,
-            &vcrypto,
             record_key.clone(),
             results_iter,
             false,
@@ -1552,7 +1545,7 @@ impl StorageManager {
             }
             owner
         } else {
-            KeyPair::new(vcrypto.kind(), vcrypto.generate_keypair())
+            vcrypto.generate_keypair()
         };

         // Calculate dht key
@@ -1563,7 +1556,7 @@ impl StorageManager {
             owner.key(),
             schema_data,
             &vcrypto,
-            owner.bare_secret(),
+            owner.secret(),
         )?);

         // Add new local value record
@@ -1704,7 +1697,7 @@ impl StorageManager {
         // Otherwise this is just another subkey writer
         let owner_secret = if let Some(writer) = writer.clone() {
             if writer.key() == owner {
-                Some(writer.bare_secret())
+                Some(writer.secret())
             } else {
                 None
             }
@@ -1753,7 +1746,7 @@ impl StorageManager {
         // Otherwise this is just another subkey writer
         let owner_secret = if let Some(writer) = &writer {
             if writer.key() == owner {
-                Some(writer.bare_secret())
+                Some(writer.secret())
             } else {
                 None
             }
@@ -1807,12 +1800,7 @@ impl StorageManager {
                 d.nodes
                     .keys()
                     .cloned()
-                    .filter_map(|x| {
-                        routing_table
-                            .lookup_node_ref(NodeId::new(record_key.kind(), x))
-                            .ok()
-                            .flatten()
-                    })
+                    .filter_map(|nr| routing_table.lookup_node_ref(nr).ok().flatten())
                     .collect()
         });
@@ -1822,7 +1810,6 @@ impl StorageManager {
     #[instrument(level = "trace", target = "stor", skip_all)]
     fn process_fanout_results_inner<I: IntoIterator<Item = (ValueSubkeyRangeSet, FanoutResult)>>(
         inner: &mut StorageManagerInner,
-        vcrypto: &CryptoSystemGuard<'_>,
         record_key: RecordKey,
         subkey_results_iter: I,
         is_set: bool,
@@ -1839,7 +1826,7 @@ impl StorageManager {
             for node_id in fanout_result
                 .value_nodes
                 .iter()
-                .filter_map(|x| x.node_ids().get(record_key.kind()).map(|k| k.value()))
+                .filter_map(|x| x.node_ids().get(record_key.kind()))
             {
                 let pnd = d.nodes.entry(node_id).or_default();
                 if is_set || pnd.last_set == Timestamp::default() {
@@ -1863,14 +1850,13 @@ impl StorageManager {
                 return res;
             }
             // Distance is the next metric, closer nodes first
-            let da = vcrypto.distance(
-                &BareHashDigest::from(a.0.clone()),
-                &BareHashDigest::from(record_key.value()),
-            );
-            let db = vcrypto.distance(
-                &BareHashDigest::from(b.0.clone()),
-                &BareHashDigest::from(record_key.value()),
-            );
+            let da =
+                a.0.to_hash_coordinate()
+                    .distance(&record_key.to_hash_coordinate());
+            let db =
+                b.0.to_hash_coordinate()
+                    .distance(&record_key.to_hash_coordinate());
             da.cmp(&db)
         });
@@ -1969,7 +1955,7 @@ impl StorageManager {
     }

     #[instrument(level = "trace", target = "stor", skip_all, err)]
-    pub(super) async fn handle_inspect_local_value_inner(
+    async fn handle_inspect_local_value_inner(
         &self,
         inner: &mut StorageManagerInner,
         record_key: RecordKey,
@@ -1998,7 +1984,7 @@ impl StorageManager {
     }

     #[instrument(level = "trace", target = "stor", skip_all, err)]
-    pub(super) async fn handle_get_remote_value_inner(
+    async fn handle_get_remote_value_inner(
         inner: &mut StorageManagerInner,
         record_key: RecordKey,
         subkey: ValueSubkey,

Some files were not shown because too many files have changed in this diff.