break everything

John Smith 2023-02-07 21:44:50 -05:00
parent 9d826b27db
commit a58a87719c
61 changed files with 1278 additions and 863 deletions

View File

@ -27,13 +27,24 @@ struct Nonce24 @0xb6260db25d8d7dfc {
u2 @2 :UInt64;
}
using NodeID = Key256;
using RoutePublicKey = Key256;
using ValueID = Key256;
using Nonce = Nonce24;
using Signature = Signature512;
using BlockID = Key256;
using TunnelID = UInt64;
using PublicKey = Key256; # Node id / DHT key / Route id, etc
using Nonce = Nonce24; # One-time encryption nonce
using Signature = Signature512; # Signature block
using TunnelID = UInt64; # Id for tunnels
using CryptoKind = UInt32; # FOURCC code for cryptography type
using ValueSeqNum = UInt32; # sequence numbers for values
using ValueSchema = UInt32; # FOURCC code for schema (0 = freeform, SUB0 = subkey control v0)
using Subkey = UInt32; # subkey index for dht
struct TypedKey {
kind @0 :CryptoKind;
key @1 :PublicKey;
}
struct TypedSignature {
kind @0 :CryptoKind;
signature @1 :Signature;
}
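The single NodeID/RoutePublicKey aliases above are replaced by TypedKey, which tags every key with the FOURCC of the cryptosystem that produced it. A minimal Rust sketch of the idea (hypothetical names, not the crate's types), using the data_encoding crate that the Rust side of this commit already depends on, showing the "KIND:base64url-key" rendering used later in types.rs:

// Sketch only: a typed key is a 4-byte FOURCC crypto kind plus the 32-byte
// key it qualifies, rendered as "KIND:key". Not the project's actual API.
use data_encoding::BASE64URL_NOPAD;

struct TypedKeySketch {
    kind: [u8; 4], // e.g. *b"VLD0"
    key: [u8; 32], // public key bytes for that cryptosystem
}

impl TypedKeySketch {
    fn to_display_string(&self) -> String {
        format!(
            "{}:{}",
            String::from_utf8_lossy(&self.kind),
            BASE64URL_NOPAD.encode(&self.key) // 32 bytes -> 43 chars, unpadded
        )
    }
}

fn main() {
    let tk = TypedKeySketch { kind: *b"VLD0", key: [0u8; 32] };
    assert_eq!(tk.to_display_string(), format!("VLD0:{}", "A".repeat(43)));
}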
# Node Dial Info
################################################################
@ -123,7 +134,7 @@ struct RouteHopData @0x8ce231f9d1b7adf2 {
struct RouteHop @0xf8f672d75cce0c3b {
node :union {
nodeId @0 :NodeID; # node id only for established routes
nodeId @0 :TypedKey; # node id only for established routes
peerInfo @1 :PeerInfo; # full peer info for this hop to establish the route
}
nextHop @2 :RouteHopData; # optional: If this the end of a private route, this field will not exist
@ -131,7 +142,7 @@ struct RouteHop @0xf8f672d75cce0c3b {
}
struct PrivateRoute @0x8a83fccb0851e776 {
publicKey @0 :RoutePublicKey; # private route public key (unique per private route)
publicKey @0 :TypedKey; # private route public key (unique per private route)
hopCount @1 :UInt8; # Count of hops left in the private route (for timeout calculation purposes only)
hops :union {
firstHop @2 :RouteHop; # first hop of a private route is unencrypted (hopcount > 0)
@ -141,7 +152,7 @@ struct PrivateRoute @0x8a83fccb0851e776 {
}
struct SafetyRoute @0xf554734d07cb5d59 {
publicKey @0 :RoutePublicKey; # safety route public key (unique per safety route)
publicKey @0 :TypedKey; # safety route public key (unique per safety route)
hopCount @1 :UInt8; # Count of hops left in the safety route (for timeout calculation purposes only)
hops :union {
data @2 :RouteHopData; # safety route has more hops
@ -152,16 +163,20 @@ struct SafetyRoute @0xf554734d07cb5d59 {
# Values
##############################
using ValueSeqNum = UInt32; # sequence numbers for values
struct SubkeyRange {
start @0 :Subkey; # the start of a subkey range
end @1 :Subkey; # the end of a subkey range
}
struct ValueKey @0xe64b0992c21a0736 {
publicKey @0 :ValueID; # the location of the value
subkey @1 :Text; # the name of the subkey (or empty for the default subkey)
publicKey @0 :TypedKey; # the location of the value
subkey @1 :Subkey; # the index of the subkey (0 for the default subkey)
}
struct ValueData @0xb4b7416f169f2a3d {
seq @0 :ValueSeqNum; # sequence number of value
data @1 :Data; # value or subvalue contents
schema @1 :ValueSchema; # fourcc code of schema for value
data @2 :Data; # value or subvalue contents
}
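ValueData now carries a schema FOURCC alongside the payload. As an illustration only, this sketch packs the four ASCII bytes big-endian into the UInt32; the byte order is an assumption for the sketch, not taken from this commit:

// Illustration of a FOURCC schema code packed into a UInt32; the big-endian
// packing here is assumed, not specified by the schema above.
fn fourcc(code: &[u8; 4]) -> u32 {
    u32::from_be_bytes(*code)
}

fn main() {
    assert_eq!(fourcc(b"SUB0"), 0x5355_4230); // subkey control v0
    assert_ne!(fourcc(b"SUB0"), 0);           // 0 is reserved for freeform values
}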
# Operations
@ -234,7 +249,7 @@ struct NodeInfo @0xe125d847e3f9f419 {
networkClass @0 :NetworkClass; # network class of this node
outboundProtocols @1 :ProtocolTypeSet; # protocols that can go outbound
addressTypes @2 :AddressTypeSet; # address types supported
minVersion @3 :UInt8; # minimum protocol version for rpc
envelopeSupport @3 :UInt8; # minimum protocol version for rpc
maxVersion @4 :UInt8; # maximum protocol version for rpc
dialInfoDetailList @5 :List(DialInfoDetail); # inbound dial info details for this node
}
@ -242,15 +257,15 @@ struct NodeInfo @0xe125d847e3f9f419 {
struct SignedDirectNodeInfo @0xe0e7ea3e893a3dd7 {
nodeInfo @0 :NodeInfo; # node info
timestamp @1 :UInt64; # when signed node info was generated
signature @2 :Signature; # signature
signatures @2 :List(TypedSignature); # signatures
}
struct SignedRelayedNodeInfo @0xb39e8428ccd87cbb {
nodeInfo @0 :NodeInfo; # node info
relayId @1 :NodeID; # node id for relay
relayId @1 :List(TypedKey); # node ids for relay
relayInfo @2 :SignedDirectNodeInfo; # signed node info for relay
timestamp @3 :UInt64; # when signed node info was generated
signature @4 :Signature; # signature
signatures @4 :List(TypedSignature); # signatures
}
struct SignedNodeInfo @0xd2478ce5f593406a {
@ -261,16 +276,15 @@ struct SignedNodeInfo @0xd2478ce5f593406a {
}
struct PeerInfo @0xfe2d722d5d3c4bcb {
nodeId @0 :NodeID; # node id for 'closer peer'
nodeIds @0 :List(TypedKey); # node ids for 'closer peer'
signedNodeInfo @1 :SignedNodeInfo; # signed node info for 'closer peer'
}
struct RoutedOperation @0xcbcb8535b839e9dd {
version @0 :UInt8; # crypto version in use for the data
sequencing @1 :Sequencing; # sequencing preference to use to pass the message along
signatures @2 :List(Signature); # signatures from nodes that have handled the private route
nonce @3 :Nonce; # nonce Xmsg
data @4 :Data; # operation encrypted with ENC(Xmsg,DH(PKapr,SKbsr))
sequencing @0 :Sequencing; # sequencing preference to use to pass the message along
signatures @1 :List(TypedSignature); # signatures from nodes that have handled the private route
nonce @2 :Nonce; # nonce Xmsg
data @3 :Data; # operation encrypted with ENC(Xmsg,DH(PKapr,SKbsr))
}
struct OperationStatusQ @0x865d80cea70d884a {
@ -293,7 +307,7 @@ struct OperationReturnReceipt @0xeb0fb5b5a9160eeb {
}
struct OperationFindNodeQ @0xfdef788fe9623bcd {
nodeId @0 :NodeID; # node id to locate
nodeId @0 :TypedKey; # node id to locate
}
struct OperationFindNodeA @0xa84cf2fb40c77089 {
@ -301,24 +315,25 @@ struct OperationFindNodeA @0xa84cf2fb40c77089 {
}
struct OperationRoute @0x96741859ce6ac7dd {
safetyRoute @0 :SafetyRoute; # Where this should go
operation @1 :RoutedOperation; # The operation to be routed
safetyRoute @0 :SafetyRoute; # where this should go
operation @1 :RoutedOperation; # the operation to be routed
}
struct OperationAppCallQ @0xade67b9f09784507 {
message @0 :Data; # Opaque request to application
message @0 :Data; # opaque request to application
}
struct OperationAppCallA @0xf7c797ac85f214b8 {
message @0 :Data; # Opaque response from application
message @0 :Data; # opaque response from application
}
struct OperationAppMessage @0x9baf542d81b411f5 {
message @0 :Data; # Opaque message to application
message @0 :Data; # opaque message to application
}
struct OperationGetValueQ @0xf88a5b6da5eda5d0 {
key @0 :ValueKey; # key for value to get
publicKey @0 :TypedKey; # the location of the value
subkey @1 :Subkey; # the index of the subkey (0 for the default subkey)
}
struct OperationGetValueA @0xd896bb46f2e0249f {
@ -329,8 +344,9 @@ struct OperationGetValueA @0xd896bb46f2e0249f {
}
struct OperationSetValueQ @0xbac06191ff8bdbc5 {
key @0 :ValueKey; # key for value to update
value @1 :ValueData; # value or subvalue contents (older or equal seq number gets dropped)
publicKey @0 :TypedKey; # the location of the value
subkey @1 :Subkey; # the index of the subkey (0 for the default subkey)
value @2 :ValueData; # value or subvalue contents (older or equal seq number gets dropped)
}
struct OperationSetValueA @0x9378d0732dc95be2 {
@ -341,21 +357,26 @@ struct OperationSetValueA @0x9378d0732dc95be2 {
}
struct OperationWatchValueQ @0xf9a5a6c547b9b228 {
key @0 :ValueKey; # key for value to watch
publicKey @0 :TypedKey; # key for value to watch
subkeys @1 :List(SubkeyRange); # subkey ranges to watch; if empty, watch everything
expiration @2 :UInt64; # requested timestamp when this watch will expire in usec since epoch (the returned expiration may be less; 0 means max)
count @3 :UInt32; # requested number of changes to watch for (0 = continuous, 1 = single shot, 2+ = counter)
}
struct OperationWatchValueA @0xa726cab7064ba893 {
expiration @0 :UInt64; # timestamp when this watch will expire in usec since epoch (0 if watch failed)
peers @1 :List(PeerInfo); # returned list of other nodes to ask that could propagate watches
peers @2 :List(PeerInfo); # returned list of other nodes to ask that could propagate watches
}
struct OperationValueChanged @0xd1c59ebdd8cc1bf6 {
key @0 :ValueKey; # key for value that changed
value @1 :ValueData; # value or subvalue contents with sequence number
publicKey @0 :TypedKey; # key for value that changed
subkeys @1 :List(SubkeyRange); # subkey ranges that changed (up to 512 ranges at a time)
count @2 :UInt32; # remaining changes left (0 means watch has expired)
value @3 :ValueData; # first value that changed (the rest can be gotten with getvalue)
}
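OperationWatchValueQ and OperationValueChanged both describe subkeys as a List(SubkeyRange), where an empty list means every subkey. A sketch of that containment rule (hypothetical helper, assuming the start and end bounds are inclusive):

// Hypothetical helper, not from the codebase: interprets List(SubkeyRange) as
// the comments above describe it, assuming start/end are inclusive bounds.
type Subkey = u32;

struct SubkeyRange {
    start: Subkey,
    end: Subkey,
}

fn subkey_is_watched(ranges: &[SubkeyRange], subkey: Subkey) -> bool {
    // An empty range list watches everything; otherwise the subkey must fall
    // inside at least one range.
    ranges.is_empty() || ranges.iter().any(|r| subkey >= r.start && subkey <= r.end)
}

fn main() {
    let ranges = [SubkeyRange { start: 0, end: 3 }, SubkeyRange { start: 10, end: 10 }];
    assert!(subkey_is_watched(&ranges, 2));
    assert!(!subkey_is_watched(&ranges, 7));
    assert!(subkey_is_watched(&[], 7)); // empty list: watch everything
}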
struct OperationSupplyBlockQ @0xadbf4c542d749971 {
blockId @0 :BlockID; # hash of the block we can supply
blockId @0 :TypedKey; # hash of the block we can supply
}
struct OperationSupplyBlockA @0xf003822e83b5c0d7 {
@ -366,7 +387,7 @@ struct OperationSupplyBlockA @0xf003822e83b5c0d7 {
}
struct OperationFindBlockQ @0xaf4353ff004c7156 {
blockId @0 :BlockID; # hash of the block to locate
blockId @0 :TypedKey; # hash of the block to locate
}
struct OperationFindBlockA @0xc51455bc4915465d {

View File

@ -67,12 +67,6 @@ impl ServicesContext {
}
self.protected_store = Some(protected_store.clone());
// Init node id from config now that protected store is set up
if let Err(e) = self.config.init_node_id(protected_store.clone()).await {
self.shutdown().await;
return Err(e).wrap_err("init node id failed");
}
// Set up tablestore
trace!("init table store");
let table_store = TableStore::new(self.config.clone());
@ -84,7 +78,11 @@ impl ServicesContext {
// Set up crypto
trace!("init crypto");
let crypto = Crypto::new(self.config.clone(), table_store.clone());
let crypto = Crypto::new(
self.config.clone(),
table_store.clone(),
protected_store.clone(),
);
if let Err(e) = crypto.init().await {
self.shutdown().await;
return Err(e);

View File

@ -1,4 +1,4 @@
use crate::*;
use super::*;
use core::cmp::{Eq, Ord, PartialEq, PartialOrd};
use core::convert::{TryFrom, TryInto};
@ -11,24 +11,42 @@ use rkyv::{Archive as RkyvArchive, Deserialize as RkyvDeserialize, Serialize as
//////////////////////////////////////////////////////////////////////
/// Length of a DHT key in bytes
/// Length of a public key in bytes
#[allow(dead_code)]
pub const DHT_KEY_LENGTH: usize = 32;
/// Length of a DHT key in bytes after encoding to base64url
pub const PUBLIC_KEY_LENGTH: usize = 32;
/// Length of a public key in bytes after encoding to base64url
#[allow(dead_code)]
pub const DHT_KEY_LENGTH_ENCODED: usize = 43;
/// Length of a DHT secret in bytes
pub const PUBLIC_KEY_LENGTH_ENCODED: usize = 43;
/// Length of a secret key in bytes
#[allow(dead_code)]
pub const DHT_KEY_SECRET_LENGTH: usize = 32;
/// Length of a DHT secret in bytes after encoding to base64url
pub const SECRET_KEY_LENGTH: usize = 32;
/// Length of a secret key in bytes after encoding to base64url
#[allow(dead_code)]
pub const DHT_KEY_SECRET_LENGTH_ENCODED: usize = 43;
/// Length of a DHT signature in bytes
pub const SECRET_KEY_LENGTH_ENCODED: usize = 43;
/// Length of a signature in bytes
#[allow(dead_code)]
/// Length of a DHT signature in bytes after encoding to base64url
pub const DHT_SIGNATURE_LENGTH: usize = 64;
pub const SIGNATURE_LENGTH: usize = 64;
/// Length of a signature in bytes after encoding to base64url
#[allow(dead_code)]
pub const DHT_SIGNATURE_LENGTH_ENCODED: usize = 86;
pub const SIGNATURE_LENGTH_ENCODED: usize = 86;
/// Length of a nonce in bytes
#[allow(dead_code)]
pub const NONCE_LENGTH: usize = 24;
/// Length of a nonce in bytes after encoding to base64url
#[allow(dead_code)]
pub const NONCE_LENGTH_ENCODED: usize = 32;
/// Length of a shared secret in bytes
#[allow(dead_code)]
pub const SHARED_SECRET_LENGTH: usize = 32;
/// Length of a shared secret in bytes after encoding to base64url
#[allow(dead_code)]
pub const SHARED_SECRET_LENGTH_ENCODED: usize = 43;
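The *_ENCODED constants are the unpadded base64url lengths of the corresponding byte sizes; a quick standalone check with the data_encoding crate already used by this module:

// Sanity check of the *_ENCODED constants above, assuming unpadded base64url
// as used elsewhere in this file.
use data_encoding::BASE64URL_NOPAD;

fn main() {
    assert_eq!(BASE64URL_NOPAD.encode_len(32), 43); // keys, secrets, shared secrets
    assert_eq!(BASE64URL_NOPAD.encode_len(64), 86); // signatures
    assert_eq!(BASE64URL_NOPAD.encode_len(24), 32); // nonces
}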
//////////////////////////////////////////////////////////////////////
pub trait Encodable {
fn encode(&self) -> String;
}
//////////////////////////////////////////////////////////////////////
@ -141,14 +159,13 @@ macro_rules! byte_array_type {
None
}
pub fn encode(&self) -> String {
BASE64URL_NOPAD.encode(&self.bytes)
}
pub fn try_decode<S: AsRef<str>>(input: S) -> Result<Self, VeilidAPIError> {
let b = input.as_ref().as_bytes();
Self::try_decode_bytes(b)
}
pub fn try_decode_bytes(b: &[u8]) -> Result<Self, VeilidAPIError> {
let mut bytes = [0u8; $size];
let res = BASE64URL_NOPAD.decode_len(input.as_ref().len());
let res = BASE64URL_NOPAD.decode_len(b.len());
match res {
Ok(v) => {
if v != $size {
@ -160,7 +177,7 @@ macro_rules! byte_array_type {
}
}
let res = BASE64URL_NOPAD.decode_mut(input.as_ref().as_bytes(), &mut bytes);
let res = BASE64URL_NOPAD.decode_mut(b, &mut bytes);
match res {
Ok(_) => Ok(Self::new(bytes)),
Err(_) => apibail_generic!("Failed to decode"),
@ -168,6 +185,11 @@ macro_rules! byte_array_type {
}
}
impl Encodable for $name {
fn encode(&self) -> String {
BASE64URL_NOPAD.encode(&self.bytes)
}
}
impl fmt::Display for $name {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
//write!(f, "{}", String::from(self))
@ -224,7 +246,9 @@ macro_rules! byte_array_type {
/////////////////////////////////////////
byte_array_type!(DHTKey, DHT_KEY_LENGTH);
byte_array_type!(DHTKeySecret, DHT_KEY_SECRET_LENGTH);
byte_array_type!(DHTSignature, DHT_SIGNATURE_LENGTH);
byte_array_type!(DHTKeyDistance, DHT_KEY_LENGTH);
byte_array_type!(PublicKey, PUBLIC_KEY_LENGTH);
byte_array_type!(SecretKey, SECRET_KEY_LENGTH);
byte_array_type!(Signature, SIGNATURE_LENGTH);
byte_array_type!(PublicKeyDistance, PUBLIC_KEY_LENGTH);
byte_array_type!(Nonce, NONCE_LENGTH);
byte_array_type!(SharedSecret, SHARED_SECRET_LENGTH);

View File

@ -2,14 +2,14 @@ use super::*;
pub trait CryptoSystem {
// Accessors
fn version(&self) -> CryptoVersion;
fn kind(&self) -> CryptoKind;
fn crypto(&self) -> Crypto;
// Cached Operations
fn cached_dh(
&self,
key: &DHTKey,
secret: &DHTKeySecret,
key: &PublicKey,
secret: &SecretKey,
) -> Result<SharedSecret, VeilidAPIError>;
// Generation
@ -17,40 +17,40 @@ pub trait CryptoSystem {
fn random_shared_secret(&self) -> SharedSecret;
fn compute_dh(
&self,
key: &DHTKey,
secret: &DHTKeySecret,
key: &PublicKey,
secret: &SecretKey,
) -> Result<SharedSecret, VeilidAPIError>;
fn generate_keypair(&self) -> (DHTKey, DHTKeySecret);
fn generate_hash(&self, data: &[u8]) -> DHTKey;
fn generate_keypair(&self) -> (PublicKey, SecretKey);
fn generate_hash(&self, data: &[u8]) -> PublicKey;
fn generate_hash_reader(
&self,
reader: &mut dyn std::io::Read,
) -> Result<DHTKey, VeilidAPIError>;
) -> Result<PublicKey, VeilidAPIError>;
// Validation
fn validate_keypair(&self, dht_key: &DHTKey, dht_key_secret: &DHTKeySecret) -> bool;
fn validate_hash(&self, data: &[u8], dht_key: &DHTKey) -> bool;
fn validate_keypair(&self, dht_key: &PublicKey, dht_key_secret: &SecretKey) -> bool;
fn validate_hash(&self, data: &[u8], dht_key: &PublicKey) -> bool;
fn validate_hash_reader(
&self,
reader: &mut dyn std::io::Read,
dht_key: &DHTKey,
dht_key: &PublicKey,
) -> Result<bool, VeilidAPIError>;
// Distance Metric
fn distance(&self, key1: &DHTKey, key2: &DHTKey) -> DHTKeyDistance;
fn distance(&self, key1: &PublicKey, key2: &PublicKey) -> PublicKeyDistance;
// Authentication
fn sign(
&self,
dht_key: &DHTKey,
dht_key_secret: &DHTKeySecret,
dht_key: &PublicKey,
dht_key_secret: &SecretKey,
data: &[u8],
) -> Result<DHTSignature, VeilidAPIError>;
) -> Result<Signature, VeilidAPIError>;
fn verify(
&self,
dht_key: &DHTKey,
dht_key: &PublicKey,
data: &[u8],
signature: &DHTSignature,
signature: &Signature,
) -> Result<(), VeilidAPIError>;
// AEAD Encrypt/Decrypt

View File

@ -1,73 +1,65 @@
#![allow(dead_code)]
#![allow(clippy::absurd_extreme_comparisons)]
use super::*;
use crate::routing_table::VersionRange;
use crate::*;
use core::convert::TryInto;
/// Envelopes are versioned along with crypto versions
/// Envelopes are versioned
///
/// These are the formats for the on-the-wire serialization performed by this module
///
/// #[repr(C, packed)]
/// struct EnvelopeHeader {
/// // Size is at least 8 bytes. Depending on the version specified, the size may vary and the header should be cast to the appropriate struct
/// magic: [u8; 4], // 0x00: 0x56 0x4C 0x49 0x44 ("VLID")
/// version: u8, // 0x04: 0 = EnvelopeV0
/// min_version: u8, // 0x05: 0 = EnvelopeV0
/// max_version: u8, // 0x06: 0 = EnvelopeV0
/// reserved: u8, // 0x07: Reserved for future use
/// // Size is at least 4 bytes. Depending on the version specified, the size may vary and the header should be cast to the appropriate struct
/// magic: [u8; 3], // 0x00: 0x56 0x4C 0x44 ("VLD")
/// version: u8, // 0x03: 0 = EnvelopeV0
/// }
///
/// #[repr(C, packed)]
/// struct EnvelopeV0 {
/// // Size is 106 bytes.
/// magic: [u8; 4], // 0x00: 0x56 0x4C 0x49 0x44 ("VLID")
/// version: u8, // 0x04: 0 = EnvelopeV0
/// min_version: u8, // 0x05: 0 = EnvelopeV0
/// max_version: u8, // 0x06: 0 = EnvelopeV0
/// reserved: u8, // 0x07: Reserved for future use
/// // Size is 106 bytes without signature and 170 with signature
/// magic: [u8; 3], // 0x00: 0x56 0x4C 0x44 ("VLD")
/// version: u8, // 0x03: 0 = EnvelopeV0
/// crypto_kind: [u8; 4], // 0x04: CryptoSystemVersion FOURCC code (CryptoKind)
/// size: u16, // 0x08: Total size of the envelope including the encrypted operations message. Maximum size is 65,507 bytes, which is the data size limit for a single UDP message on IPv4.
/// timestamp: u64, // 0x0A: Duration since UNIX_EPOCH in microseconds when this message is sent. Messages older than 10 seconds are dropped.
/// nonce: [u8; 24], // 0x12: Random nonce for replay protection and for x25519
/// sender_id: [u8; 32], // 0x2A: Node ID of the message source, which is the Ed25519 public key of the sender (must be verified with find_node if this is a new node_id/address combination)
/// recipient_id: [u8; 32], // 0x4A: Node ID of the intended recipient, which is the Ed25519 public key of the recipient (must be the receiving node, or a relay lease holder)
/// nonce: [u8; 24], // 0x12: Random nonce for replay protection and for dh
/// sender_id: [u8; 32], // 0x2A: Node ID of the message source, which is the public key of the sender (must be verified with find_node if this is a new node_id/address combination)
/// recipient_id: [u8; 32], // 0x4A: Node ID of the intended recipient, which is the public key of the recipient (must be the receiving node, or a relay lease holder)
/// // 0x6A: message is appended (operations)
/// // encrypted by XChaCha20Poly1305(nonce,x25519(recipient_id, sender_secret_key))
/// signature: [u8; 64], // 0x?? (end-0x40): Ed25519 signature of the entire envelope including header is appended to the packet
/// signature: [u8; 64], // 0x?? (end-0x40): Signature of the entire envelope including header is appended to the packet
/// // entire header needs to be included in message digest, relays are not allowed to modify the envelope without invalidating the signature.
/// }
pub const MAX_ENVELOPE_SIZE: usize = 65507;
pub const MIN_ENVELOPE_SIZE: usize = 0x6A + 0x40; // Header + Signature
pub const ENVELOPE_MAGIC: &[u8; 4] = b"VLID";
pub type EnvelopeNonce = [u8; 24];
pub const ENVELOPE_MAGIC: &[u8; 3] = b"VLD";
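For reference while reading from_signed_data and the serialization code below, these are the V0 header offsets from the layout comment above; a reading aid under the same assumptions, not code from the crate:

// EnvelopeV0 byte offsets, per the layout comment above. Header is 0x6A (106)
// bytes, then the encrypted operations body, then a trailing 64-byte signature.
const MAGIC_OFFSET: usize = 0x00;        // 3 bytes, b"VLD"
const VERSION_OFFSET: usize = 0x03;      // 1 byte, envelope version
const CRYPTO_KIND_OFFSET: usize = 0x04;  // 4 bytes, FOURCC such as b"VLD0"
const SIZE_OFFSET: usize = 0x08;         // 2 bytes, u16 LE total envelope size
const TIMESTAMP_OFFSET: usize = 0x0A;    // 8 bytes, u64 LE microseconds
const NONCE_OFFSET: usize = 0x12;        // 24 bytes
const SENDER_ID_OFFSET: usize = 0x2A;    // 32 bytes
const RECIPIENT_ID_OFFSET: usize = 0x4A; // 32 bytes
const BODY_OFFSET: usize = 0x6A;         // encrypted operations start here
const SIGNATURE_LEN: usize = 0x40;       // appended after the body

fn main() {
    // Header plus signature is the minimum envelope size, matching
    // MIN_ENVELOPE_SIZE = 0x6A + 0x40 above.
    assert_eq!(BODY_OFFSET + SIGNATURE_LEN, 170);
    assert_eq!(RECIPIENT_ID_OFFSET + 32, BODY_OFFSET);
    let _ = (MAGIC_OFFSET, VERSION_OFFSET, CRYPTO_KIND_OFFSET, SIZE_OFFSET,
             TIMESTAMP_OFFSET, NONCE_OFFSET, SENDER_ID_OFFSET);
}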
#[derive(Debug, Clone, PartialEq, Eq, Default)]
pub struct Envelope {
version: u8,
min_version: u8,
max_version: u8,
crypto_kind: CryptoKind,
timestamp: Timestamp,
nonce: EnvelopeNonce,
sender_id: DHTKey,
recipient_id: DHTKey,
nonce: Nonce,
sender_id: PublicKey,
recipient_id: PublicKey,
}
impl Envelope {
pub fn new(
version: u8,
crypto_kind: CryptoKind,
timestamp: Timestamp,
nonce: EnvelopeNonce,
sender_id: DHTKey,
recipient_id: DHTKey,
nonce: Nonce,
sender_id: PublicKey,
recipient_id: PublicKey,
) -> Self {
assert!(version >= MIN_CRYPTO_VERSION);
assert!(version <= MAX_CRYPTO_VERSION);
assert!(version >= MIN_ENVELOPE_VERSION);
assert!(version <= MAX_ENVELOPE_VERSION);
assert!(VALID_CRYPTO_KINDS.contains(&crypto_kind));
Self {
version,
min_version: MIN_CRYPTO_VERSION,
max_version: MAX_CRYPTO_VERSION,
crypto_kind,
timestamp,
nonce,
sender_id,
@ -75,7 +67,7 @@ impl Envelope {
}
}
pub fn from_signed_data(data: &[u8]) -> Result<Envelope, VeilidAPIError> {
pub fn from_signed_data(crypto: Crypto, data: &[u8]) -> Result<Envelope, VeilidAPIError> {
// Ensure we are at least the length of the envelope
// Silent drop here, as we use zero length packets as part of the protocol for hole punching
if data.len() < MIN_ENVELOPE_SIZE {
@ -83,33 +75,28 @@ impl Envelope {
}
// Verify magic number
let magic: [u8; 4] = data[0x00..0x04]
let magic: [u8; 3] = data[0x00..0x03]
.try_into()
.map_err(VeilidAPIError::internal)?;
if magic != *ENVELOPE_MAGIC {
apibail_generic!("bad magic number");
}
// Check version
let version = data[0x04];
if version > MAX_CRYPTO_VERSION || version < MIN_CRYPTO_VERSION {
apibail_parse_error!("unsupported cryptography version", version);
// Check envelope version
let version = data[0x03];
if version > MAX_ENVELOPE_VERSION || version < MIN_ENVELOPE_VERSION {
apibail_parse_error!("unsupported envelope version", version);
}
// Get min version
let min_version = data[0x05];
if min_version > version {
apibail_parse_error!("version too low", version);
}
// Get max version
let max_version = data[0x06];
if version > max_version {
apibail_parse_error!("version too high", version);
}
if min_version > max_version {
apibail_generic!("version information invalid");
}
// Check crypto kind
let crypto_kind = CryptoKind(
data[0x04..0x08]
.try_into()
.map_err(VeilidAPIError::internal)?,
);
let Some(vcrypto) = crypto.get(crypto_kind) else {
apibail_parse_error!("unsupported crypto kind", crypto_kind);
};
// Get size and ensure it matches the size of the envelope and is less than the maximum message size
let size: u16 = u16::from_le_bytes(
@ -140,17 +127,18 @@ impl Envelope {
.into();
// Get nonce and sender node id
let nonce: EnvelopeNonce = data[0x12..0x2A]
let nonce_slice: [u8; NONCE_LENGTH] = data[0x12..0x2A]
.try_into()
.map_err(VeilidAPIError::internal)?;
let sender_id_slice: [u8; 32] = data[0x2A..0x4A]
let sender_id_slice: [u8; PUBLIC_KEY_LENGTH] = data[0x2A..0x4A]
.try_into()
.map_err(VeilidAPIError::internal)?;
let recipient_id_slice: [u8; 32] = data[0x4A..0x6A]
let recipient_id_slice: [u8; PUBLIC_KEY_LENGTH] = data[0x4A..0x6A]
.try_into()
.map_err(VeilidAPIError::internal)?;
let sender_id = DHTKey::new(sender_id_slice);
let recipient_id = DHTKey::new(recipient_id_slice);
let nonce: Nonce = Nonce::new(nonce_slice);
let sender_id = PublicKey::new(sender_id_slice);
let recipient_id = PublicKey::new(recipient_id_slice);
// Ensure sender_id and recipient_id are not the same
if sender_id == recipient_id {
@ -161,21 +149,21 @@ impl Envelope {
}
// Get signature
let signature = DHTSignature::new(
let signature = Signature::new(
data[(data.len() - 64)..]
.try_into()
.map_err(VeilidAPIError::internal)?,
);
// Validate signature
verify(&sender_id, &data[0..(data.len() - 64)], &signature)
vcrypto
.verify(&sender_id, &data[0..(data.len() - 64)], &signature)
.map_err(VeilidAPIError::internal)?;
// Return envelope
Ok(Self {
version,
min_version,
max_version,
crypto_kind,
timestamp,
nonce,
sender_id,
@ -187,10 +175,12 @@ impl Envelope {
&self,
crypto: Crypto,
data: &[u8],
node_id_secret: &DHTKeySecret,
node_id_secret: &SecretKey,
) -> Result<Vec<u8>, VeilidAPIError> {
// Get DH secret
let vcrypto = crypto.get(self.version)?;
let vcrypto = crypto
.get(self.crypto_kind)
.expect("need to ensure only valid crypto kinds here");
let dh_secret = vcrypto.cached_dh(&self.sender_id, node_id_secret)?;
// Decrypt message without authentication
@ -204,40 +194,41 @@ impl Envelope {
&self,
crypto: Crypto,
body: &[u8],
node_id_secret: &DHTKeySecret,
node_id_secret: &SecretKey,
) -> Result<Vec<u8>, VeilidAPIError> {
// Ensure body isn't too long
let envelope_size: usize = body.len() + MIN_ENVELOPE_SIZE;
if envelope_size > MAX_ENVELOPE_SIZE {
apibail_parse_error!("envelope size is too large", envelope_size);
}
// Generate dh secret
let vcrypto = crypto
.get(self.crypto_kind)
.expect("need to ensure only valid crypto kinds here");
let dh_secret = vcrypto.cached_dh(&self.recipient_id, node_id_secret)?;
// Write envelope body
let mut data = vec![0u8; envelope_size];
// Write magic
data[0x00..0x04].copy_from_slice(ENVELOPE_MAGIC);
data[0x00..0x03].copy_from_slice(ENVELOPE_MAGIC);
// Write version
data[0x04] = self.version;
// Write min version
data[0x05] = self.min_version;
// Write max version
data[0x06] = self.max_version;
data[0x03] = self.version;
// Write crypto kind
data[0x04..0x08].copy_from_slice(&self.crypto_kind.0);
// Write size
data[0x08..0x0A].copy_from_slice(&(envelope_size as u16).to_le_bytes());
// Write timestamp
data[0x0A..0x12].copy_from_slice(&self.timestamp.as_u64().to_le_bytes());
// Write nonce
data[0x12..0x2A].copy_from_slice(&self.nonce);
data[0x12..0x2A].copy_from_slice(&self.nonce.bytes);
// Write sender node id
data[0x2A..0x4A].copy_from_slice(&self.sender_id.bytes);
// Write recipient node id
data[0x4A..0x6A].copy_from_slice(&self.recipient_id.bytes);
// Generate dh secret
let vcrypto = crypto.get(self.version)?;
let dh_secret = vcrypto.cached_dh(&self.recipient_id, node_id_secret)?;
// Encrypt and authenticate message
let encrypted_body = vcrypto.crypt_no_auth_aligned_8(body, &self.nonce, &dh_secret);
let encrypted_body = vcrypto.crypt_no_auth_unaligned(body, &self.nonce, &dh_secret);
// Write body
if !encrypted_body.is_empty() {
@ -245,7 +236,7 @@ impl Envelope {
}
// Sign the envelope
let signature = sign(
let signature = vcrypto.sign(
&self.sender_id,
node_id_secret,
&data[0..(envelope_size - 64)],
@ -261,25 +252,22 @@ impl Envelope {
self.version
}
pub fn get_min_max_version(&self) -> VersionRange {
VersionRange {
min: self.min_version,
max: self.max_version,
}
pub fn get_crypto_kind(&self) -> CryptoKind {
self.crypto_kind
}
pub fn get_timestamp(&self) -> Timestamp {
self.timestamp
}
pub fn get_nonce(&self) -> EnvelopeNonce {
pub fn get_nonce(&self) -> Nonce {
self.nonce
}
pub fn get_sender_id(&self) -> DHTKey {
pub fn get_sender_id(&self) -> PublicKey {
self.sender_id
}
pub fn get_recipient_id(&self) -> DHTKey {
pub fn get_recipient_id(&self) -> PublicKey {
self.recipient_id
}
}

View File

@ -1,21 +1,20 @@
mod byte_array_types;
mod envelope;
mod key;
mod receipt;
mod types;
mod value;
pub mod crypto_system;
pub mod tests;
pub mod v0;
pub mod vld0;
pub use byte_array_types::*;
pub use crypto_system::*;
pub use envelope::*;
pub use key::*;
pub use receipt::*;
pub use types::*;
pub use value::*;
pub type CryptoVersion = u8;
pub const MIN_CRYPTO_VERSION: CryptoVersion = 0u8;
pub const MAX_CRYPTO_VERSION: CryptoVersion = 0u8;
pub use vld0::*;
use crate::*;
use core::convert::TryInto;
@ -23,17 +22,18 @@ use hashlink::linked_hash_map::Entry;
use hashlink::LruCache;
use serde::{Deserialize, Serialize};
pub type SharedSecret = [u8; 32];
pub type Nonce = [u8; 24];
pub type CryptoSystemVersion = Arc<dyn CryptoSystem + Send + Sync>;
pub const VALID_CRYPTO_KINDS: [CryptoKind; 1] = [CRYPTO_KIND_VLD0];
pub const MIN_ENVELOPE_VERSION: u8 = 0u8;
pub const MAX_ENVELOPE_VERSION: u8 = 0u8;
const DH_CACHE_SIZE: usize = 4096;
#[derive(Serialize, Deserialize, PartialEq, Eq, Hash)]
struct DHCacheKey {
key: DHTKey,
secret: DHTKeySecret,
key: PublicKey,
secret: SecretKey,
}
#[derive(Serialize, Deserialize)]
@ -48,7 +48,7 @@ fn cache_to_bytes(cache: &DHCache) -> Vec<u8> {
for e in cache.iter() {
out.extend(&e.0.key.bytes);
out.extend(&e.0.secret.bytes);
out.extend(&e.1.shared_secret);
out.extend(&e.1.shared_secret.bytes);
}
let mut rev: Vec<u8> = Vec::with_capacity(out.len());
for d in out.chunks(32 + 32 + 32).rev() {
@ -60,70 +60,101 @@ fn cache_to_bytes(cache: &DHCache) -> Vec<u8> {
fn bytes_to_cache(bytes: &[u8], cache: &mut DHCache) {
for d in bytes.chunks(32 + 32 + 32) {
let k = DHCacheKey {
key: DHTKey::new(d[0..32].try_into().expect("asdf")),
secret: DHTKeySecret::new(d[32..64].try_into().expect("asdf")),
key: PublicKey::new(d[0..32].try_into().expect("asdf")),
secret: SecretKey::new(d[32..64].try_into().expect("asdf")),
};
let v = DHCacheValue {
shared_secret: d[64..96].try_into().expect("asdf"),
shared_secret: SharedSecret::new(d[64..96].try_into().expect("asdf")),
};
cache.insert(k, v);
}
}
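cache_to_bytes and bytes_to_cache flatten each DH cache entry into a fixed 96-byte chunk, which is why both sides walk the buffer in chunks(32 + 32 + 32). A standalone sketch of that per-entry layout (not crate code):

// Per-entry layout used by the DH cache serialization above:
// 32-byte public key | 32-byte secret key | 32-byte shared secret = 96 bytes.
fn pack_entry(key: &[u8; 32], secret: &[u8; 32], shared: &[u8; 32]) -> [u8; 96] {
    let mut out = [0u8; 96];
    out[0..32].copy_from_slice(key);
    out[32..64].copy_from_slice(secret);
    out[64..96].copy_from_slice(shared);
    out
}

fn unpack_entry(chunk: &[u8]) -> ([u8; 32], [u8; 32], [u8; 32]) {
    (
        chunk[0..32].try_into().unwrap(),
        chunk[32..64].try_into().unwrap(),
        chunk[64..96].try_into().unwrap(),
    )
}

fn main() {
    let packed = pack_entry(&[1u8; 32], &[2u8; 32], &[3u8; 32]);
    assert_eq!(unpack_entry(&packed), ([1u8; 32], [2u8; 32], [3u8; 32]));
}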
struct CryptoInner {
table_store: TableStore,
node_id: DHTKey,
node_id_secret: DHTKeySecret,
dh_cache: DHCache,
flush_future: Option<SendPinBoxFuture<()>>,
crypto_v0: Option<Arc<dyn CryptoSystem + Send + Sync>>,
crypto_vld0: Option<Arc<dyn CryptoSystem + Send + Sync>>,
}
struct CryptoUnlockedInner {
config: VeilidConfig,
table_store: TableStore,
protected_store: ProtectedStore,
}
/// Crypto factory implementation
#[derive(Clone)]
pub struct Crypto {
config: VeilidConfig,
unlocked_inner: Arc<CryptoUnlockedInner>,
inner: Arc<Mutex<CryptoInner>>,
}
impl Crypto {
fn new_inner(table_store: TableStore) -> CryptoInner {
fn new_inner() -> CryptoInner {
CryptoInner {
table_store,
node_id: Default::default(),
node_id_secret: Default::default(),
dh_cache: DHCache::new(DH_CACHE_SIZE),
flush_future: None,
crypto_v0: None,
crypto_vld0: None,
}
}
pub fn new(config: VeilidConfig, table_store: TableStore) -> Self {
pub fn new(
config: VeilidConfig,
table_store: TableStore,
protected_store: ProtectedStore,
) -> Self {
let out = Self {
config,
inner: Arc::new(Mutex::new(Self::new_inner(table_store))),
unlocked_inner: Arc::new(CryptoUnlockedInner {
config,
table_store,
protected_store,
}),
inner: Arc::new(Mutex::new(Self::new_inner())),
};
out.inner.lock().crypto_v0 = Some(Arc::new(v0::CryptoV0System::new(out.clone())));
out.inner.lock().crypto_vld0 = Some(Arc::new(vld0::CryptoSystemVLD0::new(out.clone())));
out
}
pub async fn init(&self) -> EyreResult<()> {
trace!("Crypto::init");
let table_store = self.unlocked_inner.table_store.clone();
// Init node id from config
if let Err(e) = self
.unlocked_inner
.config
.init_node_ids(self.clone(), self.unlocked_inner.protected_store.clone())
.await
{
return Err(e).wrap_err("init node id failed");
}
// make local copy of node id for easy access
let (table_store, node_id) = {
let mut inner = self.inner.lock();
let c = self.config.get();
inner.node_id = c.network.node_id.unwrap();
inner.node_id_secret = c.network.node_id_secret.unwrap();
(inner.table_store.clone(), c.network.node_id)
let mut cache_validity_key: Vec<u8> = Vec::new();
{
let c = self.unlocked_inner.config.get();
for ck in &VALID_CRYPTO_KINDS {
cache_validity_key.append(
&mut c
.network
.routing_table
.node_ids
.get(ck)
.unwrap()
.node_id
.unwrap()
.bytes
.to_vec(),
);
}
};
// load caches if they are valid for this node id
let mut db = table_store.open("crypto_caches", 1).await?;
let caches_valid = match db.load(0, b"node_id")? {
Some(v) => v.as_slice() == node_id.unwrap().bytes,
let caches_valid = match db.load(0, b"cache_validity_key")? {
Some(v) => v == cache_validity_key,
None => false,
};
if caches_valid {
@ -135,7 +166,8 @@ impl Crypto {
drop(db);
table_store.delete("crypto_caches").await?;
db = table_store.open("crypto_caches", 1).await?;
db.store(0, b"node_id", &node_id.unwrap().bytes).await?;
db.store(0, b"cache_validity_key", &cache_validity_key)
.await?;
}
// Schedule flushing
@ -155,13 +187,16 @@ impl Crypto {
pub async fn flush(&self) -> EyreResult<()> {
//trace!("Crypto::flush");
let (table_store, cache_bytes) = {
let cache_bytes = {
let inner = self.inner.lock();
let cache_bytes = cache_to_bytes(&inner.dh_cache);
(inner.table_store.clone(), cache_bytes)
cache_to_bytes(&inner.dh_cache)
};
let db = table_store.open("crypto_caches", 1).await?;
let db = self
.unlocked_inner
.table_store
.open("crypto_caches", 1)
.await?;
db.store(0, b"dh_cache", &cache_bytes).await?;
Ok(())
}
@ -183,26 +218,67 @@ impl Crypto {
};
}
// Factory
fn get(&self, version: CryptoVersion) -> Result<CryptoSystemVersion, VeilidAPIError> {
/// Factory method to get a specific crypto version
pub fn get(&self, kind: CryptoKind) -> Option<CryptoSystemVersion> {
let inner = self.inner.lock();
match version {
0u8 => Ok(inner.crypto_v0.clone().unwrap()),
_ => Err(VeilidAPIError::InvalidArgument {
context: "Unsupported crypto version".to_owned(),
argument: "version".to_owned(),
value: format!("{}", version),
}),
match kind {
CRYPTO_KIND_VLD0 => Some(inner.crypto_vld0.clone().unwrap()),
_ => None,
}
}
/// Signature set verification
/// Returns the set of signature cryptokinds that validate and are supported
/// If any cryptokinds are supported and do not validate, the whole operation
/// returns an error
pub fn verify_signatures<F, R>(
&self,
data: &[u8],
signatures: &[TypedKeySignature],
transform: F,
) -> Result<Vec<R>, VeilidAPIError>
where
F: Fn(&TypedKeySignature) -> R,
{
let mut out = Vec::<R>::with_capacity(signatures.len());
for sig in signatures {
if let Some(vcrypto) = self.get(sig.kind) {
vcrypto.verify(&sig.key, data, &sig.signature)?;
out.push(transform(sig));
}
}
Ok(out)
}
/// Signature set generation
/// Generates the set of signatures that are supported
/// Any cryptokinds that are not supported are silently dropped
pub fn generate_signatures<F, R>(
&self,
data: &[u8],
keypairs: &[TypedKeyPair],
transform: F,
) -> Result<Vec<R>, VeilidAPIError>
where
F: Fn(&TypedKeyPair, Signature) -> R,
{
let mut out = Vec::<R>::with_capacity(keypairs.len());
for kp in keypairs {
if let Some(vcrypto) = self.get(kp.kind) {
let sig = vcrypto.sign(&kp.key, &kp.secret, data)?;
out.push(transform(kp, sig))
}
}
Ok(out)
}
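A hedged usage sketch of the two helpers above; it assumes it sits inside this crate (Crypto, TypedKeyPair, TypedSignature, TypedKeySignature in scope) and that the caller already holds one keypair for a supported kind:

// Hypothetical usage of generate_signatures / verify_signatures above; not a
// complete program, assumes the crate's types are in scope.
fn sign_and_check(
    crypto: &Crypto,
    keypair: TypedKeyPair,
    data: &[u8],
) -> Result<(), VeilidAPIError> {
    // One TypedSignature per supported cryptokind in the keypair set
    let sigs: Vec<TypedSignature> = crypto.generate_signatures(data, &[keypair], |kp, sig| {
        TypedSignature::from_pair_sig(kp, sig)
    })?;

    // Re-attach the public keys so the signatures can be verified later
    let keyed: Vec<TypedKeySignature> = sigs
        .iter()
        .map(|s| TypedKeySignature::new(s.kind, keypair.key, s.signature))
        .collect();

    // verify_signatures returns the transformed subset that validated
    let validated = crypto.verify_signatures(data, &keyed, |tks| tks.kind)?;
    assert_eq!(validated, vec![keypair.kind]);
    Ok(())
}

Per the doc comments above, kinds this node does not support are silently skipped in both directions, while a supported signature that fails to validate aborts verification with an error.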
// Internal utilities
fn cached_dh_internal<T: CryptoSystem>(
&self,
vcrypto: &T,
key: &DHTKey,
secret: &DHTKeySecret,
key: &PublicKey,
secret: &SecretKey,
) -> Result<SharedSecret, VeilidAPIError> {
Ok(
match self.inner.lock().dh_cache.entry(DHCacheKey {

View File

@ -3,64 +3,57 @@
use super::*;
use crate::*;
use core::convert::TryInto;
use data_encoding::BASE64URL_NOPAD;
/// Out-of-band receipts are versioned along with crypto versions
/// Out-of-band receipts are versioned along with envelope versions
///
/// These are the formats for the on-the-wire serialization performed by this module
///
/// #[repr(C, packed)]
/// struct ReceiptHeader {
/// // Size is at least 8 bytes. Depending on the version specified, the size may vary and the header should be cast to the appropriate struct
/// magic: [u8; 4], // 0x00: 0x52 0x43 0x50 0x54 ("RCPT")
/// version: u8, // 0x04: 0 = ReceiptV0
/// reserved: u8, // 0x05: Reserved for future use
/// // Size is at least 4 bytes. Depending on the version specified, the size may vary and the header should be cast to the appropriate struct
/// magic: [u8; 3], // 0x00: 0x52 0x43 0x50 ("RCP")
/// version: u8, // 0x03: 0 = ReceiptV0
/// }
///
/// #[repr(C, packed)]
/// struct ReceiptV0 {
/// // Size is 106 bytes.
/// magic: [u8; 4], // 0x00: 0x52 0x43 0x50 0x54 ("RCPT")
/// version: u8, // 0x04: 0 = ReceiptV0
/// reserved: u8, // 0x05: Reserved for future use
/// size: u16, // 0x06: Total size of the receipt including the extra data and the signature. Maximum size is 1152 bytes.
/// nonce: [u8; 24], // 0x08: Randomly chosen bytes that represent a unique receipt. Could be used to encrypt the extra data, but it's not required.
/// sender_id: [u8; 32], // 0x20: Node ID of the message source, which is the Ed25519 public key of the sender
/// extra_data: [u8; ??], // 0x40: Extra data is appended (arbitrary extra data, not encrypted by receipt itself, maximum size is 1024 bytes)
/// signature: [u8; 64], // 0x?? (end-0x40): Ed25519 signature of the entire receipt including header and extra data is appended to the packet
/// // Size is 66 bytes without extra data and signature, 130 with signature
/// magic: [u8; 3], // 0x00: 0x52 0x43 0x50 ("RCP")
/// version: u8, // 0x03: 0 = ReceiptV0
/// crypto_kind: [u8; 4], // 0x04: CryptoSystemVersion FOURCC code
/// size: u16, // 0x08: Total size of the receipt including the extra data and the signature. Maximum size is 1380 bytes.
/// nonce: [u8; 24], // 0x0A: Randomly chosen bytes that represent a unique receipt. Could be used to encrypt the extra data, but it's not required.
/// sender_id: [u8; 32], // 0x22: Node ID of the message source, which is the public key of the sender
/// extra_data: [u8; ??], // 0x42: Extra data is appended (arbitrary extra data, not encrypted by receipt itself, maximum size is 1250 bytes)
/// signature: [u8; 64], // 0x?? (end-0x40): Signature of the entire receipt including header and extra data is appended to the packet
/// }
pub const MAX_RECEIPT_SIZE: usize = 1152;
pub const MAX_EXTRA_DATA_SIZE: usize = 1024;
pub const MIN_RECEIPT_SIZE: usize = 128;
pub const RECEIPT_MAGIC: &[u8; 4] = b"RCPT";
pub type ReceiptNonce = [u8; 24];
pub trait Encodable {
fn encode(&self) -> String;
}
impl Encodable for ReceiptNonce {
fn encode(&self) -> String {
BASE64URL_NOPAD.encode(self)
}
}
pub const MAX_RECEIPT_SIZE: usize = 1380;
pub const MAX_EXTRA_DATA_SIZE: usize = MAX_RECEIPT_SIZE - MIN_RECEIPT_SIZE; // 1250
pub const MIN_RECEIPT_SIZE: usize = 130;
pub const RECEIPT_MAGIC: &[u8; 3] = b"RCP";
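The arithmetic behind the receipt size constants, matching the field sizes in the layout comment above (reading aid, not crate code):

// ReceiptV0 sizes from the layout above.
fn main() {
    let header = 3 + 1 + 4 + 2 + 24 + 32; // magic + version + crypto kind + size + nonce + sender id
    let signature = 64;
    assert_eq!(header, 66);              // "66 bytes without extra data and signature"
    assert_eq!(header + signature, 130); // MIN_RECEIPT_SIZE
    assert_eq!(1380 - 130, 1250);        // MAX_EXTRA_DATA_SIZE = MAX_RECEIPT_SIZE - MIN_RECEIPT_SIZE
}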
#[derive(Debug, Clone, PartialEq, Eq, Default)]
pub struct Receipt {
version: u8,
nonce: ReceiptNonce,
sender_id: DHTKey,
crypto_kind: CryptoKind,
nonce: Nonce,
sender_id: PublicKey,
extra_data: Vec<u8>,
}
impl Receipt {
pub fn try_new<D: AsRef<[u8]>>(
version: u8,
nonce: ReceiptNonce,
sender_id: DHTKey,
crypto_kind: CryptoKind,
nonce: Nonce,
sender_id: PublicKey,
extra_data: D,
) -> Result<Self, VeilidAPIError> {
assert!(version >= MIN_ENVELOPE_VERSION);
assert!(version <= MAX_ENVELOPE_VERSION);
assert!(VALID_CRYPTO_KINDS.contains(&crypto_kind));
if extra_data.as_ref().len() > MAX_EXTRA_DATA_SIZE {
apibail_parse_error!(
"extra data too large for receipt",
@ -69,20 +62,21 @@ impl Receipt {
}
Ok(Self {
version,
crypto_kind,
nonce,
sender_id,
extra_data: Vec::from(extra_data.as_ref()),
})
}
pub fn from_signed_data(data: &[u8]) -> Result<Receipt, VeilidAPIError> {
pub fn from_signed_data(crypto: Crypto, data: &[u8]) -> Result<Receipt, VeilidAPIError> {
// Ensure we are at least the length of the receipt
if data.len() < MIN_RECEIPT_SIZE {
apibail_parse_error!("receipt too small", data.len());
}
// Verify magic number
let magic: [u8; 4] = data[0x00..0x04]
let magic: [u8; 3] = data[0x00..0x03]
.try_into()
.map_err(VeilidAPIError::internal)?;
if magic != *RECEIPT_MAGIC {
@ -90,14 +84,24 @@ impl Receipt {
}
// Check version
let version = data[0x04];
if version > MAX_CRYPTO_VERSION || version < MIN_CRYPTO_VERSION {
apibail_parse_error!("unsupported cryptography version", version);
let version = data[0x03];
if version > MAX_ENVELOPE_VERSION || version < MIN_ENVELOPE_VERSION {
apibail_parse_error!("unsupported envelope version", version);
}
// Check crypto kind
let crypto_kind = CryptoKind(
data[0x04..0x08]
.try_into()
.map_err(VeilidAPIError::internal)?,
);
let Some(vcrypto) = crypto.get(crypto_kind) else {
apibail_parse_error!("unsupported crypto kind", crypto_kind);
};
// Get size and ensure it matches the size of the receipt and is less than the maximum message size
let size: u16 = u16::from_le_bytes(
data[0x06..0x08]
data[0x08..0x0A]
.try_into()
.map_err(VeilidAPIError::internal)?,
);
@ -112,64 +116,80 @@ impl Receipt {
}
// Get sender id
let sender_id = DHTKey::new(
data[0x20..0x40]
let sender_id = PublicKey::new(
data[0x22..0x42]
.try_into()
.map_err(VeilidAPIError::internal)?,
);
// Get signature
let signature = DHTSignature::new(
let signature = Signature::new(
data[(data.len() - 64)..]
.try_into()
.map_err(VeilidAPIError::internal)?,
);
// Validate signature
verify(&sender_id, &data[0..(data.len() - 64)], &signature)
vcrypto
.verify(&sender_id, &data[0..(data.len() - 64)], &signature)
.map_err(VeilidAPIError::generic)?;
// Get nonce
let nonce: ReceiptNonce = data[0x08..0x20]
.try_into()
.map_err(VeilidAPIError::internal)?;
let nonce: Nonce = Nonce::new(
data[0x0A..0x22]
.try_into()
.map_err(VeilidAPIError::internal)?,
);
// Get extra data and signature
let extra_data: Vec<u8> = Vec::from(&data[0x40..(data.len() - 64)]);
let extra_data: Vec<u8> = Vec::from(&data[0x42..(data.len() - 64)]);
// Return receipt
Ok(Self {
version,
crypto_kind,
nonce,
sender_id,
extra_data,
})
}
pub fn to_signed_data(&self, secret: &DHTKeySecret) -> Result<Vec<u8>, VeilidAPIError> {
pub fn to_signed_data(
&self,
crypto: Crypto,
secret: &SecretKey,
) -> Result<Vec<u8>, VeilidAPIError> {
// Ensure extra data isn't too long
let receipt_size: usize = self.extra_data.len() + MIN_RECEIPT_SIZE;
if receipt_size > MAX_RECEIPT_SIZE {
apibail_parse_error!("receipt too large", receipt_size);
}
// Get crypto version
let vcrypto = crypto
.get(self.crypto_kind)
.expect("need to ensure only valid crypto kinds here");
let mut data: Vec<u8> = vec![0u8; receipt_size];
// Write magic
data[0x00..0x04].copy_from_slice(RECEIPT_MAGIC);
data[0x00..0x03].copy_from_slice(RECEIPT_MAGIC);
// Write version
data[0x04] = self.version;
data[0x03] = self.version;
// Write crypto kind
data[0x04..0x08].copy_from_slice(&self.crypto_kind.0);
// Write size
data[0x06..0x08].copy_from_slice(&(receipt_size as u16).to_le_bytes());
data[0x08..0x0A].copy_from_slice(&(receipt_size as u16).to_le_bytes());
// Write nonce
data[0x08..0x20].copy_from_slice(&self.nonce);
data[0x0A..0x22].copy_from_slice(&self.nonce.bytes);
// Write sender node id
data[0x20..0x40].copy_from_slice(&self.sender_id.bytes);
data[0x22..0x42].copy_from_slice(&self.sender_id.bytes);
// Write extra data
if !self.extra_data.is_empty() {
data[0x40..(receipt_size - 64)].copy_from_slice(self.extra_data.as_slice());
data[0x42..(receipt_size - 64)].copy_from_slice(self.extra_data.as_slice());
}
// Sign the receipt
let signature = sign(&self.sender_id, secret, &data[0..(receipt_size - 64)])
let signature = vcrypto
.sign(&self.sender_id, secret, &data[0..(receipt_size - 64)])
.map_err(VeilidAPIError::generic)?;
// Append the signature
data[(receipt_size - 64)..].copy_from_slice(&signature.bytes);
@ -181,11 +201,15 @@ impl Receipt {
self.version
}
pub fn get_nonce(&self) -> ReceiptNonce {
pub fn get_crypto_kind(&self) -> CryptoKind {
self.crypto_kind
}
pub fn get_nonce(&self) -> Nonce {
self.nonce
}
pub fn get_sender_id(&self) -> DHTKey {
pub fn get_sender_id(&self) -> PublicKey {
self.sender_id
}
pub fn get_extra_data(&self) -> &[u8] {

View File

@ -1,6 +1,6 @@
pub mod test_crypto;
pub mod test_dht_key;
pub mod test_envelope_receipt;
pub mod test_types;
use super::*;
use crate::tests::common::test_veilid_config::*;

View File

@ -167,7 +167,7 @@ pub async fn test_all() {
let crypto = api.crypto().unwrap();
// Test versions
for v in MIN_CRYPTO_VERSION..=MAX_CRYPTO_VERSION {
for v in VALID_CRYPTO_KINDS {
let vcrypto = crypto.get(v).unwrap();
test_aead(vcrypto.clone()).await;
test_no_auth(vcrypto.clone()).await;

View File

@ -8,7 +8,14 @@ pub async fn test_envelope_round_trip(vcrypto: CryptoSystemVersion) {
let nonce = vcrypto.random_nonce();
let (sender_id, sender_secret) = vcrypto.generate_keypair();
let (recipient_id, recipient_secret) = vcrypto.generate_keypair();
let envelope = Envelope::new(vcrypto.version(), ts, nonce, sender_id, recipient_id);
let envelope = Envelope::new(
MAX_ENVELOPE_VERSION,
vcrypto.kind(),
ts,
nonce,
sender_id,
recipient_id,
);
// Create arbitrary body
let body = b"This is an arbitrary body";
@ -19,8 +26,8 @@ pub async fn test_envelope_round_trip(vcrypto: CryptoSystemVersion) {
.expect("failed to encrypt data");
// Deserialize from bytes
let envelope2 =
Envelope::from_signed_data(&enc_data).expect("failed to deserialize envelope from data");
let envelope2 = Envelope::from_signed_data(vcrypto.crypto(), &enc_data)
.expect("failed to deserialize envelope from data");
let body2 = envelope2
.decrypt_body(vcrypto.crypto(), &enc_data, &recipient_secret)
@ -35,13 +42,13 @@ pub async fn test_envelope_round_trip(vcrypto: CryptoSystemVersion) {
let mut mod_enc_data = enc_data.clone();
mod_enc_data[enc_data_len - 1] ^= 0x80u8;
assert!(
Envelope::from_signed_data(&mod_enc_data).is_err(),
Envelope::from_signed_data(vcrypto.crypto(), &mod_enc_data).is_err(),
"should have failed to decode envelope with modified signature"
);
let mut mod_enc_data2 = enc_data.clone();
mod_enc_data2[enc_data_len - 65] ^= 0x80u8;
assert!(
Envelope::from_signed_data(&mod_enc_data2).is_err(),
Envelope::from_signed_data(vcrypto.crypto(), &mod_enc_data2).is_err(),
"should have failed to decode envelope with modified data"
);
}
@ -54,20 +61,21 @@ pub async fn test_receipt_round_trip(vcrypto: CryptoSystemVersion) {
// Create receipt
let nonce = vcrypto.random_nonce();
let (sender_id, sender_secret) = vcrypto.generate_keypair();
let receipt = Receipt::try_new(0, nonce, sender_id, body).expect("should not fail");
let receipt = Receipt::try_new(MAX_ENVELOPE_VERSION, vcrypto.kind(), nonce, sender_id, body)
.expect("should not fail");
// Serialize to bytes
let mut enc_data = receipt
.to_signed_data(&sender_secret)
.to_signed_data(vcrypto.crypto(), &sender_secret)
.expect("failed to make signed data");
// Deserialize from bytes
let receipt2 =
Receipt::from_signed_data(&enc_data).expect("failed to deserialize envelope from data");
let receipt2 = Receipt::from_signed_data(vcrypto.crypto(), &enc_data)
.expect("failed to deserialize envelope from data");
// Should not validate even when a single bit is changed
enc_data[5] = 0x01;
Receipt::from_signed_data(&enc_data)
Receipt::from_signed_data(vcrypto.crypto(), &enc_data)
.expect_err("should have failed to decrypt using wrong secret");
// Compare receipts
@ -79,7 +87,7 @@ pub async fn test_all() {
let crypto = api.crypto().unwrap();
// Test versions
for v in MIN_CRYPTO_VERSION..=MAX_CRYPTO_VERSION {
for v in VALID_CRYPTO_KINDS {
let vcrypto = crypto.get(v).unwrap();
test_envelope_round_trip(vcrypto.clone()).await;

View File

@ -5,8 +5,8 @@ use core::convert::TryFrom;
static LOREM_IPSUM:&str = "Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum. ";
static CHEEZBURGER: &str = "I can has cheezburger";
static EMPTY_KEY: [u8; key::DHT_KEY_LENGTH] = [0u8; key::DHT_KEY_LENGTH];
static EMPTY_KEY_SECRET: [u8; key::DHT_KEY_SECRET_LENGTH] = [0u8; key::DHT_KEY_SECRET_LENGTH];
static EMPTY_KEY: [u8; PUBLIC_KEY_LENGTH] = [0u8; PUBLIC_KEY_LENGTH];
static EMPTY_KEY_SECRET: [u8; SECRET_KEY_LENGTH] = [0u8; SECRET_KEY_LENGTH];
pub async fn test_generate_secret(vcrypto: CryptoSystemVersion) {
// Verify keys generate
@ -119,7 +119,7 @@ pub async fn test_sign_and_verify(vcrypto: CryptoSystemVersion) {
pub async fn test_key_conversions(vcrypto: CryptoSystemVersion) {
// Test default key
let (dht_key, dht_key_secret) = (key::DHTKey::default(), key::DHTKeySecret::default());
let (dht_key, dht_key_secret) = (PublicKey::default(), SecretKey::default());
assert_eq!(dht_key.bytes, EMPTY_KEY);
assert_eq!(dht_key_secret.bytes, EMPTY_KEY_SECRET);
let dht_key_string = String::from(&dht_key);
@ -150,50 +150,49 @@ pub async fn test_key_conversions(vcrypto: CryptoSystemVersion) {
assert_ne!(dht_key_secret2_string, dht_key2_string);
// Assert they convert back correctly
let dht_key_back = key::DHTKey::try_from(dht_key_string.as_str()).unwrap();
let dht_key_back2 = key::DHTKey::try_from(dht_key_string2.as_str()).unwrap();
let dht_key_back = PublicKey::try_from(dht_key_string.as_str()).unwrap();
let dht_key_back2 = PublicKey::try_from(dht_key_string2.as_str()).unwrap();
assert_eq!(dht_key_back, dht_key_back2);
assert_eq!(dht_key_back, dht_key);
assert_eq!(dht_key_back2, dht_key);
let dht_key_secret_back = key::DHTKeySecret::try_from(dht_key_secret_string.as_str()).unwrap();
let dht_key_secret_back = SecretKey::try_from(dht_key_secret_string.as_str()).unwrap();
assert_eq!(dht_key_secret_back, dht_key_secret);
let dht_key2_back = key::DHTKey::try_from(dht_key2_string.as_str()).unwrap();
let dht_key2_back2 = key::DHTKey::try_from(dht_key2_string2.as_str()).unwrap();
let dht_key2_back = PublicKey::try_from(dht_key2_string.as_str()).unwrap();
let dht_key2_back2 = PublicKey::try_from(dht_key2_string2.as_str()).unwrap();
assert_eq!(dht_key2_back, dht_key2_back2);
assert_eq!(dht_key2_back, dht_key2);
assert_eq!(dht_key2_back2, dht_key2);
let dht_key_secret2_back =
key::DHTKeySecret::try_from(dht_key_secret2_string.as_str()).unwrap();
let dht_key_secret2_back = SecretKey::try_from(dht_key_secret2_string.as_str()).unwrap();
assert_eq!(dht_key_secret2_back, dht_key_secret2);
// Assert string roundtrip
assert_eq!(String::from(&dht_key2_back), dht_key2_string);
// These conversions should fail
assert!(key::DHTKey::try_from("whatever").is_err());
assert!(key::DHTKeySecret::try_from("whatever").is_err());
assert!(key::DHTKey::try_from("").is_err());
assert!(key::DHTKeySecret::try_from("").is_err());
assert!(key::DHTKey::try_from(" ").is_err());
assert!(key::DHTKeySecret::try_from(" ").is_err());
assert!(key::DHTKey::try_from(
assert!(PublicKey::try_from("whatever").is_err());
assert!(SecretKey::try_from("whatever").is_err());
assert!(PublicKey::try_from("").is_err());
assert!(SecretKey::try_from("").is_err());
assert!(PublicKey::try_from(" ").is_err());
assert!(SecretKey::try_from(" ").is_err());
assert!(PublicKey::try_from(
"qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqq"
)
.is_err());
assert!(key::DHTKeySecret::try_from(
assert!(SecretKey::try_from(
"qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqq"
)
.is_err());
}
pub async fn test_encode_decode(vcrypto: CryptoSystemVersion) {
let dht_key = key::DHTKey::try_decode("AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA").unwrap();
let dht_key = PublicKey::try_decode("AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA").unwrap();
let dht_key_secret =
key::DHTKeySecret::try_decode("AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA").unwrap();
let dht_key_b = key::DHTKey::new(EMPTY_KEY);
let dht_key_secret_b = key::DHTKeySecret::new(EMPTY_KEY_SECRET);
SecretKey::try_decode("AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA").unwrap();
let dht_key_b = PublicKey::new(EMPTY_KEY);
let dht_key_secret_b = SecretKey::new(EMPTY_KEY_SECRET);
assert_eq!(dht_key, dht_key_b);
assert_eq!(dht_key_secret, dht_key_secret_b);
@ -209,31 +208,31 @@ pub async fn test_encode_decode(vcrypto: CryptoSystemVersion) {
let e2s = dht_key_secret2.encode();
trace!("e2s: {:?}", e2s);
let d1 = key::DHTKey::try_decode(e1.as_str()).unwrap();
let d1 = PublicKey::try_decode(e1.as_str()).unwrap();
trace!("d1: {:?}", d1);
assert_eq!(dht_key, d1);
let d1s = key::DHTKeySecret::try_decode(e1s.as_str()).unwrap();
let d1s = SecretKey::try_decode(e1s.as_str()).unwrap();
trace!("d1s: {:?}", d1s);
assert_eq!(dht_key_secret, d1s);
let d2 = key::DHTKey::try_decode(e2.as_str()).unwrap();
let d2 = PublicKey::try_decode(e2.as_str()).unwrap();
trace!("d2: {:?}", d2);
assert_eq!(dht_key2, d2);
let d2s = key::DHTKeySecret::try_decode(e2s.as_str()).unwrap();
let d2s = SecretKey::try_decode(e2s.as_str()).unwrap();
trace!("d2s: {:?}", d2s);
assert_eq!(dht_key_secret2, d2s);
// Failures
let f1 = key::DHTKeySecret::try_decode("AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA");
let f1 = SecretKey::try_decode("AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA");
assert!(f1.is_err());
let f2 = key::DHTKeySecret::try_decode("AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA&");
let f2 = SecretKey::try_decode("AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA&");
assert!(f2.is_err());
}
async fn test_hash(vcrypto: CryptoSystemVersion) {
let mut s = BTreeSet::<key::DHTKey>::new();
let mut s = BTreeSet::<PublicKey>::new();
let k1 = vcrypto.generate_hash("abc".as_bytes());
let k2 = vcrypto.generate_hash("abcd".as_bytes());
@ -333,7 +332,7 @@ pub async fn test_all() {
let crypto = api.crypto().unwrap();
// Test versions
for v in MIN_CRYPTO_VERSION..=MAX_CRYPTO_VERSION {
for v in VALID_CRYPTO_KINDS {
let vcrypto = crypto.get(v).unwrap();
test_generate_secret(vcrypto.clone()).await;

View File

@ -0,0 +1,233 @@
use super::*;
use core::cmp::{Eq, Ord, PartialEq, PartialOrd};
use core::convert::{TryFrom, TryInto};
use core::fmt;
use core::hash::Hash;
use rkyv::{Archive as RkyvArchive, Deserialize as RkyvDeserialize, Serialize as RkyvSerialize};
/// Cryptography version fourcc code
#[derive(
Copy,
Debug,
Default,
Clone,
Hash,
PartialOrd,
Ord,
PartialEq,
Eq,
Serialize,
Deserialize,
RkyvArchive,
RkyvSerialize,
RkyvDeserialize,
)]
#[archive_attr(repr(C), derive(CheckBytes, PartialOrd, Ord, PartialEq, Eq))]
pub struct CryptoKind(pub [u8; 4]);
impl From<[u8; 4]> for CryptoKind {
fn from(b: [u8; 4]) -> Self {
Self(b)
}
}
impl TryFrom<&[u8]> for CryptoKind {
type Error = VeilidAPIError;
fn try_from(b: &[u8]) -> Result<Self, Self::Error> {
Ok(Self(b.try_into().map_err(VeilidAPIError::generic)?))
}
}
impl fmt::Display for CryptoKind {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
write!(f, "{}", String::from_utf8_lossy(&self.0))
}
}
impl FromStr for CryptoKind {
type Err = VeilidAPIError;
fn from_str(s: &str) -> Result<Self, Self::Err> {
Ok(Self(
s.as_bytes().try_into().map_err(VeilidAPIError::generic)?,
))
}
}
#[derive(
Clone, Copy, Debug, Serialize, Deserialize, RkyvArchive, RkyvSerialize, RkyvDeserialize,
)]
#[archive_attr(repr(C), derive(CheckBytes))]
pub struct TypedKey {
pub kind: CryptoKind,
pub key: PublicKey,
}
impl TypedKey {
pub fn new(kind: CryptoKind, key: PublicKey) -> Self {
Self { kind, key }
}
}
impl fmt::Display for TypedKey {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
write!(f, "{}:{}", self.kind, self.key.encode())
}
}
impl FromStr for TypedKey {
type Err = VeilidAPIError;
fn from_str(s: &str) -> Result<Self, Self::Err> {
let b = s.as_bytes();
if b.len() != (5 + PUBLIC_KEY_LENGTH_ENCODED) || b[4..5] != b":"[..] {
apibail_parse_error!("invalid typed key", s);
}
let kind: CryptoKind = b[0..4].try_into().expect("should not fail to convert");
let key = PublicKey::try_decode_bytes(&b[5..])?;
Ok(Self { kind, key })
}
}
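A usage sketch for the string form parsed above, a 4-character FOURCC kind, a colon, then the 43-character base64url key; it assumes it runs inside the crate where TypedKey and CRYPTO_KIND_VLD0 are in scope:

// Hypothetical usage of TypedKey's FromStr/Display pair; assumes the crate's
// types are in scope.
fn main() {
    // 32 zero bytes encode to 43 'A' characters in unpadded base64url
    let s = format!("VLD0:{}", "A".repeat(43));
    let tk: TypedKey = s.parse().expect("well-formed typed key");
    assert_eq!(tk.kind, CRYPTO_KIND_VLD0);
    assert_eq!(tk.key.bytes, [0u8; 32]);
    assert_eq!(tk.to_string(), s); // Display round-trips the same encoding
}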
#[derive(
Clone, Copy, Debug, Serialize, Deserialize, RkyvArchive, RkyvSerialize, RkyvDeserialize,
)]
#[archive_attr(repr(C), derive(CheckBytes))]
pub struct TypedKeyPair {
pub kind: CryptoKind,
pub key: PublicKey,
pub secret: SecretKey,
}
impl TypedKeyPair {
pub fn new(kind: CryptoKind, key: PublicKey, secret: SecretKey) -> Self {
Self { kind, key, secret }
}
}
impl fmt::Display for TypedKeyPair {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
write!(
f,
"{}:{}:{}",
self.kind,
self.key.encode(),
self.secret.encode()
)
}
}
impl FromStr for TypedKeyPair {
type Err = VeilidAPIError;
fn from_str(s: &str) -> Result<Self, Self::Err> {
let b = s.as_bytes();
if b.len() != (5 + PUBLIC_KEY_LENGTH_ENCODED + 1 + SECRET_KEY_LENGTH_ENCODED)
|| b[4..5] != b":"[..]
|| b[5 + PUBLIC_KEY_LENGTH_ENCODED..6 + PUBLIC_KEY_LENGTH_ENCODED] != b":"[..]
{
apibail_parse_error!("invalid typed key pair", s);
}
let kind: CryptoKind = b[0..4].try_into().expect("should not fail to convert");
let key = PublicKey::try_decode_bytes(&b[5..5 + PUBLIC_KEY_LENGTH_ENCODED])?;
let secret = SecretKey::try_decode_bytes(&b[5 + PUBLIC_KEY_LENGTH_ENCODED + 1..])?;
Ok(Self { kind, key, secret })
}
}
#[derive(
Clone, Copy, Debug, Serialize, Deserialize, RkyvArchive, RkyvSerialize, RkyvDeserialize,
)]
#[archive_attr(repr(C), derive(CheckBytes))]
pub struct TypedSignature {
pub kind: CryptoKind,
pub signature: Signature,
}
impl TypedSignature {
pub fn new(kind: CryptoKind, signature: Signature) -> Self {
Self { kind, signature }
}
pub fn from_keyed(tks: &TypedKeySignature) -> Self {
Self {
kind: tks.kind,
signature: tks.signature,
}
}
pub fn from_pair_sig(tkp: &TypedKeyPair, sig: Signature) -> Self {
Self {
kind: tkp.kind,
signature: sig,
}
}
}
impl fmt::Display for TypedSignature {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
write!(f, "{}:{}", self.kind, self.signature.encode())
}
}
impl FromStr for TypedSignature {
type Err = VeilidAPIError;
fn from_str(s: &str) -> Result<Self, Self::Err> {
let b = s.as_bytes();
if b.len() != (5 + SIGNATURE_LENGTH_ENCODED) || b[4..5] != b":"[..] {
apibail_parse_error!("invalid typed signature", s);
}
let kind: CryptoKind = b[0..4].try_into()?;
let signature = Signature::try_decode_bytes(&b[5..])?;
Ok(Self { kind, signature })
}
}
#[derive(
Clone, Copy, Debug, Serialize, Deserialize, RkyvArchive, RkyvSerialize, RkyvDeserialize,
)]
#[archive_attr(repr(C), derive(CheckBytes))]
pub struct TypedKeySignature {
pub kind: CryptoKind,
pub key: PublicKey,
pub signature: Signature,
}
impl TypedKeySignature {
pub fn new(kind: CryptoKind, key: PublicKey, signature: Signature) -> Self {
Self {
kind,
key,
signature,
}
}
pub fn as_typed_signature(&self) -> TypedSignature {
TypedSignature {
kind: self.kind,
signature: self.signature,
}
}
}
impl fmt::Display for TypedKeySignature {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
write!(
f,
"{}:{}:{}",
self.kind,
self.key.encode(),
self.signature.encode()
)
}
}
impl FromStr for TypedKeySignature {
type Err = VeilidAPIError;
fn from_str(s: &str) -> Result<Self, Self::Err> {
let b = s.as_bytes();
if b.len() != (5 + PUBLIC_KEY_LENGTH_ENCODED + 1 + SIGNATURE_LENGTH_ENCODED)
|| b[4] != b':'
|| b[5 + PUBLIC_KEY_LENGTH_ENCODED] != b':'
{
apibail_parse_error!("invalid typed key signature", s);
}
let kind: CryptoKind = b[0..4].try_into().expect("should not fail to convert");
let key = PublicKey::try_decode_bytes(&b[5..5 + PUBLIC_KEY_LENGTH_ENCODED])?;
let signature = Signature::try_decode_bytes(&b[5 + PUBLIC_KEY_LENGTH_ENCODED + 1..])?;
Ok(Self {
kind,
key,
signature,
})
}
}
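
As a quick orientation for the new typed-key string forms, here is a minimal round-trip sketch. It is illustrative only and not part of this commit: the zeroed key and the function name are made up, and CRYPTO_KIND_VLD0 comes from the VLD0 module further down.

use core::str::FromStr;

fn typed_key_roundtrip_sketch() -> Result<(), VeilidAPIError> {
    // Hypothetical all-zero key; real keys come from a CryptoSystem's generate_keypair()
    let tkey = TypedKey::new(CRYPTO_KIND_VLD0, PublicKey::new([0u8; PUBLIC_KEY_LENGTH]));

    // Display renders "KIND:<encoded key>", e.g. "VLD0:AAAA..."
    let s = tkey.to_string();

    // FromStr checks the total length and the ':' at byte index 4, then decodes the key
    let parsed = TypedKey::from_str(&s)?;
    assert_eq!(parsed.to_string(), s);
    Ok(())
}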

View File

@ -1,7 +1,7 @@
pub mod blake3digest512;
pub use blake3digest512::*;
pub use super::*;
use super::*;
use chacha20::cipher::{KeyIvInit, StreamCipher};
use chacha20::XChaCha20;
@ -11,11 +11,10 @@ use core::convert::TryInto;
use curve25519_dalek as cd;
use digest::Digest;
use ed25519_dalek as ed;
use ed25519_dalek::{Keypair, PublicKey, Signature};
use x25519_dalek as xd;
const AEAD_OVERHEAD: usize = 16;
pub const CRYPTO_KIND_VLD0: CryptoKind = CryptoKind([b'V', b'L', b'D', b'0']);
fn ed25519_to_x25519_pk(key: &ed::PublicKey) -> Result<xd::PublicKey, VeilidAPIError> {
let bytes = key.to_bytes();
@ -35,80 +34,80 @@ fn ed25519_to_x25519_sk(key: &ed::SecretKey) -> Result<xd::StaticSecret, VeilidA
/// VLD0 CryptoSystem
#[derive(Clone)]
pub struct CryptoV0System {
pub struct CryptoSystemVLD0 {
crypto: Crypto,
}
impl CryptoV0System {
impl CryptoSystemVLD0 {
pub fn new(crypto: Crypto) -> Self {
Self { crypto }
}
}
impl CryptoSystem for CryptoV0System {
impl CryptoSystem for CryptoSystemVLD0 {
// Accessors
fn version(&self) -> CryptoVersion {
return 0u8;
fn kind(&self) -> CryptoKind {
CRYPTO_KIND_VLD0
}
fn crypto(&self) -> Crypto {
return self.crypto.clone();
self.crypto.clone()
}
// Cached Operations
fn cached_dh(
&self,
key: &DHTKey,
secret: &DHTKeySecret,
key: &PublicKey,
secret: &SecretKey,
) -> Result<SharedSecret, VeilidAPIError> {
self.crypto
.cached_dh_internal::<CryptoV0System>(self, key, secret)
.cached_dh_internal::<CryptoSystemVLD0>(self, key, secret)
}
// Generation
fn random_nonce(&self) -> Nonce {
let mut nonce = [0u8; 24];
random_bytes(&mut nonce).unwrap();
nonce
Nonce::new(nonce)
}
fn random_shared_secret(&self) -> SharedSecret {
let mut s = [0u8; 32];
random_bytes(&mut s).unwrap();
s
SharedSecret::new(s)
}
fn compute_dh(
&self,
key: &DHTKey,
secret: &DHTKeySecret,
key: &PublicKey,
secret: &SecretKey,
) -> Result<SharedSecret, VeilidAPIError> {
let pk_ed = ed::PublicKey::from_bytes(&key.bytes).map_err(VeilidAPIError::internal)?;
let pk_xd = ed25519_to_x25519_pk(&pk_ed)?;
let sk_ed = ed::SecretKey::from_bytes(&secret.bytes).map_err(VeilidAPIError::internal)?;
let sk_xd = ed25519_to_x25519_sk(&sk_ed)?;
Ok(sk_xd.diffie_hellman(&pk_xd).to_bytes())
Ok(SharedSecret::new(sk_xd.diffie_hellman(&pk_xd).to_bytes()))
}
fn generate_keypair(&self) -> (DHTKey, DHTKeySecret) {
fn generate_keypair(&self) -> (PublicKey, SecretKey) {
let mut csprng = VeilidRng {};
let keypair = Keypair::generate(&mut csprng);
let dht_key = DHTKey::new(keypair.public.to_bytes());
let dht_key_secret = DHTKeySecret::new(keypair.secret.to_bytes());
let keypair = ed::Keypair::generate(&mut csprng);
let dht_key = PublicKey::new(keypair.public.to_bytes());
let dht_key_secret = SecretKey::new(keypair.secret.to_bytes());
(dht_key, dht_key_secret)
}
fn generate_hash(&self, data: &[u8]) -> DHTKey {
DHTKey::new(*blake3::hash(data).as_bytes())
fn generate_hash(&self, data: &[u8]) -> PublicKey {
PublicKey::new(*blake3::hash(data).as_bytes())
}
fn generate_hash_reader(
&self,
reader: &mut dyn std::io::Read,
) -> Result<DHTKey, VeilidAPIError> {
) -> Result<PublicKey, VeilidAPIError> {
let mut hasher = blake3::Hasher::new();
std::io::copy(reader, &mut hasher).map_err(VeilidAPIError::generic)?;
Ok(DHTKey::new(*hasher.finalize().as_bytes()))
Ok(PublicKey::new(*hasher.finalize().as_bytes()))
}
// Validation
fn validate_keypair(&self, dht_key: &DHTKey, dht_key_secret: &DHTKeySecret) -> bool {
fn validate_keypair(&self, dht_key: &PublicKey, dht_key_secret: &SecretKey) -> bool {
let data = vec![0u8; 512];
let sig = match self.sign(dht_key, dht_key_secret, &data) {
Ok(s) => s,
@ -118,7 +117,7 @@ impl CryptoSystem for CryptoV0System {
};
self.verify(dht_key, &data, &sig).is_ok()
}
fn validate_hash(&self, data: &[u8], dht_key: &DHTKey) -> bool {
fn validate_hash(&self, data: &[u8], dht_key: &PublicKey) -> bool {
let bytes = *blake3::hash(data).as_bytes();
bytes == dht_key.bytes
@ -126,7 +125,7 @@ impl CryptoSystem for CryptoV0System {
fn validate_hash_reader(
&self,
reader: &mut dyn std::io::Read,
dht_key: &DHTKey,
dht_key: &PublicKey,
) -> Result<bool, VeilidAPIError> {
let mut hasher = blake3::Hasher::new();
std::io::copy(reader, &mut hasher).map_err(VeilidAPIError::generic)?;
@ -134,29 +133,29 @@ impl CryptoSystem for CryptoV0System {
Ok(bytes == dht_key.bytes)
}
// Distance Metric
fn distance(&self, key1: &DHTKey, key2: &DHTKey) -> DHTKeyDistance {
let mut bytes = [0u8; DHT_KEY_LENGTH];
fn distance(&self, key1: &PublicKey, key2: &PublicKey) -> PublicKeyDistance {
let mut bytes = [0u8; PUBLIC_KEY_LENGTH];
for (n, byte) in bytes.iter_mut().enumerate() {
*byte = key1.bytes[n] ^ key2.bytes[n];
}
DHTKeyDistance::new(bytes)
PublicKeyDistance::new(bytes)
}
// Authentication
fn sign(
&self,
dht_key: &DHTKey,
dht_key_secret: &DHTKeySecret,
dht_key: &PublicKey,
dht_key_secret: &SecretKey,
data: &[u8],
) -> Result<DHTSignature, VeilidAPIError> {
let mut kpb: [u8; DHT_KEY_SECRET_LENGTH + DHT_KEY_LENGTH] =
[0u8; DHT_KEY_SECRET_LENGTH + DHT_KEY_LENGTH];
) -> Result<Signature, VeilidAPIError> {
let mut kpb: [u8; SECRET_KEY_LENGTH + PUBLIC_KEY_LENGTH] =
[0u8; SECRET_KEY_LENGTH + PUBLIC_KEY_LENGTH];
kpb[..DHT_KEY_SECRET_LENGTH].copy_from_slice(&dht_key_secret.bytes);
kpb[DHT_KEY_SECRET_LENGTH..].copy_from_slice(&dht_key.bytes);
let keypair = Keypair::from_bytes(&kpb)
kpb[..SECRET_KEY_LENGTH].copy_from_slice(&dht_key_secret.bytes);
kpb[SECRET_KEY_LENGTH..].copy_from_slice(&dht_key.bytes);
let keypair = ed::Keypair::from_bytes(&kpb)
.map_err(|e| VeilidAPIError::parse_error("Keypair is invalid", e))?;
let mut dig = Blake3Digest512::new();
@ -166,18 +165,18 @@ impl CryptoSystem for CryptoV0System {
.sign_prehashed(dig, None)
.map_err(VeilidAPIError::internal)?;
let dht_sig = DHTSignature::new(sig.to_bytes());
let dht_sig = Signature::new(sig.to_bytes());
Ok(dht_sig)
}
fn verify(
&self,
dht_key: &DHTKey,
dht_key: &PublicKey,
data: &[u8],
signature: &DHTSignature,
signature: &Signature,
) -> Result<(), VeilidAPIError> {
let pk = PublicKey::from_bytes(&dht_key.bytes)
let pk = ed::PublicKey::from_bytes(&dht_key.bytes)
.map_err(|e| VeilidAPIError::parse_error("Public key is invalid", e))?;
let sig = Signature::from_bytes(&signature.bytes)
let sig = ed::Signature::from_bytes(&signature.bytes)
.map_err(|e| VeilidAPIError::parse_error("Signature is invalid", e))?;
let mut dig = Blake3Digest512::new();
@ -199,8 +198,8 @@ impl CryptoSystem for CryptoV0System {
shared_secret: &SharedSecret,
associated_data: Option<&[u8]>,
) -> Result<(), VeilidAPIError> {
let key = ch::Key::from(*shared_secret);
let xnonce = ch::XNonce::from(*nonce);
let key = ch::Key::from(shared_secret.bytes);
let xnonce = ch::XNonce::from(nonce.bytes);
let aead = ch::XChaCha20Poly1305::new(&key);
aead.decrypt_in_place(&xnonce, associated_data.unwrap_or(b""), body)
.map_err(map_to_string)
@ -228,8 +227,8 @@ impl CryptoSystem for CryptoV0System {
shared_secret: &SharedSecret,
associated_data: Option<&[u8]>,
) -> Result<(), VeilidAPIError> {
let key = ch::Key::from(*shared_secret);
let xnonce = ch::XNonce::from(*nonce);
let key = ch::Key::from(shared_secret.bytes);
let xnonce = ch::XNonce::from(nonce.bytes);
let aead = ch::XChaCha20Poly1305::new(&key);
aead.encrypt_in_place(&xnonce, associated_data.unwrap_or(b""), body)
@ -258,7 +257,7 @@ impl CryptoSystem for CryptoV0System {
nonce: &Nonce,
shared_secret: &SharedSecret,
) {
let mut cipher = XChaCha20::new(shared_secret.into(), nonce.into());
let mut cipher = XChaCha20::new(&shared_secret.bytes.into(), &nonce.bytes.into());
cipher.apply_keystream(body);
}
@ -269,7 +268,7 @@ impl CryptoSystem for CryptoV0System {
nonce: &Nonce,
shared_secret: &SharedSecret,
) {
let mut cipher = XChaCha20::new(shared_secret.into(), nonce.into());
let mut cipher = XChaCha20::new(&shared_secret.bytes.into(), &nonce.bytes.into());
cipher.apply_keystream_b2b(in_buf, &mut out_buf).unwrap();
}
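
For orientation, a rough usage sketch of the renamed VLD0 system through the CryptoSystem trait. Illustrative only, not part of this commit; how the Crypto handle is obtained, and having the trait in scope, are assumed.

fn vld0_sign_verify_sketch(crypto: Crypto) -> Result<(), VeilidAPIError> {
    // kind() now reports the FOURCC "VLD0" instead of a numeric version
    let vcrypto = CryptoSystemVLD0::new(crypto);

    // Ed25519 keypair wrapped in the crate's PublicKey/SecretKey newtypes
    let (key, secret) = vcrypto.generate_keypair();

    // sign() hashes the message with the BLAKE3-based 512-bit digest and signs the
    // prehash; verify() repeats the digest and checks the signature against it
    let msg = b"example message";
    let sig = vcrypto.sign(&key, &secret, msg)?;
    vcrypto.verify(&key, msg, &sig)?;
    Ok(())
}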

View File

@ -113,28 +113,35 @@ impl ProtectedStore {
}
#[instrument(level = "trace", skip(self, value), ret, err)]
pub async fn save_user_secret_string(&self, key: &str, value: &str) -> EyreResult<bool> {
pub async fn save_user_secret_string<K: AsRef<str> + fmt::Debug, V: AsRef<str> + fmt::Debug>(
&self,
key: K,
value: V,
) -> EyreResult<bool> {
let inner = self.inner.lock();
inner
.keyring_manager
.as_ref()
.ok_or_else(|| eyre!("Protected store not initialized"))?
.with_keyring(&self.service_name(), key, |kr| {
.with_keyring(&self.service_name(), key.as_ref(), |kr| {
let existed = kr.get_value().is_ok();
kr.set_value(value)?;
kr.set_value(value.as_ref())?;
Ok(existed)
})
.wrap_err("failed to save user secret")
}
#[instrument(level = "trace", skip(self), err)]
pub async fn load_user_secret_string(&self, key: &str) -> EyreResult<Option<String>> {
pub async fn load_user_secret_string<K: AsRef<str> + fmt::Debug>(
&self,
key: K,
) -> EyreResult<Option<String>> {
let inner = self.inner.lock();
match inner
.keyring_manager
.as_ref()
.ok_or_else(|| eyre!("Protected store not initialized"))?
.with_keyring(&self.service_name(), key, |kr| kr.get_value())
.with_keyring(&self.service_name(), key.as_ref(), |kr| kr.get_value())
{
Ok(v) => Ok(Some(v)),
Err(KeyringError::NoPasswordFound) => Ok(None),
@ -143,17 +150,19 @@ impl ProtectedStore {
}
#[instrument(level = "trace", skip(self, value))]
pub async fn save_user_secret_rkyv<T>(&self, key: &str, value: &T) -> EyreResult<bool>
pub async fn save_user_secret_rkyv<K, T>(&self, key: K, value: &T) -> EyreResult<bool>
where
K: AsRef<str> + fmt::Debug,
T: RkyvSerialize<rkyv::ser::serializers::AllocSerializer<1024>>,
{
let v = to_rkyv(value)?;
self.save_user_secret(&key, &v).await
self.save_user_secret(key, &v).await
}
#[instrument(level = "trace", skip(self, value))]
pub async fn save_user_secret_json<T>(&self, key: &str, value: &T) -> EyreResult<bool>
pub async fn save_user_secret_json<K, T>(&self, key: K, value: &T) -> EyreResult<bool>
where
K: AsRef<str> + fmt::Debug,
T: serde::Serialize,
{
let v = serde_json::to_vec(value)?;
@ -161,8 +170,9 @@ impl ProtectedStore {
}
#[instrument(level = "trace", skip(self))]
pub async fn load_user_secret_rkyv<T>(&self, key: &str) -> EyreResult<Option<T>>
pub async fn load_user_secret_rkyv<K, T>(&self, key: K) -> EyreResult<Option<T>>
where
K: AsRef<str> + fmt::Debug,
T: RkyvArchive,
<T as RkyvArchive>::Archived:
for<'t> bytecheck::CheckBytes<rkyv::validation::validators::DefaultValidator<'t>>,
@ -182,8 +192,9 @@ impl ProtectedStore {
}
#[instrument(level = "trace", skip(self))]
pub async fn load_user_secret_json<T>(&self, key: &str) -> EyreResult<Option<T>>
pub async fn load_user_secret_json<K, T>(&self, key: K) -> EyreResult<Option<T>>
where
K: AsRef<str> + fmt::Debug,
T: for<'de> serde::de::Deserialize<'de>,
{
let out = self.load_user_secret(key).await?;
@ -199,7 +210,11 @@ impl ProtectedStore {
}
#[instrument(level = "trace", skip(self, value), ret, err)]
pub async fn save_user_secret(&self, key: &str, value: &[u8]) -> EyreResult<bool> {
pub async fn save_user_secret<K: AsRef<str> + fmt::Debug>(
&self,
key: K,
value: &[u8],
) -> EyreResult<bool> {
let mut s = BASE64URL_NOPAD.encode(value);
s.push('!');
@ -207,7 +222,10 @@ impl ProtectedStore {
}
#[instrument(level = "trace", skip(self), err)]
pub async fn load_user_secret(&self, key: &str) -> EyreResult<Option<Vec<u8>>> {
pub async fn load_user_secret<K: AsRef<str> + fmt::Debug>(
&self,
key: K,
) -> EyreResult<Option<Vec<u8>>> {
let mut s = match self.load_user_secret_string(key).await? {
Some(s) => s,
None => {
@ -238,13 +256,13 @@ impl ProtectedStore {
}
#[instrument(level = "trace", skip(self), ret, err)]
pub async fn remove_user_secret(&self, key: &str) -> EyreResult<bool> {
pub async fn remove_user_secret<K: AsRef<str> + fmt::Debug>(&self, key: K) -> EyreResult<bool> {
let inner = self.inner.lock();
match inner
.keyring_manager
.as_ref()
.ok_or_else(|| eyre!("Protected store not initialized"))?
.with_keyring(&self.service_name(), key, |kr| kr.delete_value())
.with_keyring(&self.service_name(), key.as_ref(), |kr| kr.delete_value())
{
Ok(_) => Ok(true),
Err(KeyringError::NoPasswordFound) => Ok(false),
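
A short usage sketch of the relaxed key parameters, illustrative only and not part of this commit: with K: AsRef<str> + Debug, both &str and owned String keys are accepted.

async fn protected_store_sketch(ps: &ProtectedStore) -> EyreResult<()> {
    // &str and String both satisfy the new K: AsRef<str> + fmt::Debug bound
    let _existed = ps
        .save_user_secret_string("example_key", String::from("example_value"))
        .await?;

    let loaded = ps.load_user_secret_string("example_key".to_string()).await?;
    assert_eq!(loaded.as_deref(), Some("example_value"));

    // remove_user_secret reports whether anything was actually deleted
    let _removed = ps.remove_user_secret("example_key").await?;
    Ok(())
}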

View File

@ -50,7 +50,11 @@ impl ProtectedStore {
}
//#[instrument(level = "trace", skip(self, value), ret, err)]
pub async fn save_user_secret_string(&self, key: &str, value: &str) -> EyreResult<bool> {
pub async fn save_user_secret_string<K: AsRef<str> + fmt::Debug, V: AsRef<str> + fmt::Debug>(
&self,
key: K,
value: V,
) -> EyreResult<bool> {
if is_browser() {
let win = match window() {
Some(w) => w,
@ -70,7 +74,7 @@ impl ProtectedStore {
}
};
let vkey = self.browser_key_name(key);
let vkey = self.browser_key_name(key.as_ref());
let prev = match ls
.get_item(&vkey)
@ -81,7 +85,7 @@ impl ProtectedStore {
None => false,
};
ls.set_item(&vkey, value)
ls.set_item(&vkey, value.as_ref())
.map_err(map_jsvalue_error)
.wrap_err("exception_thrown")?;
@ -92,7 +96,10 @@ impl ProtectedStore {
}
#[instrument(level = "trace", skip(self), err)]
pub async fn load_user_secret_string(&self, key: &str) -> EyreResult<Option<String>> {
pub async fn load_user_secret_string<K: AsRef<str> + fmt::Debug>(
&self,
key: K,
) -> EyreResult<Option<String>> {
if is_browser() {
let win = match window() {
Some(w) => w,
@ -112,7 +119,7 @@ impl ProtectedStore {
}
};
let vkey = self.browser_key_name(key);
let vkey = self.browser_key_name(key.as_ref());
ls.get_item(&vkey)
.map_err(map_jsvalue_error)
@ -123,26 +130,29 @@ impl ProtectedStore {
}
#[instrument(level = "trace", skip(self, value))]
pub async fn save_user_secret_rkyv<T>(&self, key: &str, value: &T) -> EyreResult<bool>
pub async fn save_user_secret_rkyv<K, T>(&self, key: K, value: &T) -> EyreResult<bool>
where
K: AsRef<str> + fmt::Debug,
T: RkyvSerialize<rkyv::ser::serializers::AllocSerializer<1024>>,
{
let v = to_rkyv(value)?;
self.save_user_secret(&key, &v).await
self.save_user_secret(key, &v).await
}
#[instrument(level = "trace", skip(self, value))]
pub async fn save_user_secret_json<T>(&self, key: &str, value: &T) -> EyreResult<bool>
pub async fn save_user_secret_json<K, T>(&self, key: K, value: &T) -> EyreResult<bool>
where
K: AsRef<str> + fmt::Debug,
T: serde::Serialize,
{
let v = serde_json::to_vec(value)?;
self.save_user_secret(&key, &v).await
self.save_user_secret(key, &v).await
}
#[instrument(level = "trace", skip(self))]
pub async fn load_user_secret_rkyv<T>(&self, key: &str) -> EyreResult<Option<T>>
pub async fn load_user_secret_rkyv<K, T>(&self, key: K) -> EyreResult<Option<T>>
where
K: AsRef<str> + fmt::Debug,
T: RkyvArchive,
<T as RkyvArchive>::Archived:
for<'t> bytecheck::CheckBytes<rkyv::validation::validators::DefaultValidator<'t>>,
@ -162,8 +172,9 @@ impl ProtectedStore {
}
#[instrument(level = "trace", skip(self))]
pub async fn load_user_secret_json<T>(&self, key: &str) -> EyreResult<Option<T>>
pub async fn load_user_secret_json<K, T>(&self, key: K) -> EyreResult<Option<T>>
where
K: AsRef<str> + fmt::Debug,
T: for<'de> serde::de::Deserialize<'de>,
{
let out = self.load_user_secret(key).await?;
@ -179,7 +190,11 @@ impl ProtectedStore {
}
#[instrument(level = "trace", skip(self, value), ret, err)]
pub async fn save_user_secret(&self, key: &str, value: &[u8]) -> EyreResult<bool> {
pub async fn save_user_secret<K: AsRef<str> + fmt::Debug>(
&self,
key: K,
value: &[u8],
) -> EyreResult<bool> {
let mut s = BASE64URL_NOPAD.encode(value);
s.push('!');
@ -187,7 +202,10 @@ impl ProtectedStore {
}
#[instrument(level = "trace", skip(self), err)]
pub async fn load_user_secret(&self, key: &str) -> EyreResult<Option<Vec<u8>>> {
pub async fn load_user_secret<K: AsRef<str> + fmt::Debug>(
&self,
key: K,
) -> EyreResult<Option<Vec<u8>>> {
let mut s = match self.load_user_secret_string(key).await? {
Some(s) => s,
None => {
@ -218,7 +236,7 @@ impl ProtectedStore {
}
#[instrument(level = "trace", skip(self), ret, err)]
pub async fn remove_user_secret(&self, key: &str) -> EyreResult<bool> {
pub async fn remove_user_secret<K: AsRef<str> + fmt::Debug>(&self, key: K) -> EyreResult<bool> {
if is_browser() {
let win = match window() {
Some(w) => w,
@ -238,7 +256,7 @@ impl ProtectedStore {
}
};
let vkey = self.browser_key_name(key);
let vkey = self.browser_key_name(key.as_ref());
match ls
.get_item(&vkey)

View File

@ -134,7 +134,7 @@ struct PublicAddressCheckCacheKey(ProtocolType, AddressType);
// The mutable state of the network manager
struct NetworkManagerInner {
stats: NetworkManagerStats,
client_whitelist: LruCache<DHTKey, ClientWhitelistEntry>,
client_whitelist: LruCache<PublicKey, ClientWhitelistEntry>,
public_address_check_cache:
BTreeMap<PublicAddressCheckCacheKey, LruCache<IpAddr, SocketAddress>>,
public_address_inconsistencies_table:
@ -396,7 +396,7 @@ impl NetworkManager {
debug!("finished network manager shutdown");
}
pub fn update_client_whitelist(&self, client: DHTKey) {
pub fn update_client_whitelist(&self, client: PublicKey) {
let mut inner = self.inner.lock();
match inner.client_whitelist.entry(client) {
hashlink::lru_cache::Entry::Occupied(mut entry) => {
@ -411,7 +411,7 @@ impl NetworkManager {
}
#[instrument(level = "trace", skip(self), ret)]
pub fn check_client_whitelist(&self, client: DHTKey) -> bool {
pub fn check_client_whitelist(&self, client: PublicKey) -> bool {
let mut inner = self.inner.lock();
match inner.client_whitelist.entry(client) {
@ -624,7 +624,7 @@ impl NetworkManager {
pub async fn handle_private_receipt<R: AsRef<[u8]>>(
&self,
receipt_data: R,
private_route: DHTKey,
private_route: PublicKey,
) -> NetworkResult<()> {
let receipt_manager = self.receipt_manager();
@ -731,7 +731,7 @@ impl NetworkManager {
#[instrument(level = "trace", skip(self, body), err)]
fn build_envelope<B: AsRef<[u8]>>(
&self,
dest_node_id: DHTKey,
dest_node_id: PublicKey,
version: u8,
body: B,
) -> EyreResult<Vec<u8>> {
@ -759,7 +759,7 @@ impl NetworkManager {
pub async fn send_envelope<B: AsRef<[u8]>>(
&self,
node_ref: NodeRef,
envelope_node_id: Option<DHTKey>,
envelope_node_id: Option<PublicKey>,
body: B,
) -> EyreResult<NetworkResult<SendDataKind>> {
let via_node_id = node_ref.node_id();

View File

@ -112,7 +112,7 @@ impl DiscoveryContext {
&self,
protocol_type: ProtocolType,
address_type: AddressType,
ignore_node: Option<DHTKey>,
ignore_node: Option<PublicKey>,
) -> Option<(SocketAddress, NodeRef)> {
let node_count = {
let config = self.routing_table.network_manager().config();
@ -130,7 +130,7 @@ impl DiscoveryContext {
dial_info_filter.clone(),
);
let disallow_relays_filter = Box::new(
move |rti: &RoutingTableInner, _k: DHTKey, v: Option<Arc<BucketEntry>>| {
move |rti: &RoutingTableInner, _k: PublicKey, v: Option<Arc<BucketEntry>>| {
let v = v.unwrap();
v.with(rti, |_rti, e| {
if let Some(n) = e.signed_node_info(RoutingDomain::PublicInternet) {

View File

@ -11,7 +11,7 @@ pub enum ReceiptEvent {
ReturnedOutOfBand,
ReturnedInBand { inbound_noderef: NodeRef },
ReturnedSafety,
ReturnedPrivate { private_route: DHTKey },
ReturnedPrivate { private_route: PublicKey },
Expired,
Cancelled,
}
@ -21,7 +21,7 @@ pub enum ReceiptReturned {
OutOfBand,
InBand { inbound_noderef: NodeRef },
Safety,
Private { private_route: DHTKey },
Private { private_route: PublicKey },
}
pub trait ReceiptCallback: Send + 'static {
@ -149,7 +149,7 @@ impl PartialOrd for ReceiptRecordTimestampSort {
pub struct ReceiptManagerInner {
network_manager: NetworkManager,
records_by_nonce: BTreeMap<ReceiptNonce, Arc<Mutex<ReceiptRecord>>>,
records_by_nonce: BTreeMap<Nonce, Arc<Mutex<ReceiptRecord>>>,
next_oldest_ts: Option<Timestamp>,
stop_source: Option<StopSource>,
timeout_task: MustJoinSingleFuture<()>,
@ -370,7 +370,7 @@ impl ReceiptManager {
inner.next_oldest_ts = new_next_oldest_ts;
}
pub async fn cancel_receipt(&self, nonce: &ReceiptNonce) -> EyreResult<()> {
pub async fn cancel_receipt(&self, nonce: &Nonce) -> EyreResult<()> {
log_rpc!(debug "== Cancel Receipt {}", nonce.encode());
// Remove the record

View File

@ -4,15 +4,16 @@ use rkyv::{Archive as RkyvArchive, Deserialize as RkyvDeserialize, Serialize as
pub struct Bucket {
routing_table: RoutingTable,
entries: BTreeMap<DHTKey, Arc<BucketEntry>>,
newest_entry: Option<DHTKey>,
entries: BTreeMap<PublicKey, Arc<BucketEntry>>,
newest_entry: Option<PublicKey>,
}
pub(super) type EntriesIter<'a> = alloc::collections::btree_map::Iter<'a, DHTKey, Arc<BucketEntry>>;
pub(super) type EntriesIter<'a> =
alloc::collections::btree_map::Iter<'a, PublicKey, Arc<BucketEntry>>;
#[derive(Debug, RkyvArchive, RkyvSerialize, RkyvDeserialize)]
#[archive_attr(repr(C), derive(CheckBytes))]
struct BucketEntryData {
key: DHTKey,
key: PublicKey,
value: Vec<u8>,
}
@ -20,7 +21,7 @@ struct BucketEntryData {
#[archive_attr(repr(C), derive(CheckBytes))]
struct BucketData {
entries: Vec<BucketEntryData>,
newest_entry: Option<DHTKey>,
newest_entry: Option<PublicKey>,
}
fn state_ordering(state: BucketEntryState) -> usize {
@ -70,7 +71,7 @@ impl Bucket {
Ok(out)
}
pub(super) fn add_entry(&mut self, node_id: DHTKey) -> NodeRef {
pub(super) fn add_entry(&mut self, node_id: PublicKey) -> NodeRef {
log_rtab!("Node added: {}", node_id.encode());
// Add new entry
@ -84,7 +85,7 @@ impl Bucket {
NodeRef::new(self.routing_table.clone(), node_id, entry, None)
}
pub(super) fn remove_entry(&mut self, node_id: &DHTKey) {
pub(super) fn remove_entry(&mut self, node_id: &PublicKey) {
log_rtab!("Node removed: {}", node_id);
// Remove the entry
@ -93,7 +94,7 @@ impl Bucket {
// newest_entry is updated by kick_bucket()
}
pub(super) fn entry(&self, key: &DHTKey) -> Option<Arc<BucketEntry>> {
pub(super) fn entry(&self, key: &PublicKey) -> Option<Arc<BucketEntry>> {
self.entries.get(key).cloned()
}
@ -101,7 +102,7 @@ impl Bucket {
self.entries.iter()
}
pub(super) fn kick(&mut self, bucket_depth: usize) -> Option<BTreeSet<DHTKey>> {
pub(super) fn kick(&mut self, bucket_depth: usize) -> Option<BTreeSet<PublicKey>> {
// Get number of entries to attempt to purge from bucket
let bucket_len = self.entries.len();
@ -111,11 +112,11 @@ impl Bucket {
}
// Try to purge the newest entries that overflow the bucket
let mut dead_node_ids: BTreeSet<DHTKey> = BTreeSet::new();
let mut dead_node_ids: BTreeSet<PublicKey> = BTreeSet::new();
let mut extra_entries = bucket_len - bucket_depth;
// Get the sorted list of entries by their kick order
let mut sorted_entries: Vec<(DHTKey, Arc<BucketEntry>)> = self
let mut sorted_entries: Vec<(PublicKey, Arc<BucketEntry>)> = self
.entries
.iter()
.map(|(k, v)| (k.clone(), v.clone()))

View File

@ -310,7 +310,7 @@ impl BucketEntryInner {
opt_current_sni.as_ref().map(|s| s.as_ref())
}
pub fn make_peer_info(&self, key: DHTKey, routing_domain: RoutingDomain) -> Option<PeerInfo> {
pub fn make_peer_info(&self, key: PublicKey, routing_domain: RoutingDomain) -> Option<PeerInfo> {
let opt_current_sni = match routing_domain {
RoutingDomain::LocalNetwork => &self.local_network.signed_node_info,
RoutingDomain::PublicInternet => &self.public_internet.signed_node_info,

View File

@ -113,7 +113,7 @@ impl RoutingTable {
let mut cnt = 0;
out += &format!("Entries: {}\n", inner.bucket_entry_count);
while b < blen {
let filtered_entries: Vec<(&DHTKey, &Arc<BucketEntry>)> = inner.buckets[b]
let filtered_entries: Vec<(&PublicKey, &Arc<BucketEntry>)> = inner.buckets[b]
.entries()
.filter(|e| {
let state = e.1.with(inner, |_rti, e| e.state(cur_ts));
@ -149,7 +149,7 @@ impl RoutingTable {
out
}
pub(crate) fn debug_info_entry(&self, node_id: DHTKey) -> String {
pub(crate) fn debug_info_entry(&self, node_id: PublicKey) -> String {
let mut out = String::new();
out += &format!("Entry {:?}:\n", node_id);
if let Some(nr) = self.lookup_node_ref(node_id) {

View File

@ -49,7 +49,7 @@ pub struct LowLevelPortInfo {
pub protocol_to_port: ProtocolToPortMapping,
}
pub type RoutingTableEntryFilter<'t> =
Box<dyn FnMut(&RoutingTableInner, DHTKey, Option<Arc<BucketEntry>>) -> bool + Send + 't>;
Box<dyn FnMut(&RoutingTableInner, PublicKey, Option<Arc<BucketEntry>>) -> bool + Send + 't>;
#[derive(Clone, Debug, Default, Eq, PartialEq)]
pub struct RoutingTableHealth {
@ -71,9 +71,9 @@ pub(super) struct RoutingTableUnlockedInner {
network_manager: NetworkManager,
/// The current node's public DHT key
node_id: DHTKey,
node_id: PublicKey,
/// The current node's DHT key secret
node_id_secret: DHTKeySecret,
node_id_secret: SecretKey,
/// Buckets to kick on our next kick task
kick_queue: Mutex<BTreeSet<usize>>,
/// Background process for computing statistics
@ -149,11 +149,11 @@ impl RoutingTable {
f(&*self.unlocked_inner.config.get())
}
pub fn node_id(&self) -> DHTKey {
pub fn node_id(&self) -> PublicKey {
self.unlocked_inner.node_id
}
pub fn node_id_secret(&self) -> DHTKeySecret {
pub fn node_id_secret(&self) -> SecretKey {
self.unlocked_inner.node_id_secret
}
@ -453,7 +453,7 @@ impl RoutingTable {
self.inner.write().purge_last_connections();
}
fn find_bucket_index(&self, node_id: DHTKey) -> usize {
fn find_bucket_index(&self, node_id: PublicKey) -> usize {
distance(&node_id, &self.unlocked_inner.node_id)
.first_nonzero_bit()
.unwrap()
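
The bucket index above is the position of the first nonzero bit of the XOR distance between the two node ids, so nodes sharing a longer key prefix with us land in later buckets. A standalone sketch of that calculation, assuming distance() is the byte-wise XOR shown in the VLD0 system and bits are counted from the most significant end (illustrative only, not part of this commit):

fn bucket_index_sketch(own_id: &PublicKey, other_id: &PublicKey) -> usize {
    // Byte-wise XOR distance, as in CryptoSystemVLD0::distance()
    let mut xor = [0u8; PUBLIC_KEY_LENGTH];
    for (n, byte) in xor.iter_mut().enumerate() {
        *byte = own_id.bytes[n] ^ other_id.bytes[n];
    }
    // First differing bit, counted from the most significant bit of byte 0
    for (i, b) in xor.iter().enumerate() {
        if *b != 0 {
            return i * 8 + b.leading_zeros() as usize;
        }
    }
    // Identical ids never occur here; our own id is rejected by lookup_node_ref()
    PUBLIC_KEY_LENGTH * 8 - 1
}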
@ -484,7 +484,7 @@ impl RoutingTable {
inner.get_all_nodes(self.clone(), cur_ts)
}
fn queue_bucket_kick(&self, node_id: DHTKey) {
fn queue_bucket_kick(&self, node_id: PublicKey) {
let idx = self.find_bucket_index(node_id);
self.unlocked_inner.kick_queue.lock().insert(idx);
}
@ -492,7 +492,7 @@ impl RoutingTable {
/// Create a node reference, possibly creating a bucket entry
/// the 'update_func' closure is called on the node, and, if created,
    /// in a locked fashion so as to ensure the bucket entry state is always valid
pub fn create_node_ref<F>(&self, node_id: DHTKey, update_func: F) -> Option<NodeRef>
pub fn create_node_ref<F>(&self, node_id: PublicKey, update_func: F) -> Option<NodeRef>
where
F: FnOnce(&mut RoutingTableInner, &mut BucketEntryInner),
{
@ -502,14 +502,14 @@ impl RoutingTable {
}
/// Resolve an existing routing table entry and return a reference to it
pub fn lookup_node_ref(&self, node_id: DHTKey) -> Option<NodeRef> {
pub fn lookup_node_ref(&self, node_id: PublicKey) -> Option<NodeRef> {
self.inner.read().lookup_node_ref(self.clone(), node_id)
}
/// Resolve an existing routing table entry and return a filtered reference to it
pub fn lookup_and_filter_noderef(
&self,
node_id: DHTKey,
node_id: PublicKey,
routing_domain_set: RoutingDomainSet,
dial_info_filter: DialInfoFilter,
) -> Option<NodeRef> {
@ -527,7 +527,7 @@ impl RoutingTable {
pub fn register_node_with_signed_node_info(
&self,
routing_domain: RoutingDomain,
node_id: DHTKey,
node_id: PublicKey,
signed_node_info: SignedNodeInfo,
allow_invalid: bool,
) -> Option<NodeRef> {
@ -544,7 +544,7 @@ impl RoutingTable {
/// and add the last peer address we have for it, since that's pretty common
pub fn register_node_with_existing_connection(
&self,
node_id: DHTKey,
node_id: PublicKey,
descriptor: ConnectionDescriptor,
timestamp: Timestamp,
) -> Option<NodeRef> {
@ -563,7 +563,7 @@ impl RoutingTable {
self.inner.read().get_routing_table_health()
}
pub fn get_recent_peers(&self) -> Vec<(DHTKey, RecentPeersEntry)> {
pub fn get_recent_peers(&self) -> Vec<(PublicKey, RecentPeersEntry)> {
let mut recent_peers = Vec::new();
let mut dead_peers = Vec::new();
let mut out = Vec::new();
@ -602,7 +602,7 @@ impl RoutingTable {
out
}
pub fn touch_recent_peer(&self, node_id: DHTKey, last_connection: ConnectionDescriptor) {
pub fn touch_recent_peer(&self, node_id: PublicKey, last_connection: ConnectionDescriptor) {
self.inner
.write()
.touch_recent_peer(node_id, last_connection)
@ -722,7 +722,7 @@ impl RoutingTable {
let mut nodes_proto_v6 = vec![0usize, 0usize, 0usize, 0usize];
let filter = Box::new(
move |rti: &RoutingTableInner, _k: DHTKey, v: Option<Arc<BucketEntry>>| {
move |rti: &RoutingTableInner, _k: PublicKey, v: Option<Arc<BucketEntry>>| {
let entry = v.unwrap();
entry.with(rti, |_rti, e| {
// skip nodes on our local network here
@ -769,7 +769,7 @@ impl RoutingTable {
self.find_fastest_nodes(
protocol_types_len * 2 * max_per_type,
filters,
|_rti, k: DHTKey, v: Option<Arc<BucketEntry>>| {
|_rti, k: PublicKey, v: Option<Arc<BucketEntry>>| {
NodeRef::new(self.clone(), k, v.unwrap().clone(), None)
},
)
@ -786,10 +786,10 @@ impl RoutingTable {
where
C: for<'a, 'b> FnMut(
&'a RoutingTableInner,
&'b (DHTKey, Option<Arc<BucketEntry>>),
&'b (DHTKey, Option<Arc<BucketEntry>>),
&'b (PublicKey, Option<Arc<BucketEntry>>),
&'b (PublicKey, Option<Arc<BucketEntry>>),
) -> core::cmp::Ordering,
T: for<'r> FnMut(&'r RoutingTableInner, DHTKey, Option<Arc<BucketEntry>>) -> O + Send,
T: for<'r> FnMut(&'r RoutingTableInner, PublicKey, Option<Arc<BucketEntry>>) -> O + Send,
{
self.inner
.read()
@ -803,7 +803,7 @@ impl RoutingTable {
transform: T,
) -> Vec<O>
where
T: for<'r> FnMut(&'r RoutingTableInner, DHTKey, Option<Arc<BucketEntry>>) -> O + Send,
T: for<'r> FnMut(&'r RoutingTableInner, PublicKey, Option<Arc<BucketEntry>>) -> O + Send,
{
self.inner
.read()
@ -812,12 +812,12 @@ impl RoutingTable {
pub fn find_closest_nodes<'a, T, O>(
&self,
node_id: DHTKey,
node_id: PublicKey,
filters: VecDeque<RoutingTableEntryFilter>,
transform: T,
) -> Vec<O>
where
T: for<'r> FnMut(&'r RoutingTableInner, DHTKey, Option<Arc<BucketEntry>>) -> O + Send,
T: for<'r> FnMut(&'r RoutingTableInner, PublicKey, Option<Arc<BucketEntry>>) -> O + Send,
{
self.inner
.read()
@ -860,7 +860,7 @@ impl RoutingTable {
pub async fn find_node(
&self,
node_ref: NodeRef,
node_id: DHTKey,
node_id: PublicKey,
) -> EyreResult<NetworkResult<Vec<NodeRef>>> {
let rpc_processor = self.rpc_processor();
@ -986,7 +986,7 @@ impl RoutingTable {
// Go through all entries and find fastest entry that matches filter function
let inner = self.inner.read();
let inner = &*inner;
let mut best_inbound_relay: Option<(DHTKey, Arc<BucketEntry>)> = None;
let mut best_inbound_relay: Option<(PublicKey, Arc<BucketEntry>)> = None;
// Iterate all known nodes for candidates
inner.with_entries(cur_ts, BucketEntryState::Unreliable, |rti, k, v| {

View File

@ -6,7 +6,7 @@ use alloc::fmt;
pub struct NodeRefBaseCommon {
routing_table: RoutingTable,
node_id: DHTKey,
node_id: PublicKey,
entry: Arc<BucketEntry>,
filter: Option<NodeRefFilter>,
sequencing: Sequencing,
@ -99,7 +99,7 @@ pub trait NodeRefBase: Sized {
fn routing_table(&self) -> RoutingTable {
self.common().routing_table.clone()
}
fn node_id(&self) -> DHTKey {
fn node_id(&self) -> PublicKey {
self.common().node_id
}
fn has_updated_since_last_network_change(&self) -> bool {
@ -346,7 +346,7 @@ pub struct NodeRef {
impl NodeRef {
pub fn new(
routing_table: RoutingTable,
node_id: DHTKey,
node_id: PublicKey,
entry: Arc<BucketEntry>,
filter: Option<NodeRefFilter>,
) -> Self {

View File

@ -57,14 +57,14 @@ pub enum PrivateRouteHops {
#[derive(Clone, Debug)]
pub struct PrivateRoute {
/// The public key used for the entire route
pub public_key: DHTKey,
pub public_key: PublicKey,
pub hop_count: u8,
pub hops: PrivateRouteHops,
}
impl PrivateRoute {
/// Empty private route is the form used when receiving the last hop
pub fn new_empty(public_key: DHTKey) -> Self {
pub fn new_empty(public_key: PublicKey) -> Self {
Self {
public_key,
hop_count: 0,
@ -72,7 +72,7 @@ impl PrivateRoute {
}
}
/// Stub route is the form used when no privacy is required, but you need to specify the destination for a safety route
pub fn new_stub(public_key: DHTKey, node: RouteNode) -> Self {
pub fn new_stub(public_key: PublicKey, node: RouteNode) -> Self {
Self {
public_key,
hop_count: 1,
@ -117,7 +117,7 @@ impl PrivateRoute {
}
}
pub fn first_hop_node_id(&self) -> Option<DHTKey> {
pub fn first_hop_node_id(&self) -> Option<PublicKey> {
let PrivateRouteHops::FirstHop(pr_first_hop) = &self.hops else {
return None;
};
@ -162,13 +162,13 @@ pub enum SafetyRouteHops {
#[derive(Clone, Debug)]
pub struct SafetyRoute {
pub public_key: DHTKey,
pub public_key: PublicKey,
pub hop_count: u8,
pub hops: SafetyRouteHops,
}
impl SafetyRoute {
pub fn new_stub(public_key: DHTKey, private_route: PrivateRoute) -> Self {
pub fn new_stub(public_key: PublicKey, private_route: PrivateRoute) -> Self {
// First hop should have already been popped off for stubbed safety routes since
// we are sending directly to the first hop
assert!(matches!(private_route.hops, PrivateRouteHops::Data(_)));

View File

@ -17,8 +17,8 @@ const COMPILED_ROUTE_CACHE_SIZE: usize = 256;
// Compiled route key for caching
#[derive(Clone, Debug, Hash, PartialEq, Eq, PartialOrd, Ord)]
struct CompiledRouteCacheKey {
sr_pubkey: DHTKey,
pr_pubkey: DHTKey,
sr_pubkey: PublicKey,
pr_pubkey: PublicKey,
}
/// Compiled route (safety route + private route)
@ -27,7 +27,7 @@ pub struct CompiledRoute {
/// The safety route attached to the private route
pub safety_route: SafetyRoute,
/// The secret used to encrypt the message payload
pub secret: DHTKeySecret,
pub secret: SecretKey,
/// The node ref to the first hop in the compiled route
pub first_hop: NodeRef,
}
@ -35,8 +35,8 @@ pub struct CompiledRoute {
#[derive(Clone, Debug, RkyvArchive, RkyvSerialize, RkyvDeserialize)]
#[archive_attr(repr(C), derive(CheckBytes))]
pub struct KeyPair {
key: DHTKey,
secret: DHTKeySecret,
key: PublicKey,
secret: SecretKey,
}
#[derive(Clone, Debug, Default, RkyvArchive, RkyvSerialize, RkyvDeserialize)]
@ -172,9 +172,9 @@ impl RouteStats {
pub struct RouteSpecDetail {
/// Secret key
#[with(Skip)]
secret_key: DHTKeySecret,
secret_key: SecretKey,
/// Route hops
hops: Vec<DHTKey>,
hops: Vec<PublicKey>,
/// Route noderefs
#[with(Skip)]
hop_node_refs: Vec<NodeRef>,
@ -206,7 +206,7 @@ impl RouteSpecDetail {
pub fn hop_count(&self) -> usize {
self.hops.len()
}
pub fn get_secret_key(&self) -> DHTKeySecret {
pub fn get_secret_key(&self) -> SecretKey {
self.secret_key
}
pub fn get_stability(&self) -> Stability {
@ -228,7 +228,7 @@ impl RouteSpecDetail {
#[archive_attr(repr(C, align(8)), derive(CheckBytes))]
pub struct RouteSpecStoreContent {
/// All of the routes we have allocated so far
details: HashMap<DHTKey, RouteSpecDetail>,
details: HashMap<PublicKey, RouteSpecDetail>,
}
/// What remote private routes have seen
@ -257,19 +257,19 @@ impl RemotePrivateRouteInfo {
#[derive(Debug)]
pub struct RouteSpecStoreCache {
/// How many times nodes have been used
used_nodes: HashMap<DHTKey, usize>,
used_nodes: HashMap<PublicKey, usize>,
/// How many times nodes have been used at the terminal point of a route
used_end_nodes: HashMap<DHTKey, usize>,
used_end_nodes: HashMap<PublicKey, usize>,
/// Route spec hop cache, used to quickly disqualify routes
hop_cache: HashSet<Vec<u8>>,
/// Has a remote private route responded to a question and when
remote_private_route_cache: LruCache<DHTKey, RemotePrivateRouteInfo>,
remote_private_route_cache: LruCache<PublicKey, RemotePrivateRouteInfo>,
/// Compiled route cache
compiled_route_cache: LruCache<CompiledRouteCacheKey, SafetyRoute>,
/// List of dead allocated routes
dead_routes: Vec<DHTKey>,
dead_routes: Vec<PublicKey>,
/// List of dead remote routes
dead_remote_routes: Vec<DHTKey>,
dead_remote_routes: Vec<PublicKey>,
}
impl Default for RouteSpecStoreCache {
@ -319,8 +319,8 @@ pub struct RouteSpecStore {
unlocked_inner: Arc<RouteSpecStoreUnlockedInner>,
}
fn route_hops_to_hop_cache(hops: &[DHTKey]) -> Vec<u8> {
let mut cache: Vec<u8> = Vec::with_capacity(hops.len() * DHT_KEY_LENGTH);
fn route_hops_to_hop_cache(hops: &[PublicKey]) -> Vec<u8> {
let mut cache: Vec<u8> = Vec::with_capacity(hops.len() * PUBLIC_KEY_LENGTH);
for hop in hops {
cache.extend_from_slice(&hop.bytes);
}
@ -329,7 +329,7 @@ fn route_hops_to_hop_cache(hops: &[DHTKey]) -> Vec<u8> {
/// get the hop cache key for a particular route permutation
fn route_permutation_to_hop_cache(nodes: &[PeerInfo], perm: &[usize]) -> Vec<u8> {
let mut cache: Vec<u8> = Vec::with_capacity(perm.len() * DHT_KEY_LENGTH);
let mut cache: Vec<u8> = Vec::with_capacity(perm.len() * PUBLIC_KEY_LENGTH);
for n in perm {
cache.extend_from_slice(&nodes[*n].node_id.key.bytes)
}
@ -584,13 +584,13 @@ impl RouteSpecStore {
fn detail<'a>(
inner: &'a RouteSpecStoreInner,
public_key: &DHTKey,
public_key: &PublicKey,
) -> Option<&'a RouteSpecDetail> {
inner.content.details.get(public_key)
}
fn detail_mut<'a>(
inner: &'a mut RouteSpecStoreInner,
public_key: &DHTKey,
public_key: &PublicKey,
) -> Option<&'a mut RouteSpecDetail> {
inner.content.details.get_mut(public_key)
}
@ -616,8 +616,8 @@ impl RouteSpecStore {
sequencing: Sequencing,
hop_count: usize,
directions: DirectionSet,
avoid_node_ids: &[DHTKey],
) -> EyreResult<Option<DHTKey>> {
avoid_node_ids: &[PublicKey],
) -> EyreResult<Option<PublicKey>> {
let inner = &mut *self.inner.lock();
let routing_table = self.unlocked_inner.routing_table.clone();
let rti = &mut *routing_table.inner.write();
@ -642,8 +642,8 @@ impl RouteSpecStore {
sequencing: Sequencing,
hop_count: usize,
directions: DirectionSet,
avoid_node_ids: &[DHTKey],
) -> EyreResult<Option<DHTKey>> {
avoid_node_ids: &[PublicKey],
) -> EyreResult<Option<PublicKey>> {
use core::cmp::Ordering;
if hop_count < 1 {
@ -666,7 +666,7 @@ impl RouteSpecStore {
// Get list of all nodes, and sort them for selection
let cur_ts = get_aligned_timestamp();
let filter = Box::new(
move |rti: &RoutingTableInner, k: DHTKey, v: Option<Arc<BucketEntry>>| -> bool {
move |rti: &RoutingTableInner, k: PublicKey, v: Option<Arc<BucketEntry>>| -> bool {
// Exclude our own node from routes
if v.is_none() {
return false;
@ -735,8 +735,8 @@ impl RouteSpecStore {
) as RoutingTableEntryFilter;
let filters = VecDeque::from([filter]);
let compare = |rti: &RoutingTableInner,
v1: &(DHTKey, Option<Arc<BucketEntry>>),
v2: &(DHTKey, Option<Arc<BucketEntry>>)|
v1: &(PublicKey, Option<Arc<BucketEntry>>),
v2: &(PublicKey, Option<Arc<BucketEntry>>)|
-> Ordering {
// deprioritize nodes that we have already used as end points
let e1_used_end = inner
@ -808,7 +808,7 @@ impl RouteSpecStore {
cmpout
};
let transform =
|rti: &RoutingTableInner, k: DHTKey, v: Option<Arc<BucketEntry>>| -> PeerInfo {
|rti: &RoutingTableInner, k: PublicKey, v: Option<Arc<BucketEntry>>| -> PeerInfo {
// Return the peerinfo for that key
v.unwrap().with(rti, |_rti, e| {
e.make_peer_info(k, RoutingDomain::PublicInternet.into())
@ -842,7 +842,7 @@ impl RouteSpecStore {
}
// Ensure the route doesn't contain both a node and its relay
let mut seen_nodes: HashSet<DHTKey> = HashSet::new();
let mut seen_nodes: HashSet<PublicKey> = HashSet::new();
for n in permutation {
let node = nodes.get(*n).unwrap();
if !seen_nodes.insert(node.node_id.key) {
@ -956,7 +956,7 @@ impl RouteSpecStore {
}
        // Got a unique route, let's build the detail, register it, and return it
let hops: Vec<DHTKey> = route_nodes.iter().map(|v| nodes[*v].node_id.key).collect();
let hops: Vec<PublicKey> = route_nodes.iter().map(|v| nodes[*v].node_id.key).collect();
let hop_node_refs = hops
.iter()
.map(|k| {
@ -994,10 +994,10 @@ impl RouteSpecStore {
#[instrument(level = "trace", skip(self, data, callback), ret)]
pub fn with_signature_validated_route<F,R>(
&self,
public_key: &DHTKey,
signatures: &[DHTSignature],
public_key: &PublicKey,
signatures: &[Signature],
data: &[u8],
last_hop_id: DHTKey,
last_hop_id: PublicKey,
callback: F,
) -> Option<R>
where F: FnOnce(&RouteSpecDetail) -> R,
@ -1038,7 +1038,7 @@ impl RouteSpecStore {
}
#[instrument(level = "trace", skip(self), ret, err)]
async fn test_allocated_route(&self, key: &DHTKey) -> EyreResult<bool> {
async fn test_allocated_route(&self, key: &PublicKey) -> EyreResult<bool> {
// Make loopback route to test with
let dest = {
let private_route = self.assemble_private_route(key, None)?;
@ -1081,7 +1081,7 @@ impl RouteSpecStore {
}
#[instrument(level = "trace", skip(self), ret, err)]
async fn test_remote_route(&self, key: &DHTKey) -> EyreResult<bool> {
async fn test_remote_route(&self, key: &PublicKey) -> EyreResult<bool> {
// Make private route test
let dest = {
// Get the route to test
@ -1121,7 +1121,7 @@ impl RouteSpecStore {
/// Test an allocated route for continuity
#[instrument(level = "trace", skip(self), ret, err)]
pub async fn test_route(&self, key: &DHTKey) -> EyreResult<bool> {
pub async fn test_route(&self, key: &PublicKey) -> EyreResult<bool> {
let is_remote = {
let inner = &mut *self.inner.lock();
let cur_ts = get_aligned_timestamp();
@ -1136,7 +1136,7 @@ impl RouteSpecStore {
/// Release an allocated route that is no longer in use
#[instrument(level = "trace", skip(self), ret)]
fn release_allocated_route(&self, public_key: &DHTKey) -> bool {
fn release_allocated_route(&self, public_key: &PublicKey) -> bool {
let mut inner = self.inner.lock();
let Some(detail) = inner.content.details.remove(public_key) else {
return false;
@ -1185,7 +1185,7 @@ impl RouteSpecStore {
/// Release an allocated or remote route that is no longer in use
#[instrument(level = "trace", skip(self), ret)]
pub fn release_route(&self, key: &DHTKey) -> bool {
pub fn release_route(&self, key: &PublicKey) -> bool {
let is_remote = {
let inner = &mut *self.inner.lock();
@ -1214,8 +1214,8 @@ impl RouteSpecStore {
stability: Stability,
sequencing: Sequencing,
directions: DirectionSet,
avoid_node_ids: &[DHTKey],
) -> Option<DHTKey> {
avoid_node_ids: &[PublicKey],
) -> Option<PublicKey> {
let cur_ts = get_aligned_timestamp();
let mut routes = Vec::new();
@ -1266,7 +1266,7 @@ impl RouteSpecStore {
/// List all allocated routes
pub fn list_allocated_routes<F, R>(&self, mut filter: F) -> Vec<R>
where
F: FnMut(&DHTKey, &RouteSpecDetail) -> Option<R>,
F: FnMut(&PublicKey, &RouteSpecDetail) -> Option<R>,
{
let inner = self.inner.lock();
let mut out = Vec::with_capacity(inner.content.details.len());
@ -1281,7 +1281,7 @@ impl RouteSpecStore {
    /// List all imported remote routes
pub fn list_remote_routes<F, R>(&self, mut filter: F) -> Vec<R>
where
F: FnMut(&DHTKey, &RemotePrivateRouteInfo) -> Option<R>,
F: FnMut(&PublicKey, &RemotePrivateRouteInfo) -> Option<R>,
{
let inner = self.inner.lock();
let mut out = Vec::with_capacity(inner.cache.remote_private_route_cache.len());
@ -1294,7 +1294,7 @@ impl RouteSpecStore {
}
/// Get the debug description of a route
pub fn debug_route(&self, key: &DHTKey) -> Option<String> {
pub fn debug_route(&self, key: &PublicKey) -> Option<String> {
let inner = &mut *self.inner.lock();
let cur_ts = get_aligned_timestamp();
// If this is a remote route, print it
@ -1310,7 +1310,7 @@ impl RouteSpecStore {
//////////////////////////////////////////////////////////////////////
// Route cache
fn add_to_compiled_route_cache(&self, inner: &mut RouteSpecStoreInner, pr_pubkey: DHTKey, safety_route: SafetyRoute)
fn add_to_compiled_route_cache(&self, inner: &mut RouteSpecStoreInner, pr_pubkey: PublicKey, safety_route: SafetyRoute)
{
let key = CompiledRouteCacheKey {
sr_pubkey: safety_route.public_key,
@ -1322,7 +1322,7 @@ impl RouteSpecStore {
}
}
fn lookup_compiled_route_cache(&self, inner: &mut RouteSpecStoreInner, sr_pubkey: DHTKey, pr_pubkey: DHTKey) -> Option<SafetyRoute> {
fn lookup_compiled_route_cache(&self, inner: &mut RouteSpecStoreInner, sr_pubkey: PublicKey, pr_pubkey: PublicKey) -> Option<SafetyRoute> {
let key = CompiledRouteCacheKey {
sr_pubkey,
@ -1332,7 +1332,7 @@ impl RouteSpecStore {
inner.cache.compiled_route_cache.get(&key).cloned()
}
fn invalidate_compiled_route_cache(&self, inner: &mut RouteSpecStoreInner, dead_key: &DHTKey) {
fn invalidate_compiled_route_cache(&self, inner: &mut RouteSpecStoreInner, dead_key: &PublicKey) {
let mut dead_entries = Vec::new();
for (k, _v) in inner.cache.compiled_route_cache.iter() {
if k.sr_pubkey == *dead_key || k.pr_pubkey == *dead_key {
@ -1581,8 +1581,8 @@ impl RouteSpecStore {
rti: &RoutingTableInner,
safety_spec: &SafetySpec,
direction: DirectionSet,
avoid_node_ids: &[DHTKey],
) -> EyreResult<Option<DHTKey>> {
avoid_node_ids: &[PublicKey],
) -> EyreResult<Option<PublicKey>> {
// Ensure the total hop count isn't too long for our config
let max_route_hop_count = self.unlocked_inner.max_route_hop_count;
if safety_spec.hop_count == 0 {
@ -1641,8 +1641,8 @@ impl RouteSpecStore {
pub fn get_private_route_for_safety_spec(
&self,
safety_spec: &SafetySpec,
avoid_node_ids: &[DHTKey],
) -> EyreResult<Option<DHTKey>> {
avoid_node_ids: &[PublicKey],
) -> EyreResult<Option<PublicKey>> {
let inner = &mut *self.inner.lock();
let routing_table = self.unlocked_inner.routing_table.clone();
let rti = &*routing_table.inner.read();
@ -1660,7 +1660,7 @@ impl RouteSpecStore {
#[instrument(level = "trace", skip(self), err)]
pub fn assemble_private_route(
&self,
key: &DHTKey,
key: &PublicKey,
optimized: Option<bool>,
) -> EyreResult<PrivateRoute> {
let inner = &*self.inner.lock();
@ -1749,7 +1749,7 @@ impl RouteSpecStore {
/// Import a remote private route for compilation
#[instrument(level = "trace", skip(self, blob), ret, err)]
pub fn import_remote_private_route(&self, blob: Vec<u8>) -> EyreResult<DHTKey> {
pub fn import_remote_private_route(&self, blob: Vec<u8>) -> EyreResult<PublicKey> {
// decode the pr blob
let private_route = RouteSpecStore::blob_to_private_route(blob)?;
@ -1774,7 +1774,7 @@ impl RouteSpecStore {
/// Release a remote private route that is no longer in use
#[instrument(level = "trace", skip(self), ret)]
fn release_remote_private_route(&self, key: &DHTKey) -> bool {
fn release_remote_private_route(&self, key: &PublicKey) -> bool {
let inner = &mut *self.inner.lock();
if inner.cache.remote_private_route_cache.remove(key).is_some() {
// Mark it as dead for the update
@ -1786,7 +1786,7 @@ impl RouteSpecStore {
}
/// Retrieve an imported remote private route by its public key
pub fn get_remote_private_route(&self, key: &DHTKey) -> Option<PrivateRoute> {
pub fn get_remote_private_route(&self, key: &PublicKey) -> Option<PrivateRoute> {
let inner = &mut *self.inner.lock();
let cur_ts = get_aligned_timestamp();
Self::with_get_remote_private_route(inner, cur_ts, key, |r| {
@ -1795,7 +1795,7 @@ impl RouteSpecStore {
}
/// Retrieve an imported remote private route by its public key but don't 'touch' it
pub fn peek_remote_private_route(&self, key: &DHTKey) -> Option<PrivateRoute> {
pub fn peek_remote_private_route(&self, key: &PublicKey) -> Option<PrivateRoute> {
let inner = &mut *self.inner.lock();
let cur_ts = get_aligned_timestamp();
Self::with_peek_remote_private_route(inner, cur_ts, key, |r| {
@ -1856,7 +1856,7 @@ impl RouteSpecStore {
fn with_get_remote_private_route<F, R>(
inner: &mut RouteSpecStoreInner,
cur_ts: Timestamp,
key: &DHTKey,
key: &PublicKey,
f: F,
) -> Option<R>
where
@ -1876,7 +1876,7 @@ impl RouteSpecStore {
fn with_peek_remote_private_route<F, R>(
inner: &mut RouteSpecStoreInner,
cur_ts: Timestamp,
key: &DHTKey,
key: &PublicKey,
f: F,
) -> Option<R>
where
@ -1898,7 +1898,7 @@ impl RouteSpecStore {
/// Check to see if this remote (not ours) private route has seen our current node info yet
/// This happens when you communicate with a private route without a safety route
pub fn has_remote_private_route_seen_our_node_info(&self, key: &DHTKey) -> bool {
pub fn has_remote_private_route_seen_our_node_info(&self, key: &PublicKey) -> bool {
let our_node_info_ts = {
let rti = &*self.unlocked_inner.routing_table.inner.read();
let Some(ts) = rti.get_own_node_info_ts(RoutingDomain::PublicInternet) else {
@ -1930,7 +1930,7 @@ impl RouteSpecStore {
/// was that node that had the private route.
pub fn mark_remote_private_route_seen_our_node_info(
&self,
key: &DHTKey,
key: &PublicKey,
cur_ts: Timestamp,
) -> EyreResult<()> {
let our_node_info_ts = {
@ -1960,7 +1960,7 @@ impl RouteSpecStore {
}
/// Get the route statistics for any route we know about, local or remote
pub fn with_route_stats<F, R>(&self, cur_ts: Timestamp, key: &DHTKey, f: F) -> Option<R>
pub fn with_route_stats<F, R>(&self, cur_ts: Timestamp, key: &PublicKey, f: F) -> Option<R>
where
F: FnOnce(&mut RouteStats) -> R,
{
@ -2007,7 +2007,7 @@ impl RouteSpecStore {
/// Mark route as published
/// When first deserialized, routes must be re-published in order to ensure they remain
/// in the RouteSpecStore.
pub fn mark_route_published(&self, key: &DHTKey, published: bool) -> EyreResult<()> {
pub fn mark_route_published(&self, key: &PublicKey, published: bool) -> EyreResult<()> {
let inner = &mut *self.inner.lock();
Self::detail_mut(inner, key)
.ok_or_else(|| eyre!("route does not exist"))?

View File

@ -10,13 +10,13 @@ pub enum ContactMethod {
/// Contact the node directly
Direct(DialInfo),
/// Request via signal the node connect back directly (relay, target)
SignalReverse(DHTKey, DHTKey),
SignalReverse(PublicKey, PublicKey),
/// Request via signal the node negotiate a hole punch (relay, target_node)
SignalHolePunch(DHTKey, DHTKey),
SignalHolePunch(PublicKey, PublicKey),
/// Must use an inbound relay to reach the node
InboundRelay(DHTKey),
InboundRelay(PublicKey),
/// Must use outbound relay to reach the node
OutboundRelay(DHTKey),
OutboundRelay(PublicKey),
}
#[derive(Debug)]
@ -131,7 +131,7 @@ impl RoutingDomainDetailCommon {
let signed_node_info = match relay_info {
Some((relay_id, relay_sdni)) => SignedNodeInfo::Relayed(
SignedRelayedNodeInfo::with_secret(
SignedRelayedNodeInfo::make_signatures(
NodeId::new(rti.unlocked_inner.node_id),
node_info,
relay_id,

View File

@ -28,7 +28,7 @@ pub struct RoutingTableInner {
/// Statistics about the total bandwidth to/from this node
pub(super) self_transfer_stats: TransferStatsDownUp,
/// Peers we have recently communicated with
pub(super) recent_peers: LruCache<DHTKey, RecentPeersEntry>,
pub(super) recent_peers: LruCache<PublicKey, RecentPeersEntry>,
/// Storage for private/safety RouteSpecs
pub(super) route_spec_store: Option<RouteSpecStore>,
}
@ -56,11 +56,11 @@ impl RoutingTableInner {
self.network_manager().rpc_processor()
}
pub fn node_id(&self) -> DHTKey {
pub fn node_id(&self) -> PublicKey {
self.unlocked_inner.node_id
}
pub fn node_id_secret(&self) -> DHTKeySecret {
pub fn node_id_secret(&self) -> SecretKey {
self.unlocked_inner.node_id_secret
}
@ -326,8 +326,8 @@ impl RoutingTableInner {
pub fn init_buckets(&mut self, routing_table: RoutingTable) {
// Size the buckets (one per bit)
self.buckets.clear();
self.buckets.reserve(DHT_KEY_LENGTH * 8);
for _ in 0..DHT_KEY_LENGTH * 8 {
self.buckets.reserve(PUBLIC_KEY_LENGTH * 8);
for _ in 0..PUBLIC_KEY_LENGTH * 8 {
let bucket = Bucket::new(routing_table.clone());
self.buckets.push(bucket);
}
@ -412,7 +412,7 @@ impl RoutingTableInner {
}
}
pub fn find_bucket_index(&self, node_id: DHTKey) -> usize {
pub fn find_bucket_index(&self, node_id: PublicKey) -> usize {
distance(&node_id, &self.unlocked_inner.node_id)
.first_nonzero_bit()
.unwrap()
@ -436,7 +436,10 @@ impl RoutingTableInner {
count
}
pub fn with_entries<T, F: FnMut(&RoutingTableInner, DHTKey, Arc<BucketEntry>) -> Option<T>>(
pub fn with_entries<
T,
F: FnMut(&RoutingTableInner, PublicKey, Arc<BucketEntry>) -> Option<T>,
>(
&self,
cur_ts: Timestamp,
min_state: BucketEntryState,
@ -461,7 +464,7 @@ impl RoutingTableInner {
pub fn with_entries_mut<
T,
F: FnMut(&mut RoutingTableInner, DHTKey, Arc<BucketEntry>) -> Option<T>,
F: FnMut(&mut RoutingTableInner, PublicKey, Arc<BucketEntry>) -> Option<T>,
>(
&mut self,
cur_ts: Timestamp,
@ -544,7 +547,7 @@ impl RoutingTableInner {
pub fn create_node_ref<F>(
&mut self,
outer_self: RoutingTable,
node_id: DHTKey,
node_id: PublicKey,
update_func: F,
) -> Option<NodeRef>
where
@ -597,7 +600,7 @@ impl RoutingTableInner {
}
/// Resolve an existing routing table entry and return a reference to it
pub fn lookup_node_ref(&self, outer_self: RoutingTable, node_id: DHTKey) -> Option<NodeRef> {
pub fn lookup_node_ref(&self, outer_self: RoutingTable, node_id: PublicKey) -> Option<NodeRef> {
if node_id == self.unlocked_inner.node_id {
log_rtab!(error "can't look up own node id in routing table");
return None;
@ -613,7 +616,7 @@ impl RoutingTableInner {
pub fn lookup_and_filter_noderef(
&self,
outer_self: RoutingTable,
node_id: DHTKey,
node_id: PublicKey,
routing_domain_set: RoutingDomainSet,
dial_info_filter: DialInfoFilter,
) -> Option<NodeRef> {
@ -628,7 +631,7 @@ impl RoutingTableInner {
}
/// Resolve an existing routing table entry and call a function on its entry without using a noderef
pub fn with_node_entry<F, R>(&self, node_id: DHTKey, f: F) -> Option<R>
pub fn with_node_entry<F, R>(&self, node_id: PublicKey, f: F) -> Option<R>
where
F: FnOnce(Arc<BucketEntry>) -> R,
{
@ -651,7 +654,7 @@ impl RoutingTableInner {
&mut self,
outer_self: RoutingTable,
routing_domain: RoutingDomain,
node_id: DHTKey,
node_id: PublicKey,
signed_node_info: SignedNodeInfo,
allow_invalid: bool,
) -> Option<NodeRef> {
@ -696,7 +699,7 @@ impl RoutingTableInner {
pub fn register_node_with_existing_connection(
&mut self,
outer_self: RoutingTable,
node_id: DHTKey,
node_id: PublicKey,
descriptor: ConnectionDescriptor,
timestamp: Timestamp,
) -> Option<NodeRef> {
@ -757,7 +760,7 @@ impl RoutingTableInner {
}
}
pub fn touch_recent_peer(&mut self, node_id: DHTKey, last_connection: ConnectionDescriptor) {
pub fn touch_recent_peer(&mut self, node_id: PublicKey, last_connection: ConnectionDescriptor) {
self.recent_peers
.insert(node_id, RecentPeersEntry { last_connection });
}
@ -773,7 +776,7 @@ impl RoutingTableInner {
mut filters: VecDeque<RoutingTableEntryFilter>,
) -> Vec<NodeRef> {
let public_node_filter = Box::new(
|rti: &RoutingTableInner, _k: DHTKey, v: Option<Arc<BucketEntry>>| {
|rti: &RoutingTableInner, _k: PublicKey, v: Option<Arc<BucketEntry>>| {
let entry = v.unwrap();
entry.with(rti, |_rti, e| {
// skip nodes on local network
@ -793,7 +796,7 @@ impl RoutingTableInner {
self.find_fastest_nodes(
node_count,
filters,
|_rti: &RoutingTableInner, k: DHTKey, v: Option<Arc<BucketEntry>>| {
|_rti: &RoutingTableInner, k: PublicKey, v: Option<Arc<BucketEntry>>| {
NodeRef::new(outer_self.clone(), k, v.unwrap().clone(), None)
},
)
@ -819,7 +822,7 @@ impl RoutingTableInner {
&self,
routing_domain: RoutingDomain,
own_peer_info: &PeerInfo,
k: DHTKey,
k: PublicKey,
v: Option<Arc<BucketEntry>>,
) -> PeerInfo {
match v {
@ -839,14 +842,15 @@ impl RoutingTableInner {
where
C: for<'a, 'b> FnMut(
&'a RoutingTableInner,
&'b (DHTKey, Option<Arc<BucketEntry>>),
&'b (DHTKey, Option<Arc<BucketEntry>>),
&'b (PublicKey, Option<Arc<BucketEntry>>),
&'b (PublicKey, Option<Arc<BucketEntry>>),
) -> core::cmp::Ordering,
T: for<'r> FnMut(&'r RoutingTableInner, DHTKey, Option<Arc<BucketEntry>>) -> O,
T: for<'r> FnMut(&'r RoutingTableInner, PublicKey, Option<Arc<BucketEntry>>) -> O,
{
// collect all the nodes for sorting
let mut nodes =
Vec::<(DHTKey, Option<Arc<BucketEntry>>)>::with_capacity(self.bucket_entry_count + 1);
let mut nodes = Vec::<(PublicKey, Option<Arc<BucketEntry>>)>::with_capacity(
self.bucket_entry_count + 1,
);
        // add our own node (only one of these has the None entry)
let mut filtered = false;
@ -893,13 +897,13 @@ impl RoutingTableInner {
transform: T,
) -> Vec<O>
where
T: for<'r> FnMut(&'r RoutingTableInner, DHTKey, Option<Arc<BucketEntry>>) -> O,
T: for<'r> FnMut(&'r RoutingTableInner, PublicKey, Option<Arc<BucketEntry>>) -> O,
{
let cur_ts = get_aligned_timestamp();
// Add filter to remove dead nodes always
let filter_dead = Box::new(
move |rti: &RoutingTableInner, _k: DHTKey, v: Option<Arc<BucketEntry>>| {
move |rti: &RoutingTableInner, _k: PublicKey, v: Option<Arc<BucketEntry>>| {
if let Some(entry) = &v {
// always filter out dead nodes
if entry.with(rti, |_rti, e| e.state(cur_ts) == BucketEntryState::Dead) {
@ -917,8 +921,8 @@ impl RoutingTableInner {
// Fastest sort
let sort = |rti: &RoutingTableInner,
(a_key, a_entry): &(DHTKey, Option<Arc<BucketEntry>>),
(b_key, b_entry): &(DHTKey, Option<Arc<BucketEntry>>)| {
(a_key, a_entry): &(PublicKey, Option<Arc<BucketEntry>>),
(b_key, b_entry): &(PublicKey, Option<Arc<BucketEntry>>)| {
// same nodes are always the same
if a_key == b_key {
return core::cmp::Ordering::Equal;
@ -973,12 +977,12 @@ impl RoutingTableInner {
pub fn find_closest_nodes<T, O>(
&self,
node_id: DHTKey,
node_id: PublicKey,
filters: VecDeque<RoutingTableEntryFilter>,
transform: T,
) -> Vec<O>
where
T: for<'r> FnMut(&'r RoutingTableInner, DHTKey, Option<Arc<BucketEntry>>) -> O,
T: for<'r> FnMut(&'r RoutingTableInner, PublicKey, Option<Arc<BucketEntry>>) -> O,
{
let cur_ts = get_aligned_timestamp();
let node_count = {
@ -989,8 +993,8 @@ impl RoutingTableInner {
// closest sort
let sort = |rti: &RoutingTableInner,
(a_key, a_entry): &(DHTKey, Option<Arc<BucketEntry>>),
(b_key, b_entry): &(DHTKey, Option<Arc<BucketEntry>>)| {
(a_key, a_entry): &(PublicKey, Option<Arc<BucketEntry>>),
(b_key, b_entry): &(PublicKey, Option<Arc<BucketEntry>>)| {
// same nodes are always the same
if a_key == b_key {
return core::cmp::Ordering::Equal;

View File

@ -11,7 +11,7 @@ pub struct BootstrapRecord {
max_version: u8,
dial_info_details: Vec<DialInfoDetail>,
}
pub type BootstrapRecordMap = BTreeMap<DHTKey, BootstrapRecord>;
pub type BootstrapRecordMap = BTreeMap<PublicKey, BootstrapRecord>;
impl RoutingTable {
// Bootstrap lookup process
@ -58,7 +58,7 @@ impl RoutingTable {
Ok(v) => v,
};
// for each record resolve into key/bootstraprecord pairs
let mut bootstrap_records: Vec<(DHTKey, BootstrapRecord)> = Vec::new();
let mut bootstrap_records: Vec<(PublicKey, BootstrapRecord)> = Vec::new();
for bsnirecord in bsnirecords {
// Bootstrap TXT Record Format Version 0:
// txt_version,min_version,max_version,nodeid,hostname,dialinfoshort*
@ -115,7 +115,7 @@ impl RoutingTable {
// Node Id
let node_id_str = &records[3];
let node_id_key = match DHTKey::try_decode(node_id_str) {
let node_id_key = match PublicKey::try_decode(node_id_str) {
Ok(v) => v,
Err(e) => {
warn!(

View File

@ -24,7 +24,7 @@ impl RoutingTable {
let noderefs = routing_table.find_fastest_nodes(
min_peer_count,
VecDeque::new(),
|_rti, k: DHTKey, v: Option<Arc<BucketEntry>>| {
|_rti, k: PublicKey, v: Option<Arc<BucketEntry>>| {
NodeRef::new(routing_table.clone(), k, v.unwrap().clone(), None)
},
);

View File

@ -8,7 +8,7 @@ const BACKGROUND_SAFETY_ROUTE_COUNT: usize = 2;
impl RoutingTable {
/// Fastest routes sort
fn route_sort_latency_fn(a: &(DHTKey, u64), b: &(DHTKey, u64)) -> cmp::Ordering {
fn route_sort_latency_fn(a: &(PublicKey, u64), b: &(PublicKey, u64)) -> cmp::Ordering {
let mut al = a.1;
let mut bl = b.1;
// Treat zero latency as uncalculated
@ -35,14 +35,14 @@ impl RoutingTable {
///
/// If a route doesn't 'need_testing', then we neither test nor drop it
#[instrument(level = "trace", skip(self))]
fn get_allocated_routes_to_test(&self, cur_ts: Timestamp) -> Vec<DHTKey> {
fn get_allocated_routes_to_test(&self, cur_ts: Timestamp) -> Vec<PublicKey> {
let default_route_hop_count =
self.with_config(|c| c.network.rpc.default_route_hop_count as usize);
let rss = self.route_spec_store();
let mut must_test_routes = Vec::<DHTKey>::new();
let mut unpublished_routes = Vec::<(DHTKey, u64)>::new();
let mut expired_routes = Vec::<DHTKey>::new();
let mut must_test_routes = Vec::<PublicKey>::new();
let mut unpublished_routes = Vec::<(PublicKey, u64)>::new();
let mut expired_routes = Vec::<PublicKey>::new();
rss.list_allocated_routes(|k, v| {
let stats = v.get_stats();
// Ignore nodes that don't need testing
@ -95,7 +95,7 @@ impl RoutingTable {
async fn test_route_set(
&self,
stop_token: StopToken,
routes_needing_testing: Vec<DHTKey>,
routes_needing_testing: Vec<PublicKey>,
) -> EyreResult<()> {
if routes_needing_testing.is_empty() {
return Ok(());
@ -107,7 +107,7 @@ impl RoutingTable {
#[derive(Default, Debug)]
struct TestRouteContext {
failed: bool,
dead_routes: Vec<DHTKey>,
dead_routes: Vec<PublicKey>,
}
let mut unord = FuturesUnordered::new();

View File

@ -1,7 +1,7 @@
use super::*;
use core::convert::TryInto;
pub fn decode_dht_key(public_key: &veilid_capnp::key256::Reader) -> DHTKey {
pub fn decode_dht_key(public_key: &veilid_capnp::key256::Reader) -> PublicKey {
let u0 = public_key.get_u0().to_be_bytes();
let u1 = public_key.get_u1().to_be_bytes();
let u2 = public_key.get_u2().to_be_bytes();
@ -13,11 +13,11 @@ pub fn decode_dht_key(public_key: &veilid_capnp::key256::Reader) -> DHTKey {
x[16..24].copy_from_slice(&u2);
x[24..32].copy_from_slice(&u3);
DHTKey::new(x)
PublicKey::new(x)
}
pub fn encode_dht_key(
key: &DHTKey,
key: &PublicKey,
builder: &mut veilid_capnp::key256::Builder,
) -> Result<(), RPCError> {
builder.set_u0(u64::from_be_bytes(

View File

@ -1,6 +1,6 @@
use super::*;
pub fn encode_signature(sig: &DHTSignature, builder: &mut veilid_capnp::signature512::Builder) {
pub fn encode_signature(sig: &Signature, builder: &mut veilid_capnp::signature512::Builder) {
let sig = &sig.bytes;
builder.set_u0(u64::from_be_bytes(
@ -29,7 +29,7 @@ pub fn encode_signature(sig: &DHTSignature, builder: &mut veilid_capnp::signatur
));
}
pub fn decode_signature(reader: &veilid_capnp::signature512::Reader) -> DHTSignature {
pub fn decode_signature(reader: &veilid_capnp::signature512::Reader) -> Signature {
let u0 = reader.get_u0().to_be_bytes();
let u1 = reader.get_u1().to_be_bytes();
let u2 = reader.get_u2().to_be_bytes();
@ -39,7 +39,7 @@ pub fn decode_signature(reader: &veilid_capnp::signature512::Reader) -> DHTSigna
let u6 = reader.get_u6().to_be_bytes();
let u7 = reader.get_u7().to_be_bytes();
DHTSignature::new([
Signature::new([
u0[0], u0[1], u0[2], u0[3], u0[4], u0[5], u0[6], u0[7], // u0
u1[0], u1[1], u1[2], u1[3], u1[4], u1[5], u1[6], u1[7], // u1
u2[0], u2[1], u2[2], u2[3], u2[4], u2[5], u2[6], u2[7], // u2

View File

@ -117,7 +117,7 @@ impl RPCOperation {
pub fn decode(
operation_reader: &veilid_capnp::operation::Reader,
opt_sender_node_id: Option<&DHTKey>,
opt_sender_node_id: Option<&PublicKey>,
) -> Result<Self, RPCError> {
let op_id = OperationId::new(operation_reader.get_op_id());

View File

@ -2,7 +2,7 @@ use super::*;
#[derive(Debug, Clone)]
pub struct RPCOperationFindBlockQ {
pub block_id: DHTKey,
pub block_id: PublicKey,
}
impl RPCOperationFindBlockQ {

View File

@ -2,7 +2,7 @@ use super::*;
#[derive(Debug, Clone)]
pub struct RPCOperationFindNodeQ {
pub node_id: DHTKey,
pub node_id: PublicKey,
}
impl RPCOperationFindNodeQ {

View File

@ -2,17 +2,15 @@ use super::*;
#[derive(Debug, Clone)]
pub struct RoutedOperation {
pub version: u8,
pub sequencing: Sequencing,
pub signatures: Vec<DHTSignature>,
pub signatures: Vec<TypedSignature>,
pub nonce: Nonce,
pub data: Vec<u8>,
}
impl RoutedOperation {
pub fn new(version: u8, sequencing: Sequencing, nonce: Nonce, data: Vec<u8>) -> Self {
pub fn new(sequencing: Sequencing, nonce: Nonce, data: Vec<u8>) -> Self {
Self {
version,
sequencing,
signatures: Vec::new(),
nonce,
@ -24,25 +22,23 @@ impl RoutedOperation {
reader: &veilid_capnp::routed_operation::Reader,
) -> Result<RoutedOperation, RPCError> {
let sigs_reader = reader.get_signatures().map_err(RPCError::protocol)?;
let mut signatures = Vec::<DHTSignature>::with_capacity(
let mut signatures = Vec::<TypedSignature>::with_capacity(
sigs_reader
.len()
.try_into()
.map_err(RPCError::map_internal("too many signatures"))?,
);
for s in sigs_reader.iter() {
let sig = decode_signature(&s);
let sig = decode_typed_signature(&s);
signatures.push(sig);
}
let version = reader.get_version();
let sequencing = decode_sequencing(reader.get_sequencing().map_err(RPCError::protocol)?);
let n_reader = reader.get_nonce().map_err(RPCError::protocol)?;
let nonce = decode_nonce(&n_reader);
let data = reader.get_data().map_err(RPCError::protocol)?.to_vec();
Ok(RoutedOperation {
version,
sequencing,
signatures,
nonce,
@ -54,7 +50,6 @@ impl RoutedOperation {
&self,
builder: &mut veilid_capnp::routed_operation::Builder,
) -> Result<(), RPCError> {
builder.reborrow().set_version(self.version);
builder
.reborrow()
.set_sequencing(encode_sequencing(self.sequencing));
@ -66,7 +61,7 @@ impl RoutedOperation {
);
for (i, sig) in self.signatures.iter().enumerate() {
let mut sig_builder = sigs_builder.reborrow().get(i as u32);
encode_signature(sig, &mut sig_builder);
encode_typed_signature(sig, &mut sig_builder);
}
let mut n_builder = builder.reborrow().init_nonce();
encode_nonce(&self.nonce, &mut n_builder);
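
RoutedOperation drops its per-operation version field and switches to kind-tagged signatures. A minimal sketch (not part of the commit) of constructing one under the new shape, assuming the RoutedOperation, TypedSignature, Nonce, and Sequencing types shown in this file are in scope:

fn build_routed_operation(sequencing: Sequencing, nonce: Nonce, payload: Vec<u8>) -> RoutedOperation {
    // The constructor no longer takes a protocol version (the `version` field was removed above).
    let op = RoutedOperation::new(sequencing, nonce, payload);
    // Signatures start empty; each hop appends a kind-tagged TypedSignature as it forwards.
    debug_assert!(op.signatures.is_empty());
    op
}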

View File

@ -2,7 +2,7 @@ use super::*;
#[derive(Debug, Clone)]
pub struct RPCOperationSupplyBlockQ {
pub block_id: DHTKey,
pub block_id: PublicKey,
}
impl RPCOperationSupplyBlockQ {

View File

@ -23,7 +23,7 @@ pub fn encode_signed_direct_node_info(
pub fn decode_signed_direct_node_info(
reader: &veilid_capnp::signed_direct_node_info::Reader,
node_id: &DHTKey,
node_id: &PublicKey,
) -> Result<SignedDirectNodeInfo, RPCError> {
let ni_reader = reader
.reborrow()

View File

@ -20,7 +20,7 @@ pub fn encode_signed_node_info(
pub fn decode_signed_node_info(
reader: &veilid_capnp::signed_node_info::Reader,
node_id: &DHTKey,
node_id: &PublicKey,
) -> Result<SignedNodeInfo, RPCError> {
match reader
.which()

View File

@ -26,7 +26,7 @@ pub fn encode_signed_relayed_node_info(
pub fn decode_signed_relayed_node_info(
reader: &veilid_capnp::signed_relayed_node_info::Reader,
node_id: &DHTKey,
node_id: &PublicKey,
) -> Result<SignedRelayedNodeInfo, RPCError> {
let ni_reader = reader
.reborrow()

View File

@ -15,7 +15,7 @@ pub enum Destination {
/// The relay to send to
relay: NodeRef,
/// The final destination the relay should send to
target: DHTKey,
target: PublicKey,
/// Require safety route or not
safety_selection: SafetySelection,
},
@ -36,7 +36,7 @@ impl Destination {
safety_selection: SafetySelection::Unsafe(sequencing),
}
}
pub fn relay(relay: NodeRef, target: DHTKey) -> Self {
pub fn relay(relay: NodeRef, target: PublicKey) -> Self {
let sequencing = relay.sequencing();
Self::Relay {
relay,

View File

@ -53,7 +53,7 @@ struct RPCMessageHeaderDetailDirect {
#[derive(Debug, Clone)]
struct RPCMessageHeaderDetailSafetyRouted {
/// Remote safety route used
remote_safety_route: DHTKey,
remote_safety_route: PublicKey,
/// The sequencing used for this route
sequencing: Sequencing,
}
@ -62,9 +62,9 @@ struct RPCMessageHeaderDetailSafetyRouted {
#[derive(Debug, Clone)]
struct RPCMessageHeaderDetailPrivateRouted {
/// Remote safety route used (or possibly a node id in the case of no safety route)
remote_safety_route: DHTKey,
remote_safety_route: PublicKey,
/// The private route we received the rpc over
private_route: DHTKey,
private_route: PublicKey,
/// The safety spec for replying to this private routed rpc
safety_spec: SafetySpec,
}
@ -141,9 +141,9 @@ struct WaitableReply {
node_ref: NodeRef,
send_ts: Timestamp,
send_data_kind: SendDataKind,
safety_route: Option<DHTKey>,
remote_private_route: Option<DHTKey>,
reply_private_route: Option<DHTKey>,
safety_route: Option<PublicKey>,
remote_private_route: Option<PublicKey>,
reply_private_route: Option<PublicKey>,
}
/////////////////////////////////////////////////////////////////////
@ -164,17 +164,17 @@ struct RenderedOperation {
/// The rendered operation bytes
message: Vec<u8>,
/// Destination node id we're sending to
node_id: DHTKey,
node_id: PublicKey,
/// Node to send envelope to (may not be destination node id in case of relay)
node_ref: NodeRef,
/// Total safety + private route hop count + 1 hop for the initial send
hop_count: usize,
/// The safety route used to send the message
safety_route: Option<DHTKey>,
safety_route: Option<PublicKey>,
/// The private route used to send the message
remote_private_route: Option<DHTKey>,
remote_private_route: Option<PublicKey>,
/// The private route requested to receive the reply
reply_private_route: Option<DHTKey>,
reply_private_route: Option<PublicKey>,
}
/// Node information exchanged during every RPC message
@ -371,7 +371,7 @@ impl RPCProcessor {
/// If no node was found in the timeout, this returns None
pub async fn search_dht_single_key(
&self,
_node_id: DHTKey,
_node_id: PublicKey,
_count: u32,
_fanout: u32,
_timeout: Option<u64>,
@ -386,7 +386,7 @@ impl RPCProcessor {
/// Search the DHT for the 'count' closest nodes to a key, adding them all to the routing table if they are not there and returning their node references
pub async fn search_dht_multi_key(
&self,
_node_id: DHTKey,
_node_id: PublicKey,
_count: u32,
_fanout: u32,
_timeout: Option<u64>,
@ -399,7 +399,7 @@ impl RPCProcessor {
/// Note: This routine can possibly be recursive, hence the SendPinBoxFuture async form
pub fn resolve_node(
&self,
node_id: DHTKey,
node_id: PublicKey,
) -> SendPinBoxFuture<Result<Option<NodeRef>, RPCError>> {
let this = self.clone();
Box::pin(async move {
@ -483,7 +483,7 @@ impl RPCProcessor {
&self,
safety_selection: SafetySelection,
remote_private_route: PrivateRoute,
reply_private_route: Option<DHTKey>,
reply_private_route: Option<PublicKey>,
message_data: Vec<u8>,
) -> Result<NetworkResult<RenderedOperation>, RPCError> {
let routing_table = self.routing_table();
@ -764,8 +764,8 @@ impl RPCProcessor {
rpc_kind: RPCKind,
send_ts: Timestamp,
node_ref: NodeRef,
safety_route: Option<DHTKey>,
remote_private_route: Option<DHTKey>,
safety_route: Option<PublicKey>,
remote_private_route: Option<PublicKey>,
) {
let wants_answer = matches!(rpc_kind, RPCKind::Question);
@ -793,9 +793,9 @@ impl RPCProcessor {
&self,
send_ts: Timestamp,
node_ref: NodeRef,
safety_route: Option<DHTKey>,
remote_private_route: Option<DHTKey>,
private_route: Option<DHTKey>,
safety_route: Option<PublicKey>,
remote_private_route: Option<PublicKey>,
private_route: Option<PublicKey>,
) {
// Record for node if this was not sent via a route
if safety_route.is_none() && remote_private_route.is_none() {
@ -833,8 +833,8 @@ impl RPCProcessor {
send_ts: Timestamp,
bytes: ByteCount,
node_ref: NodeRef,
safety_route: Option<DHTKey>,
remote_private_route: Option<DHTKey>,
safety_route: Option<PublicKey>,
remote_private_route: Option<PublicKey>,
) {
let wants_answer = matches!(rpc_kind, RPCKind::Question);
@ -870,9 +870,9 @@ impl RPCProcessor {
recv_ts: Timestamp,
bytes: ByteCount,
node_ref: NodeRef,
safety_route: Option<DHTKey>,
remote_private_route: Option<DHTKey>,
reply_private_route: Option<DHTKey>,
safety_route: Option<PublicKey>,
remote_private_route: Option<PublicKey>,
reply_private_route: Option<PublicKey>,
) {
// Record stats for remote node if this was direct
if safety_route.is_none() && remote_private_route.is_none() && reply_private_route.is_none()
@ -1388,7 +1388,7 @@ impl RPCProcessor {
#[instrument(level = "trace", skip(self, body), err)]
pub fn enqueue_safety_routed_message(
&self,
remote_safety_route: DHTKey,
remote_safety_route: PublicKey,
sequencing: Sequencing,
body: Vec<u8>,
) -> EyreResult<()> {
@ -1417,8 +1417,8 @@ impl RPCProcessor {
#[instrument(level = "trace", skip(self, body), err)]
pub fn enqueue_private_routed_message(
&self,
remote_safety_route: DHTKey,
private_route: DHTKey,
remote_safety_route: PublicKey,
private_route: PublicKey,
safety_spec: SafetySpec,
body: Vec<u8>,
) -> EyreResult<()> {

View File

@ -11,7 +11,7 @@ impl RPCProcessor {
pub async fn rpc_call_find_node(
self,
dest: Destination,
key: DHTKey,
key: PublicKey,
) -> Result<NetworkResult<Answer<Vec<PeerInfo>>>, RPCError> {
// Ensure destination never has a private route
if matches!(
@ -100,7 +100,7 @@ impl RPCProcessor {
// find N nodes closest to the target node in our routing table
let filter = Box::new(
move |rti: &RoutingTableInner, _k: DHTKey, v: Option<Arc<BucketEntry>>| {
move |rti: &RoutingTableInner, _k: PublicKey, v: Option<Arc<BucketEntry>>| {
rti.filter_has_valid_signed_node_info(RoutingDomain::PublicInternet, true, v)
},
) as RoutingTableEntryFilter;

View File

@ -77,7 +77,7 @@ impl RPCProcessor {
&self,
routed_operation: RoutedOperation,
next_route_node: RouteNode,
safety_route_public_key: DHTKey,
safety_route_public_key: PublicKey,
next_private_route: PrivateRoute,
) -> Result<NetworkResult<()>, RPCError> {
// Make sure hop count makes sense
@ -142,7 +142,7 @@ impl RPCProcessor {
&self,
_detail: RPCMessageHeaderDetailDirect,
routed_operation: RoutedOperation,
remote_sr_pubkey: DHTKey,
remote_sr_pubkey: PublicKey,
) -> Result<NetworkResult<()>, RPCError> {
// Now that things are valid, decrypt the routed operation with DEC(nonce, DH(the SR's public key, the PR's (or node's) secret))
@ -177,8 +177,8 @@ impl RPCProcessor {
&self,
detail: RPCMessageHeaderDetailDirect,
routed_operation: RoutedOperation,
remote_sr_pubkey: DHTKey,
pr_pubkey: DHTKey,
remote_sr_pubkey: PublicKey,
pr_pubkey: PublicKey,
) -> Result<NetworkResult<()>, RPCError> {
// Get sender id
let sender_id = detail.envelope.get_sender_id();
@ -237,8 +237,8 @@ impl RPCProcessor {
&self,
detail: RPCMessageHeaderDetailDirect,
routed_operation: RoutedOperation,
remote_sr_pubkey: DHTKey,
pr_pubkey: DHTKey,
remote_sr_pubkey: PublicKey,
pr_pubkey: PublicKey,
) -> Result<NetworkResult<()>, RPCError> {
// If the private route public key is our node id, then this was sent via safety route to our node directly
@ -260,7 +260,7 @@ impl RPCProcessor {
pub(crate) async fn process_private_route_first_hop(
&self,
mut routed_operation: RoutedOperation,
sr_pubkey: DHTKey,
sr_pubkey: PublicKey,
mut private_route: PrivateRoute,
) -> Result<NetworkResult<()>, RPCError> {
let Some(pr_first_hop) = private_route.pop_first_hop() else {
@ -312,7 +312,7 @@ impl RPCProcessor {
}
/// Decrypt route hop data and sign routed operation
pub(crate) fn decrypt_private_route_hop_data(&self, route_hop_data: &RouteHopData, pr_pubkey: &DHTKey, route_operation: &mut RoutedOperation) -> Result<NetworkResult<RouteHop>, RPCError>
pub(crate) fn decrypt_private_route_hop_data(&self, route_hop_data: &RouteHopData, pr_pubkey: &PublicKey, route_operation: &mut RoutedOperation) -> Result<NetworkResult<RouteHop>, RPCError>
{
// Decrypt the blob with DEC(nonce, DH(the PR's public key, this hop's secret))
let node_id_secret = self.routing_table.node_id_secret();

View File

@ -102,7 +102,7 @@ impl RPCProcessor {
dial_info.clone(),
);
let will_validate_dial_info_filter = Box::new(
move |rti: &RoutingTableInner, _k: DHTKey, v: Option<Arc<BucketEntry>>| {
move |rti: &RoutingTableInner, _k: PublicKey, v: Option<Arc<BucketEntry>>| {
let entry = v.unwrap();
entry.with(rti, move |_rti, e| {
if let Some(status) = &e.node_status(routing_domain) {

View File

@ -132,9 +132,9 @@ pub async fn test_frozen(ts: TableStore) {
assert!(db.store_rkyv(0, b"asdf", &dht_key).await.is_ok());
assert_eq!(db.load_rkyv::<DHTKey>(0, b"qwer").unwrap(), None);
assert_eq!(db.load_rkyv::<PublicKey>(0, b"qwer").unwrap(), None);
let d = match db.load_rkyv::<DHTKey>(0, b"asdf") {
let d = match db.load_rkyv::<PublicKey>(0, b"asdf") {
Ok(x) => x,
Err(e) => {
panic!("couldn't decode: {}", e);
@ -148,7 +148,7 @@ pub async fn test_frozen(ts: TableStore) {
);
assert!(
db.load_rkyv::<DHTKey>(1, b"foo").is_err(),
db.load_rkyv::<PublicKey>(1, b"foo").is_err(),
"should fail to unfreeze"
);
}

View File

@ -192,8 +192,8 @@ fn config_callback(key: String) -> ConfigCallbackReturn {
"network.client_whitelist_timeout_ms" => Ok(Box::new(300_000u32)),
"network.reverse_connection_receipt_time_ms" => Ok(Box::new(5_000u32)),
"network.hole_punch_receipt_time_ms" => Ok(Box::new(5_000u32)),
"network.node_id" => Ok(Box::new(Option::<DHTKey>::None)),
"network.node_id_secret" => Ok(Box::new(Option::<DHTKeySecret>::None)),
"network.node_id" => Ok(Box::new(Option::<PublicKey>::None)),
"network.node_id_secret" => Ok(Box::new(Option::<SecretKey>::None)),
"network.bootstrap" => Ok(Box::new(Vec::<String>::new())),
"network.bootstrap_nodes" => Ok(Box::new(Vec::<String>::new())),
"network.routing_table.limit_over_attached" => Ok(Box::new(64u32)),

View File

@ -91,7 +91,7 @@ pub async fn test_signed_node_info() {
let (pkey2, skey2) = generate_secret();
let sni2 = SignedRelayedNodeInfo::with_secret(
let sni2 = SignedRelayedNodeInfo::make_signatures(
NodeId::new(pkey2.clone()),
node_info2.clone(),
NodeId::new(pkey.clone()),

View File

@ -12,7 +12,7 @@ pub async fn run_all_tests() {
info!("TEST: test_host_interface");
test_host_interface::test_all().await;
info!("TEST: test_dht_key");
test_dht_key::test_all().await;
test_types::test_all().await;
info!("TEST: test_veilid_core");
test_veilid_core::test_all().await;
info!("TEST: test_veilid_config");
@ -85,7 +85,7 @@ cfg_if! {
fn run_test_dht_key() {
setup();
block_on(async {
test_dht_key::test_all().await;
test_types::test_all().await;
});
}

View File

@ -167,7 +167,7 @@ impl VeilidAPI {
// Private route allocation
#[instrument(level = "debug", skip(self))]
pub async fn new_private_route(&self) -> Result<(DHTKey, Vec<u8>), VeilidAPIError> {
pub async fn new_private_route(&self) -> Result<(PublicKey, Vec<u8>), VeilidAPIError> {
self.new_custom_private_route(Stability::default(), Sequencing::default())
.await
}
@ -177,7 +177,7 @@ impl VeilidAPI {
&self,
stability: Stability,
sequencing: Sequencing,
) -> Result<(DHTKey, Vec<u8>), VeilidAPIError> {
) -> Result<(PublicKey, Vec<u8>), VeilidAPIError> {
let default_route_hop_count: usize = {
let config = self.config()?;
let c = config.get();
@ -223,14 +223,14 @@ impl VeilidAPI {
}
#[instrument(level = "debug", skip(self))]
pub fn import_remote_private_route(&self, blob: Vec<u8>) -> Result<DHTKey, VeilidAPIError> {
pub fn import_remote_private_route(&self, blob: Vec<u8>) -> Result<PublicKey, VeilidAPIError> {
let rss = self.routing_table()?.route_spec_store();
rss.import_remote_private_route(blob)
.map_err(|e| VeilidAPIError::invalid_argument(e, "blob", "private route blob"))
}
#[instrument(level = "debug", skip(self))]
pub fn release_private_route(&self, key: &DHTKey) -> Result<(), VeilidAPIError> {
pub fn release_private_route(&self, key: &PublicKey) -> Result<(), VeilidAPIError> {
let rss = self.routing_table()?.route_spec_store();
if rss.release_route(key) {
Ok(())
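
new_private_route and release_private_route now deal in plain PublicKey route ids plus an exportable blob. A hedged usage sketch of the two calls shown above, assuming an attached veilid_core::VeilidAPI handle:

async fn make_and_release_route(api: VeilidAPI) -> Result<Vec<u8>, VeilidAPIError> {
    // The route id is a PublicKey; the blob is what a peer imports on their
    // side with import_remote_private_route().
    let (route_id, blob) = api.new_private_route().await?;
    // ...publish `blob` out of band and use the route, then release it...
    api.release_private_route(&route_id)?;
    Ok(blob)
}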

View File

@ -7,7 +7,7 @@ use routing_table::*;
#[derive(Default, Debug)]
struct DebugCache {
imported_routes: Vec<DHTKey>,
imported_routes: Vec<PublicKey>,
}
static DEBUG_CACHE: Mutex<DebugCache> = Mutex::new(DebugCache {
@ -30,12 +30,12 @@ fn get_string(text: &str) -> Option<String> {
Some(text.to_owned())
}
fn get_route_id(rss: RouteSpecStore) -> impl Fn(&str) -> Option<DHTKey> {
fn get_route_id(rss: RouteSpecStore) -> impl Fn(&str) -> Option<PublicKey> {
return move |text: &str| {
if text.is_empty() {
return None;
}
match DHTKey::try_decode(text).ok() {
match PublicKey::try_decode(text).ok() {
Some(key) => {
let routes = rss.list_allocated_routes(|k, _| Some(*k));
if routes.contains(&key) {
@ -187,8 +187,8 @@ fn get_destination(routing_table: RoutingTable) -> impl FnOnce(&str) -> Option<D
fn get_number(text: &str) -> Option<usize> {
usize::from_str(text).ok()
}
fn get_dht_key(text: &str) -> Option<DHTKey> {
DHTKey::try_decode(text).ok()
fn get_dht_key(text: &str) -> Option<PublicKey> {
PublicKey::try_decode(text).ok()
}
fn get_node_ref(routing_table: RoutingTable) -> impl FnOnce(&str) -> Option<NodeRef> {

View File

@ -108,7 +108,7 @@ pub enum VeilidAPIError {
#[error("Shutdown")]
Shutdown,
#[error("Key not found: {key}")]
KeyNotFound { key: DHTKey },
KeyNotFound { key: PublicKey },
#[error("No connection: {message}")]
NoConnection { message: String },
#[error("No peer info: {node_id}")]
@ -147,7 +147,7 @@ impl VeilidAPIError {
pub fn shutdown() -> Self {
Self::Shutdown
}
pub fn key_not_found(key: DHTKey) -> Self {
pub fn key_not_found(key: PublicKey) -> Self {
Self::KeyNotFound { key }
}
pub fn no_connection<T: ToString>(msg: T) -> Self {

View File

@ -19,8 +19,7 @@ pub use types::*;
pub use alloc::string::ToString;
pub use attachment_manager::AttachmentManager;
pub use core::str::FromStr;
pub use crypto::Crypto;
pub use crypto::{generate_secret, sign, verify, DHTKey, DHTKeySecret, DHTSignature, Nonce};
pub use crypto::*;
pub use intf::BlockStore;
pub use intf::ProtectedStore;
pub use intf::{TableDB, TableDBTransaction, TableStore};

View File

@ -5,7 +5,7 @@ use super::*;
#[derive(Clone, Debug)]
pub enum Target {
NodeId(NodeId),
PrivateRoute(DHTKey),
PrivateRoute(PublicKey),
}
pub struct RoutingContextInner {}

View File

@ -108,7 +108,7 @@ pub struct VeilidLog {
pub struct VeilidAppMessage {
/// Some(sender) if the message was sent directly, None if received via a private/safety route
#[serde(with = "opt_json_as_string")]
pub sender: Option<NodeId>,
pub sender: Option<TypedKey>,
/// The content of the message to deliver to the application
#[serde(with = "json_as_base64")]
pub message: Vec<u8>,
@ -121,7 +121,7 @@ pub struct VeilidAppMessage {
pub struct VeilidAppCall {
/// Some(sender) if the request was sent directly, None if received via a private/safety route
#[serde(with = "opt_json_as_string")]
pub sender: Option<NodeId>,
pub sender: Option<TypedKey>,
/// The content of the request to deliver to the application
#[serde(with = "json_as_base64")]
pub message: Vec<u8>,
@ -203,7 +203,7 @@ pub struct VeilidStateAttachment {
)]
#[archive_attr(repr(C), derive(CheckBytes))]
pub struct PeerTableData {
pub node_id: DHTKey,
pub node_id: TypedKey,
pub peer_address: PeerAddress,
pub peer_stats: PeerStats,
}
@ -226,8 +226,8 @@ pub struct VeilidStateNetwork {
)]
#[archive_attr(repr(C), derive(CheckBytes))]
pub struct VeilidStateRoute {
pub dead_routes: Vec<DHTKey>,
pub dead_remote_routes: Vec<DHTKey>,
pub dead_routes: Vec<PublicKey>,
pub dead_remote_routes: Vec<PublicKey>,
}
#[derive(
@ -262,42 +262,6 @@ pub struct VeilidState {
/////////////////////////////////////////////////////////////////////////////////////////////////////
///
#[derive(
Clone,
Debug,
Default,
PartialOrd,
PartialEq,
Eq,
Ord,
Serialize,
Deserialize,
RkyvArchive,
RkyvSerialize,
RkyvDeserialize,
)]
#[archive_attr(repr(C), derive(CheckBytes))]
pub struct NodeId {
pub key: DHTKey,
}
impl NodeId {
pub fn new(key: DHTKey) -> Self {
Self { key }
}
}
impl fmt::Display for NodeId {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
write!(f, "{}", self.key.encode())
}
}
impl FromStr for NodeId {
type Err = VeilidAPIError;
fn from_str(s: &str) -> Result<Self, Self::Err> {
Ok(Self {
key: DHTKey::try_decode(s)?,
})
}
}
#[derive(
Clone,
@ -315,22 +279,12 @@ impl FromStr for NodeId {
)]
#[archive_attr(repr(C), derive(CheckBytes))]
pub struct ValueKey {
pub key: DHTKey,
pub subkey: Option<String>,
pub key: TypedKey,
pub subkey: u32,
}
impl ValueKey {
pub fn new(key: DHTKey) -> Self {
Self { key, subkey: None }
}
pub fn new_subkey(key: DHTKey, subkey: String) -> Self {
Self {
key,
subkey: if subkey.is_empty() {
None
} else {
Some(subkey)
},
}
pub fn new(key: TypedKey, subkey: u32) -> Self {
Self { key, subkey }
}
}
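
ValueKey now addresses subvalues by a numeric subkey index on a TypedKey rather than an optional string name. A minimal sketch, assuming a hypothetical record_key obtained elsewhere:

fn default_subkey_for(record_key: TypedKey) -> ValueKey {
    // Subkey index 0 replaces the old `subkey: None` default-subkey convention;
    // nonzero indices address specific subvalues of the same record.
    ValueKey::new(record_key, 0)
}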
@ -366,30 +320,6 @@ impl ValueData {
}
}
#[derive(
Clone,
Debug,
Default,
PartialOrd,
PartialEq,
Eq,
Ord,
Serialize,
Deserialize,
RkyvArchive,
RkyvSerialize,
RkyvDeserialize,
)]
#[archive_attr(repr(C), derive(CheckBytes))]
pub struct BlockId {
pub key: DHTKey,
}
impl BlockId {
pub fn new(key: DHTKey) -> Self {
Self { key }
}
}
/////////////////////////////////////////////////////////////////////////////////////////////////////
// Keep member order appropriate for sorting < preference
@ -550,7 +480,7 @@ impl SafetySelection {
#[archive_attr(repr(C), derive(CheckBytes))]
pub struct SafetySpec {
/// preferred safety route if it still exists
pub preferred_route: Option<DHTKey>,
pub preferred_route: Option<PublicKey>,
/// must be greater than 0
pub hop_count: usize,
/// prefer reliability over speed
@ -713,8 +643,8 @@ pub struct NodeInfo {
pub outbound_protocols: ProtocolTypeSet,
#[with(RkyvEnumSet)]
pub address_types: AddressTypeSet,
pub min_version: u8,
pub max_version: u8,
pub envelope_support: Vec<u8>,
pub crypto_support: Vec<CryptoKind>,
pub dial_info_detail_list: Vec<DialInfoDetail>,
}
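
With min_version/max_version replaced by explicit envelope_support and crypto_support lists, compatibility checks become simple membership tests rather than range comparisons. An illustrative sketch (names are placeholders, not the commit's code):

fn shares_crypto_kind(ours: &[CryptoKind], theirs: &[CryptoKind]) -> bool {
    // Two nodes can talk if their supported crypto kinds intersect.
    ours.iter().any(|ck| theirs.contains(ck))
}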
@ -1885,37 +1815,44 @@ impl MatchesDialInfoFilter for DialInfo {
pub struct SignedDirectNodeInfo {
pub node_info: NodeInfo,
pub timestamp: Timestamp,
pub signature: Option<DHTSignature>,
pub signatures: Vec<TypedSignature>,
}
impl SignedDirectNodeInfo {
pub fn new(
node_id: NodeId,
crypto: Crypto,
node_info: NodeInfo,
timestamp: Timestamp,
signature: DHTSignature,
keyed_signatures: Vec<TypedKeySignature>,
) -> Result<Self, VeilidAPIError> {
let node_info_bytes = Self::make_signature_bytes(&node_info, timestamp)?;
verify(&node_id.key, &node_info_bytes, &signature)?;
let signatures = crypto.verify_signatures(
&node_info_bytes,
&keyed_signatures,
TypedSignature::from_keyed,
)?;
Ok(Self {
node_info,
timestamp,
signature: Some(signature),
signatures,
})
}
pub fn with_secret(
node_id: NodeId,
pub fn make_signatures(
crypto: Crypto,
node_info: NodeInfo,
secret: &DHTKeySecret,
keypairs: Vec<TypedKeyPair>,
) -> Result<Self, VeilidAPIError> {
let timestamp = get_aligned_timestamp();
let node_info_bytes = Self::make_signature_bytes(&node_info, timestamp)?;
let signature = sign(&node_id.key, secret, &node_info_bytes)?;
let signatures = crypto.generate_signatures(
&node_info_bytes,
&keypairs,
TypedSignature::from_pair_sig,
)?;
Ok(Self {
node_info,
timestamp,
signature: Some(signature),
signatures,
})
}
@ -1940,13 +1877,13 @@ impl SignedDirectNodeInfo {
pub fn with_no_signature(node_info: NodeInfo) -> Self {
Self {
node_info,
signature: None,
signatures: Vec::new(),
timestamp: get_aligned_timestamp(),
}
}
pub fn has_valid_signature(&self) -> bool {
self.signature.is_some()
pub fn has_any_signature(&self) -> bool {
!self.signatures.is_empty()
}
}
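
Signing moves from a single node keypair to Crypto-managed, kind-tagged keypairs. A hedged sketch of the make_signatures/has_any_signature flow shown in this hunk, with crypto, node_info, and typed_keypairs assumed as inputs:

fn sign_own_node_info(
    crypto: Crypto,
    node_info: NodeInfo,
    typed_keypairs: Vec<TypedKeyPair>,
) -> Result<SignedDirectNodeInfo, VeilidAPIError> {
    let sni = SignedDirectNodeInfo::make_signatures(crypto, node_info, typed_keypairs)?;
    // One TypedSignature is produced per keypair, so a freshly signed record reports true here.
    assert!(sni.has_any_signature());
    Ok(sni)
}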
@ -1955,56 +1892,64 @@ impl SignedDirectNodeInfo {
#[archive_attr(repr(C), derive(CheckBytes))]
pub struct SignedRelayedNodeInfo {
pub node_info: NodeInfo,
pub relay_id: NodeId,
pub relay_ids: Vec<TypedKey>,
pub relay_info: SignedDirectNodeInfo,
pub timestamp: Timestamp,
pub signature: DHTSignature,
pub signatures: Vec<TypedSignature>,
}
impl SignedRelayedNodeInfo {
pub fn new(
node_id: NodeId,
crypto: Crypto,
node_info: NodeInfo,
relay_id: NodeId,
relay_ids: Vec<TypedKey>,
relay_info: SignedDirectNodeInfo,
timestamp: Timestamp,
signature: DHTSignature,
keyed_signatures: Vec<TypedKeySignature>,
) -> Result<Self, VeilidAPIError> {
let node_info_bytes =
Self::make_signature_bytes(&node_info, &relay_id, &relay_info, timestamp)?;
verify(&node_id.key, &node_info_bytes, &signature)?;
Self::make_signature_bytes(&node_info, &relay_ids, &relay_info, timestamp)?;
let signatures = crypto.verify_signatures(
&node_info_bytes,
&keyed_signatures,
TypedSignature::from_keyed,
)?;
Ok(Self {
node_info,
relay_id,
relay_ids,
relay_info,
signature,
timestamp,
signatures,
})
}
pub fn with_secret(
node_id: NodeId,
pub fn make_signatures(
crypto: Crypto,
node_info: NodeInfo,
relay_id: NodeId,
relay_ids: Vec<TypedKey>,
relay_info: SignedDirectNodeInfo,
secret: &DHTKeySecret,
keypairs: Vec<TypedKeyPair>,
) -> Result<Self, VeilidAPIError> {
let timestamp = get_aligned_timestamp();
let node_info_bytes =
Self::make_signature_bytes(&node_info, &relay_id, &relay_info, timestamp)?;
let signature = sign(&node_id.key, secret, &node_info_bytes)?;
Self::make_signature_bytes(&node_info, &relay_ids, &relay_info, timestamp)?;
let signatures = crypto.generate_signatures(
&node_info_bytes,
&keypairs,
TypedSignature::from_pair_sig,
)?;
Ok(Self {
node_info,
relay_id,
relay_ids,
relay_info,
signature,
signatures,
timestamp,
})
}
fn make_signature_bytes(
node_info: &NodeInfo,
relay_id: &NodeId,
relay_ids: &[TypedKey],
relay_info: &SignedDirectNodeInfo,
timestamp: Timestamp,
) -> Result<Vec<u8>, VeilidAPIError> {
@ -2016,7 +1961,7 @@ impl SignedRelayedNodeInfo {
encode_node_info(node_info, &mut ni_builder).map_err(VeilidAPIError::internal)?;
sig_bytes.append(&mut builder_to_vec(ni_msg).map_err(VeilidAPIError::internal)?);
// Add relay id to signature
// Add relay ids to signature
let mut rid_msg = ::capnp::message::Builder::new_default();
let mut rid_builder = rid_msg.init_root::<veilid_capnp::key256::Builder>();
encode_dht_key(&relay_id.key, &mut rid_builder).map_err(VeilidAPIError::internal)?;
@ -2046,7 +1991,7 @@ pub enum SignedNodeInfo {
impl SignedNodeInfo {
pub fn has_valid_signature(&self) -> bool {
match self {
SignedNodeInfo::Direct(d) => d.has_valid_signature(),
SignedNodeInfo::Direct(d) => d.has_any_signature(),
SignedNodeInfo::Relayed(_) => true,
}
}
@ -2063,10 +2008,10 @@ impl SignedNodeInfo {
SignedNodeInfo::Relayed(r) => &r.node_info,
}
}
pub fn relay_id(&self) -> Option<NodeId> {
pub fn relay_ids(&self) -> Vec<TypedKey> {
match self {
SignedNodeInfo::Direct(_) => None,
SignedNodeInfo::Relayed(r) => Some(r.relay_id.clone()),
SignedNodeInfo::Direct(_) => Vec::new(),
SignedNodeInfo::Relayed(r) => r.relay_ids.clone(),
}
}
pub fn relay_info(&self) -> Option<&NodeInfo> {
@ -2079,7 +2024,7 @@ impl SignedNodeInfo {
match self {
SignedNodeInfo::Direct(_) => None,
SignedNodeInfo::Relayed(r) => Some(PeerInfo::new(
r.relay_id.clone(),
r.relay_ids.clone(),
SignedNodeInfo::Direct(r.relay_info.clone()),
)),
}
@ -2127,14 +2072,14 @@ impl SignedNodeInfo {
#[derive(Clone, Debug, Serialize, Deserialize, RkyvArchive, RkyvSerialize, RkyvDeserialize)]
#[archive_attr(repr(C), derive(CheckBytes))]
pub struct PeerInfo {
pub node_id: NodeId,
pub node_ids: Vec<TypedKey>,
pub signed_node_info: SignedNodeInfo,
}
impl PeerInfo {
pub fn new(node_id: NodeId, signed_node_info: SignedNodeInfo) -> Self {
pub fn new(node_ids: Vec<TypedKey>, signed_node_info: SignedNodeInfo) -> Self {
Self {
node_id,
node_ids,
signed_node_info,
}
}
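
PeerInfo now advertises one TypedKey per supported crypto kind instead of a single NodeId. A minimal sketch of constructing one, using the SignedNodeInfo::Direct variant shown earlier in this file:

fn peer_info_for(node_ids: Vec<TypedKey>, sni: SignedDirectNodeInfo) -> PeerInfo {
    // One node id per crypto kind this peer supports.
    PeerInfo::new(node_ids, SignedNodeInfo::Direct(sni))
}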

View File

@ -316,6 +316,25 @@ pub struct VeilidConfigRPC {
pub default_route_hop_count: u8,
}
/// Configure the node id for a single crypto kind
///
#[derive(
Default,
Debug,
Clone,
PartialEq,
Eq,
Serialize,
Deserialize,
RkyvArchive,
RkyvSerialize,
RkyvDeserialize,
)]
pub struct VeilidConfigNodeId {
pub node_id: Option<PublicKey>,
pub node_id_secret: Option<SecretKey>,
}
/// Configure the network routing table
///
#[derive(
@ -331,6 +350,9 @@ pub struct VeilidConfigRPC {
RkyvDeserialize,
)]
pub struct VeilidConfigRoutingTable {
pub node_ids: BTreeMap<CryptoKind, VeilidConfigNodeId>,
pub bootstrap: Vec<String>,
pub bootstrap_nodes: Vec<String>,
pub limit_over_attached: u32,
pub limit_fully_attached: u32,
pub limit_attached_strong: u32,
@ -362,10 +384,6 @@ pub struct VeilidConfigNetwork {
pub client_whitelist_timeout_ms: u32,
pub reverse_connection_receipt_time_ms: u32,
pub hole_punch_receipt_time_ms: u32,
pub node_id: Option<DHTKey>,
pub node_id_secret: Option<DHTKeySecret>,
pub bootstrap: Vec<String>,
pub bootstrap_nodes: Vec<String>,
pub routing_table: VeilidConfigRoutingTable,
pub rpc: VeilidConfigRPC,
pub dht: VeilidConfigDHT,
@ -608,6 +626,23 @@ impl VeilidConfig {
};
};
}
macro_rules! get_config_indexed {
($key:expr, $index:expr, $subkey:tt) => {
let keyname = format!(
"{}[{}].{}",
&stringify!($key)[6..],
$index,
&stringify!($subkey)
);
let v = cb(keyname.to_owned())?;
$key.entry($index).or_default().$subkey = match v.downcast() {
Ok(v) => *v,
Err(_) => {
apibail_generic!(format!("incorrect type for key {}", keyname))
}
};
};
}
self.update_cb = Some(update_cb);
self.with_mut(|inner| {
@ -628,8 +663,6 @@ impl VeilidConfig {
get_config!(inner.protected_store.always_use_insecure_storage);
get_config!(inner.protected_store.insecure_fallback_directory);
get_config!(inner.protected_store.delete);
get_config!(inner.network.node_id);
get_config!(inner.network.node_id_secret);
get_config!(inner.network.connection_initial_timeout_ms);
get_config!(inner.network.connection_inactivity_timeout_ms);
get_config!(inner.network.max_connections_per_ip4);
@ -639,8 +672,12 @@ impl VeilidConfig {
get_config!(inner.network.client_whitelist_timeout_ms);
get_config!(inner.network.reverse_connection_receipt_time_ms);
get_config!(inner.network.hole_punch_receipt_time_ms);
get_config!(inner.network.bootstrap);
get_config!(inner.network.bootstrap_nodes);
for ck in VALID_CRYPTO_KINDS {
get_config_indexed!(inner.network.routing_table.node_ids, ck, node_id);
get_config_indexed!(inner.network.routing_table.node_ids, ck, node_id_secret);
}
get_config!(inner.network.routing_table.bootstrap);
get_config!(inner.network.routing_table.bootstrap_nodes);
get_config!(inner.network.routing_table.limit_over_attached);
get_config!(inner.network.routing_table.limit_fully_attached);
get_config!(inner.network.routing_table.limit_attached_strong);
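
The node id settings move out of network.node_id into the per-crypto-kind routing_table.node_ids map. A hedged sketch of the callback keys that get_config_indexed! produces; the "VLD0" FOURCC rendering of the CryptoKind index is an assumption for illustration:

fn node_id_config_callback(key: String) -> ConfigCallbackReturn {
    match key.as_str() {
        // Formerly "network.node_id" / "network.node_id_secret".
        "network.routing_table.node_ids[VLD0].node_id" => Ok(Box::new(Option::<PublicKey>::None)),
        "network.routing_table.node_ids[VLD0].node_id_secret" => Ok(Box::new(Option::<SecretKey>::None)),
        _ => unimplemented!("other keys are handled elsewhere in a real callback"),
    }
}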
@ -908,75 +945,102 @@ impl VeilidConfig {
// Get the node id from config if one is specified
// Must be done -after- protected store startup
pub async fn init_node_id(
pub async fn init_node_ids(
&self,
crypto: Crypto,
protected_store: intf::ProtectedStore,
) -> Result<(), VeilidAPIError> {
let mut node_id = self.inner.read().network.node_id;
let mut node_id_secret = self.inner.read().network.node_id_secret;
// See if node id was previously stored in the protected store
if node_id.is_none() {
debug!("pulling node id from storage");
if let Some(s) = protected_store
.load_user_secret_string("node_id")
.await
.map_err(VeilidAPIError::internal)?
{
debug!("node id found in storage");
node_id = Some(DHTKey::try_decode(s.as_str()).map_err(VeilidAPIError::internal)?);
} else {
debug!("node id not found in storage");
}
}
for ck in VALID_CRYPTO_KINDS {
let vcrypto = crypto.get(ck)?;
// See if node id secret was previously stored in the protected store
if node_id_secret.is_none() {
debug!("pulling node id secret from storage");
if let Some(s) = protected_store
.load_user_secret_string("node_id_secret")
.await
.map_err(VeilidAPIError::internal)?
{
debug!("node id secret found in storage");
node_id_secret =
Some(DHTKeySecret::try_decode(s.as_str()).map_err(VeilidAPIError::internal)?);
} else {
debug!("node id secret not found in storage");
}
}
// If we have a node id from storage, check it
let (node_id, node_id_secret) =
if let (Some(node_id), Some(node_id_secret)) = (node_id, node_id_secret) {
// Validate node id
if !crypto::validate_key(&node_id, &node_id_secret) {
apibail_generic!("node id secret and node id key don't match");
let mut node_id = self
.inner
.read()
.network
.routing_table
.node_ids
.get(&ck)
.map(|n| n.node_id)
.flatten();
let mut node_id_secret = self
.inner
.read()
.network
.routing_table
.node_ids
.get(&ck)
.map(|n| n.node_id_secret)
.flatten();
// See if node id was previously stored in the protected store
if node_id.is_none() {
debug!("pulling node_id_{} from storage", ck);
if let Some(s) = protected_store
.load_user_secret_string(format!("node_id_v{}", ck))
.await
.map_err(VeilidAPIError::internal)?
{
debug!("node id {} found in storage", ck);
node_id =
Some(PublicKey::try_decode(s.as_str()).map_err(VeilidAPIError::internal)?);
} else {
debug!("node id {} not found in storage", ck);
}
(node_id, node_id_secret)
} else {
// If we still don't have a valid node id, generate one
debug!("generating new node id");
generate_secret()
};
info!("Node Id is {}", node_id.encode());
// info!("Node Id Secret is {}", node_id_secret.encode());
}
// Save the node id / secret in storage
protected_store
.save_user_secret_string("node_id", node_id.encode().as_str())
.await
.map_err(VeilidAPIError::internal)?;
protected_store
.save_user_secret_string("node_id_secret", node_id_secret.encode().as_str())
.await
.map_err(VeilidAPIError::internal)?;
// See if node id secret was previously stored in the protected store
if node_id_secret.is_none() {
debug!("pulling node id secret from storage");
if let Some(s) = protected_store
.load_user_secret_string(format!("node_id_secret_v{}", ck))
.await
.map_err(VeilidAPIError::internal)?
{
debug!("node id {} secret found in storage", ck);
node_id_secret =
Some(SecretKey::try_decode(s.as_str()).map_err(VeilidAPIError::internal)?);
} else {
debug!("node id {} secret not found in storage", ck);
}
}
self.with_mut(|c| {
c.network.node_id = Some(node_id);
c.network.node_id_secret = Some(node_id_secret);
Ok(())
})?;
// If we have a node id from storage, check it
let (node_id, node_id_secret) =
if let (Some(node_id), Some(node_id_secret)) = (node_id, node_id_secret) {
// Validate node id
if !vcrypto.validate_keypair(&node_id, &node_id_secret) {
apibail_generic!(format!(
"node id secret and node id key don't match for crypto kind {}",
ck
));
}
(node_id, node_id_secret)
} else {
// If we still don't have a valid node id, generate one
debug!("generating new node id for crypto kind {}", ck);
vcrypto.generate_keypair()
};
info!("Node Id {} is {}", ck, node_id.encode());
// Save the node id / secret in storage
protected_store
.save_user_secret_string(format!("node_id_v{}", ck), node_id.encode().as_str())
.await
.map_err(VeilidAPIError::internal)?;
protected_store
.save_user_secret_string(
format!("node_id_secret_v{}", ck),
node_id_secret.encode().as_str(),
)
.await
.map_err(VeilidAPIError::internal)?;
self.with_mut(|c| {
let n = c.network.routing_table.node_ids.entry(ck).or_default();
n.node_id = Some(node_id);
n.node_id_secret = Some(node_id_secret);
Ok(())
})?;
}
trace!("init_node_ids complete");
Ok(())

View File

@ -93,7 +93,7 @@ pub struct VeilidFFIConfig {
#[derive(Debug, Deserialize, Serialize)]
pub struct VeilidFFIKeyBlob {
pub key: veilid_core::DHTKey,
pub key: veilid_core::PublicKey,
#[serde(with = "veilid_core::json_as_base64")]
pub blob: Vec<u8>,
}
@ -417,7 +417,7 @@ pub extern "C" fn routing_context_with_sequencing(id: u32, sequencing: FfiStr) -
#[no_mangle]
pub extern "C" fn routing_context_app_call(port: i64, id: u32, target: FfiStr, request: FfiStr) {
let target: veilid_core::DHTKey =
let target: veilid_core::PublicKey =
veilid_core::deserialize_opt_json(target.into_opt_string()).unwrap();
let request: Vec<u8> = data_encoding::BASE64URL_NOPAD
.decode(
@ -453,7 +453,7 @@ pub extern "C" fn routing_context_app_call(port: i64, id: u32, target: FfiStr, r
#[no_mangle]
pub extern "C" fn routing_context_app_message(port: i64, id: u32, target: FfiStr, message: FfiStr) {
let target: veilid_core::DHTKey =
let target: veilid_core::PublicKey =
veilid_core::deserialize_opt_json(target.into_opt_string()).unwrap();
let message: Vec<u8> = data_encoding::BASE64URL_NOPAD
.decode(
@ -539,7 +539,7 @@ pub extern "C" fn import_remote_private_route(port: i64, blob: FfiStr) {
#[no_mangle]
pub extern "C" fn release_private_route(port: i64, key: FfiStr) {
let key: veilid_core::DHTKey =
let key: veilid_core::PublicKey =
veilid_core::deserialize_opt_json(key.into_opt_string()).unwrap();
DartIsolateWrapper::new(port).spawn_result(async move {
let veilid_api = get_veilid_api().await?;

View File

@ -4,7 +4,7 @@ use clap::{Arg, ArgMatches, Command};
use std::ffi::OsStr;
use std::path::Path;
use std::str::FromStr;
use veilid_core::{DHTKey, DHTKeySecret};
use veilid_core::{PublicKey, SecretKey};
fn do_clap_matches(default_config_path: &OsStr) -> Result<clap::ArgMatches, clap::Error> {
let matches = Command::new("veilid-server")
@ -245,15 +245,16 @@ pub fn process_command_line() -> EyreResult<(Settings, ArgMatches)> {
// Split or get secret
let (k, s) = if let Some((k, s)) = v.split_once(':') {
let k = DHTKey::try_decode(k).wrap_err("failed to decode node id from command line")?;
let s = DHTKeySecret::try_decode(s)?;
let k =
PublicKey::try_decode(k).wrap_err("failed to decode node id from command line")?;
let s = SecretKey::try_decode(s)?;
(k, s)
} else {
let k = DHTKey::try_decode(v)?;
let k = PublicKey::try_decode(v)?;
let buffer = rpassword::prompt_password("Enter secret key (will not echo): ")
.wrap_err("invalid secret key")?;
let buffer = buffer.trim().to_string();
let s = DHTKeySecret::try_decode(&buffer)?;
let s = SecretKey::try_decode(&buffer)?;
(k, s)
};
settingsrw.core.network.node_id = Some(k);

View File

@ -595,8 +595,8 @@ pub struct Network {
pub client_whitelist_timeout_ms: u32,
pub reverse_connection_receipt_time_ms: u32,
pub hole_punch_receipt_time_ms: u32,
pub node_id: Option<veilid_core::DHTKey>,
pub node_id_secret: Option<veilid_core::DHTKeySecret>,
pub node_id: Option<veilid_core::PublicKey>,
pub node_id_secret: Option<veilid_core::SecretKey>,
pub bootstrap: Vec<String>,
pub bootstrap_nodes: Vec<ParsedNodeDialInfo>,
pub routing_table: RoutingTable,

View File

@ -137,7 +137,7 @@ pub struct VeilidWASMConfig {
#[derive(Debug, Deserialize, Serialize)]
pub struct VeilidKeyBlob {
pub key: veilid_core::DHTKey,
pub key: veilid_core::PublicKey,
#[serde(with = "veilid_core::json_as_base64")]
pub blob: Vec<u8>,
}
@ -459,7 +459,7 @@ pub fn import_remote_private_route(blob: String) -> Promise {
#[wasm_bindgen()]
pub fn release_private_route(key: String) -> Promise {
let key: veilid_core::DHTKey = veilid_core::deserialize_json(&key).unwrap();
let key: veilid_core::PublicKey = veilid_core::deserialize_json(&key).unwrap();
wrap_api_future_void(async move {
let veilid_api = get_veilid_api()?;
veilid_api.release_private_route(&key)?;