Mirror of https://gitlab.com/veilid/veilid.git (synced 2024-12-25 15:29:32 -05:00)

Commit 615158d54e ("cleaning up"), parent 5a4c2cb37e
@@ -17,13 +17,17 @@ pub fn compare_crypto_kind(a: &CryptoKind, b: &CryptoKind) -> cmp::Ordering {
     let b_idx = VALID_CRYPTO_KINDS.iter().position(|k| k == b);
     if let Some(a_idx) = a_idx {
         if let Some(b_idx) = b_idx {
+            // Both are valid, prefer better crypto kind
             a_idx.cmp(&b_idx)
         } else {
+            // A is valid, B is not
             cmp::Ordering::Less
         }
-    } else if let Some(b_idx) = b_idx {
+    } else if b_idx.is_some() {
+        // B is valid, A is not
         cmp::Ordering::Greater
     } else {
+        // Both are invalid, so use lex comparison
         a.cmp(b)
     }
 }
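For orientation, the ordering above is what lets callers prefer the most capable crypto kind when several are available: kinds earlier in VALID_CRYPTO_KINDS sort first, and unknown kinds fall back to lexical order. A minimal usage sketch; the helper name is hypothetical, only compare_crypto_kind and the types above come from this file:

    // Hypothetical helper: pick the most preferred of a set of candidate kinds.
    fn best_crypto_kind(mut kinds: Vec<CryptoKind>) -> Option<CryptoKind> {
        kinds.sort_by(compare_crypto_kind);
        kinds.into_iter().next()
    }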
@@ -66,19 +70,9 @@ impl KeyPair {
     }
 }

-#[derive(
-    Clone,
-    Copy,
-    Debug,
-    Serialize,
-    Deserialize,
-    PartialEq,
-    Eq,
-    Hash,
-    RkyvArchive,
-    RkyvSerialize,
-    RkyvDeserialize,
-)]
+xxx make default template version here for secretkey
+and put Vec<TypedKey<SecretKey>> in settings
+#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, RkyvArchive, RkyvSerialize, RkyvDeserialize)]
 #[archive_attr(repr(C), derive(CheckBytes, Hash, PartialEq, Eq))]
 pub struct TypedKey {
     pub kind: CryptoKind,
@@ -123,6 +117,23 @@ impl FromStr for TypedKey {
         Ok(Self { kind, key })
     }
 }
+impl<'de> Deserialize<'de> for TypedKey {
+    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
+    where
+        D: serde::Deserializer<'de>,
+    {
+        let s = <String as Deserialize>::deserialize(deserializer)?;
+        FromStr::from_str(&s).map_err(serde::de::Error::custom)
+    }
+}
+impl Serialize for TypedKey {
+    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
+    where
+        S: serde::Serializer,
+    {
+        serializer.collect_str(self)
+    }
+}

 #[derive(
     Clone,
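With Serialize and Deserialize dropped from the derive and implemented by hand, a TypedKey now crosses serde boundaries as its display string (via collect_str) and is parsed back through FromStr. A small round-trip sketch; serde_json is only an illustrative format here, not something this module depends on:

    fn typed_key_roundtrip(key: TypedKey) -> TypedKey {
        // Serializes through Display (collect_str), parses back through FromStr.
        let s = serde_json::to_string(&key).unwrap();
        serde_json::from_str(&s).unwrap()
    }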
@@ -139,6 +150,7 @@ impl FromStr for TypedKey {
     RkyvDeserialize,
 )]
 #[archive_attr(repr(C), derive(CheckBytes, Hash, PartialEq, Eq))]
+#[serde(from = "Vec<TypedKey>", into = "Vec<TypedKey>")]
 pub struct TypedKeySet {
     items: Vec<TypedKey>,
 }
@@ -192,12 +204,12 @@ impl TypedKeySet {
         }
         self.items.sort()
     }
-    pub fn remove(&self, kind: CryptoKind) {
+    pub fn remove(&mut self, kind: CryptoKind) {
         if let Some(idx) = self.items.iter().position(|x| x.kind == kind) {
             self.items.remove(idx);
         }
     }
-    pub fn remove_all(&self, kinds: &[CryptoKind]) {
+    pub fn remove_all(&mut self, kinds: &[CryptoKind]) {
         for k in kinds {
             self.remove(*k);
         }
@@ -290,6 +302,18 @@ impl From<TypedKey> for TypedKeySet {
         tks
     }
 }
+impl From<Vec<TypedKey>> for TypedKeySet {
+    fn from(x: Vec<TypedKey>) -> Self {
+        let mut tks = TypedKeySet::with_capacity(x.len());
+        tks.add_all(&x);
+        tks
+    }
+}
+impl Into<Vec<TypedKey>> for TypedKeySet {
+    fn into(self) -> Vec<TypedKey> {
+        self.items
+    }
+}

 #[derive(
     Clone,
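Together with the new #[serde(from/into)] attribute, the From/Into pair above makes a TypedKeySet serialize as a plain Vec<TypedKey>, which, given the manual TypedKey impls, becomes a list of kind-prefixed key strings. The conversions serde drives are just:

    // Sketch of the conversions behind the #[serde(from/into)] attribute.
    fn keyset_via_vec(set: TypedKeySet) -> TypedKeySet {
        let v: Vec<TypedKey> = set.into(); // Into<Vec<TypedKey>>
        TypedKeySet::from(v)               // From<Vec<TypedKey>> (rebuilds the set via add_all)
    }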
@@ -269,7 +269,7 @@ impl CryptoSystem for CryptoSystemVLD0 {
         shared_secret: &SharedSecret,
     ) {
         let mut cipher = XChaCha20::new(&shared_secret.bytes.into(), &nonce.bytes.into());
-        cipher.apply_keystream_b2b(in_buf, &mut out_buf).unwrap();
+        cipher.apply_keystream_b2b(in_buf, out_buf).unwrap();
     }

     fn crypt_no_auth_aligned_8(
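The fix drops a redundant borrow: out_buf already arrives as a mutable slice, and apply_keystream_b2b (from the cipher crate's StreamCipher trait) wants &mut [u8], not &mut &mut [u8]. A standalone sketch of the same buffer-to-buffer call, assuming the chacha20/cipher crates used here; the function name and fixed-size key/nonce parameters are illustrative:

    use chacha20::cipher::{KeyIvInit, StreamCipher};
    use chacha20::XChaCha20;

    fn xor_keystream(in_buf: &[u8], out_buf: &mut [u8], key: &[u8; 32], nonce: &[u8; 24]) {
        let mut cipher = XChaCha20::new(key.into(), nonce.into());
        // out_buf is already `&mut [u8]`, so it is passed through as-is.
        cipher.apply_keystream_b2b(in_buf, out_buf).unwrap();
    }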
@@ -8,8 +8,6 @@ use rkyv::{Archive as RkyvArchive, Deserialize as RkyvDeserialize, Serialize as
 /// Helps to keep managed lists at particular distances so we can evict nodes by priority
 /// where the priority comes from liveness and age of the entry (older is better)
 pub struct Bucket {
-    /// handle to the routing table
-    routing_table: RoutingTable,
     /// Map of keys to entries for this bucket
     entries: BTreeMap<PublicKey, Arc<BucketEntry>>,
     /// The crypto kind in use for the public keys in this bucket
@@ -40,9 +38,8 @@ fn state_ordering(state: BucketEntryState) -> usize {
 }

 impl Bucket {
-    pub fn new(routing_table: RoutingTable, kind: CryptoKind) -> Self {
+    pub fn new(kind: CryptoKind) -> Self {
         Self {
-            routing_table,
             entries: BTreeMap::new(),
             kind,
         }
@@ -262,11 +262,11 @@ impl BucketEntryInner {
         }

         // Update the envelope version support we have to use
-        let mut envelope_support = signed_node_info.node_info().envelope_support.clone();
-        self.set_envelope_support(envelope_support);
+        let envelope_support = signed_node_info.node_info().envelope_support.clone();

         // Update the signed node info
         *opt_current_sni = Some(Box::new(signed_node_info));
+        self.set_envelope_support(envelope_support);
         self.updated_since_last_network_change = true;
         self.touch_last_seen(get_aligned_timestamp());
     }
@@ -466,7 +466,7 @@ impl BucketEntryInner {
         self.envelope_support.sort();
     }

-    pub fn set_envelope_support(&mut self, envelope_support: Vec<u8>) {
+    pub fn set_envelope_support(&mut self, mut envelope_support: Vec<u8>) {
         envelope_support.dedup();
         envelope_support.sort();
         self.envelope_support = envelope_support;
@@ -65,7 +65,9 @@ pub struct RoutingTableHealth {
     pub local_network_ready: bool,
 }

-pub(super) struct RoutingTableUnlockedInner {
+pub type BucketIndex = (CryptoKind, usize);
+
+pub struct RoutingTableUnlockedInner {
     // Accessors
     config: VeilidConfig,
     network_manager: NetworkManager,
@@ -73,7 +75,7 @@ pub(super) struct RoutingTableUnlockedInner {
     /// The current node's public DHT keys and secrets
     node_id_keypairs: BTreeMap<CryptoKind, KeyPair>,
     /// Buckets to kick on our next kick task
-    kick_queue: Mutex<BTreeSet<(CryptoKind, usize)>>,
+    kick_queue: Mutex<BTreeSet<BucketIndex>>,
     /// Background process for computing statistics
     rolling_transfers_task: TickTask<EyreReport>,
     /// Background process to purge dead routing table entries when necessary
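BucketIndex is just a name for the (CryptoKind, usize) pair that addresses one bucket inside the per-crypto-kind bucket arrays; later hunks replace ad-hoc (kind, idx) tuples with it. A tiny sketch of how such an index resolves against the bucket table; the BTreeMap<CryptoKind, Vec<Bucket>> shape is inferred from the accessors added later in this commit:

    fn bucket_at(buckets: &BTreeMap<CryptoKind, Vec<Bucket>>, bucket_index: BucketIndex) -> &Bucket {
        // .0 selects the crypto kind's bucket vector, .1 selects the bucket within it
        &buckets[&bucket_index.0][bucket_index.1]
    }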
@@ -146,7 +148,7 @@ impl RoutingTableUnlockedInner {
     }

     pub fn matches_own_node_id_key(&self, node_id_key: &PublicKey) -> bool {
-        for (ck, v) in &self.node_id_keypairs {
+        for (_ck, v) in &self.node_id_keypairs {
             if v.key == *node_id_key {
                 return true;
             }
@@ -154,7 +156,7 @@ impl RoutingTableUnlockedInner {
         false
     }

-    pub fn calculate_bucket_index(&self, node_id: &TypedKey) -> (CryptoKind, usize) {
+    pub fn calculate_bucket_index(&self, node_id: &TypedKey) -> BucketIndex {
         let crypto = self.crypto();
         let self_node_id = self.node_id_keypairs.get(&node_id.kind).unwrap().key;
         let vcrypto = crypto.get(node_id.kind).unwrap();
@@ -230,7 +232,7 @@ impl RoutingTable {
         // Set up routing buckets
         {
             let mut inner = self.inner.write();
-            inner.init_buckets(self.clone());
+            inner.init_buckets();
         }

         // Load bucket entries from table db if possible
@@ -238,7 +240,7 @@ impl RoutingTable {
         if let Err(e) = self.load_buckets().await {
             log_rtab!(debug "Error loading buckets from storage: {:#?}. Resetting.", e);
             let mut inner = self.inner.write();
-            inner.init_buckets(self.clone());
+            inner.init_buckets();
         }

         // Set up routespecstore
@@ -895,7 +897,7 @@ impl RoutingTable {
         let nrs =
             self.find_bootstrap_nodes_filtered_per_crypto_kind(*crypto_kind, max_per_type);
         'nrloop: for nr in nrs {
-            for nro in out {
+            for nro in &out {
                 if nro.same_entry(&nr) {
                     continue 'nrloop;
                 }
@@ -36,6 +36,24 @@ pub struct RouteSetSpecDetail {
 }

 impl RouteSetSpecDetail {
+    pub fn new(
+        cur_ts: Timestamp,
+        route_set: BTreeMap<PublicKey, RouteSpecDetail>,
+        hop_node_refs: Vec<NodeRef>,
+        directions: DirectionSet,
+        stability: Stability,
+        can_do_sequenced: bool,
+    ) -> Self {
+        Self {
+            route_set,
+            hop_node_refs,
+            published: false,
+            directions,
+            stability,
+            can_do_sequenced,
+            stats: RouteStats::new(cur_ts),
+        }
+    }
     pub fn get_route_by_key(&self, key: &PublicKey) -> Option<&RouteSpecDetail> {
         self.route_set.get(key)
     }
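The new constructor keeps callers from spelling out the fields that always start the same way: published begins false and stats starts fresh at cur_ts. The allocation site later in this diff reduces to a call of this shape, with arguments being whatever the route-building code just computed:

    let rssd = RouteSetSpecDetail::new(
        cur_ts,
        route_set,
        hop_node_refs,
        directions,
        stability,
        can_do_sequenced,
    );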
@@ -61,7 +79,7 @@ impl RouteSetSpecDetail {
         self.route_set.iter()
     }
     pub fn iter_route_set_mut(
-        &self,
+        &mut self,
     ) -> alloc::collections::btree_map::IterMut<PublicKey, RouteSpecDetail> {
         self.route_set.iter_mut()
     }
@@ -75,7 +93,7 @@ impl RouteSetSpecDetail {
         self.published
     }
     pub fn set_published(&mut self, published: bool) {
-        self.published = self.published;
+        self.published = published;
     }
     pub fn hop_count(&self) -> usize {
         self.hop_node_refs.len()
@@ -97,7 +115,7 @@ impl RouteSetSpecDetail {
         }
     }
     pub fn contains_nodes(&self, nodes: &[TypedKey]) -> bool {
-        for h in self.hop_node_refs {
+        for h in &self.hop_node_refs {
             if h.node_ids().contains_any(nodes) {
                 return true;
             }
@@ -46,10 +46,7 @@ impl RouteSpecStore {
                 routing_table,
             }),
             inner: Arc::new(Mutex::new(RouteSpecStoreInner {
-                content: RouteSpecStoreContent {
-                    id_by_key: HashMap::new(),
-                    details: HashMap::new(),
-                },
+                content: RouteSpecStoreContent::new(),
                 cache: Default::default(),
             })),
         }
@@ -199,7 +196,7 @@ impl RouteSpecStore {
         // Get list of all nodes, and sort them for selection
         let cur_ts = get_aligned_timestamp();
         let filter = Box::new(
-            |rti: &RoutingTableInner, entry: Option<Arc<BucketEntry>>| -> bool {
+            |_rti: &RoutingTableInner, entry: Option<Arc<BucketEntry>>| -> bool {
                 // Exclude our own node from routes
                 if entry.is_none() {
                     return false;
@@ -277,14 +274,14 @@ impl RouteSpecStore {
             },
         ) as RoutingTableEntryFilter;
         let filters = VecDeque::from([filter]);
-        let compare = |rti: &RoutingTableInner,
+        let compare = |_rti: &RoutingTableInner,
                        entry1: &Option<Arc<BucketEntry>>,
                        entry2: &Option<Arc<BucketEntry>>|
         -> Ordering {
            // Our own node is filtered out
-           let entry1 = entry1.unwrap();
-           let entry2 = entry2.unwrap();
+           let entry1 = entry1.as_ref().unwrap().clone();
+           let entry2 = entry2.as_ref().unwrap().clone();
            let entry1_node_ids = entry1.with_inner(|e| e.node_ids());
            let entry2_node_ids = entry2.with_inner(|e| e.node_ids());

@@ -336,7 +333,7 @@ impl RouteSpecStore {

        let routing_table = self.unlocked_inner.routing_table.clone();
        let transform =
-           |rti: &RoutingTableInner, entry: Option<Arc<BucketEntry>>| -> NodeRef {
+           |_rti: &RoutingTableInner, entry: Option<Arc<BucketEntry>>| -> NodeRef {
                NodeRef::new(routing_table.clone(), entry.unwrap(), None)
            };

@@ -503,15 +500,14 @@ impl RouteSpecStore {
            });
        }

-       let rssd = RouteSetSpecDetail {
+       let rssd = RouteSetSpecDetail::new(
+           cur_ts,
            route_set,
            hop_node_refs,
-           published: false,
            directions,
            stability,
            can_do_sequenced,
-           stats: RouteStats::new(cur_ts),
-       };
+       );

        drop(perm_func);

@@ -1162,7 +1158,7 @@ impl RouteSpecStore {
        )?)
    }

-   fn assemble_private_route_inner(&self, inner: &RouteSpecStoreInner, key: &PublicKey, rsd: &RouteSpecDetail, optimized: bool) -> EyreResult<PrivateRoute>
+   fn assemble_private_route_inner(&self, key: &PublicKey, rsd: &RouteSpecDetail, optimized: bool) -> EyreResult<PrivateRoute>
    {
        let routing_table = self.unlocked_inner.routing_table.clone();
        let rti = &*routing_table.inner.read();
@@ -1271,7 +1267,7 @@ impl RouteSpecStore {

        let rsd = rssd.get_route_by_key(key).expect("route key index is broken");

-       self.assemble_private_route_inner(inner, key, rsd, optimized)
+       self.assemble_private_route_inner(key, rsd, optimized)
    }


@@ -1295,7 +1291,7 @@ impl RouteSpecStore {

        let mut out = Vec::new();
        for (key, rsd) in rssd.iter_route_set() {
-           out.push(self.assemble_private_route_inner(inner, key, rsd, optimized)?);
+           out.push(self.assemble_private_route_inner(key, rsd, optimized)?);
        }
        Ok(out)
    }
@@ -1315,7 +1311,7 @@ impl RouteSpecStore {

        // validate the private routes
        let inner = &mut *self.inner.lock();
-       for private_route in private_routes {
+       for private_route in &private_routes {

            // ensure private route has first hop
            if !matches!(private_route.hops, PrivateRouteHops::FirstHop(_)) {
@@ -1340,33 +1336,6 @@ impl RouteSpecStore {
        inner.cache.remove_remote_private_route(id)
    }

-   /// Check if a remote private route id is valid
-   // #[instrument(level = "trace", skip(self), ret)]
-   // pub fn is_valid_remote_private_route(&self, id: &RouteId) -> bool {
-   //     let inner = &mut *self.inner.lock();
-   //     let cur_ts = get_aligned_timestamp();
-   //     inner.cache.peek_remote_private_route_mut(cur_ts, id).is_some()
-   // }
-
-   // /// Retrieve an imported remote private route by its public key
-   // pub fn get_remote_private_route(&self, id: &String) -> Option<PrivateRoute> {
-   //     let inner = &mut *self.inner.lock();
-   //     let cur_ts = get_aligned_timestamp();
-   //     Self::with_get_remote_private_route(inner, cur_ts, key, |r| {
-   //         r.private_route.as_ref().unwrap().clone()
-   //     })
-   // }
-
-   // /// Retrieve an imported remote private route by its public key but don't 'touch' it
-   // fn peek_remote_private_route(&self, id: &String) -> Option<PrivateRoute> {
-   //     let inner = &mut *self.inner.lock();
-   //     let cur_ts = get_aligned_timestamp();
-   //     inner.cache.with_peek_remote_private_route(cur_ts, id, f)
-   //     Self::with_peek_remote_private_route(inner, cur_ts, key, |r| {
-   //         r.private_route.as_ref().unwrap().clone()
-   //     })
-   // }
-
    /// Get a route id for a route's public key
    pub fn get_route_id_for_key(&self, key: &PublicKey) -> Option<RouteId>
    {
@@ -1524,8 +1493,6 @@ impl RouteSpecStore {

    /// Convert private route list to binary blob
    pub fn private_routes_to_blob(private_routes: &[PrivateRoute]) -> EyreResult<Vec<u8>> {
-       let mut pr_message = ::capnp::message::Builder::new_default();
-       let mut pr_builder = pr_message.init_root::<veilid_capnp::private_route::Builder>();

        let mut buffer = vec![];

@@ -1539,6 +1506,9 @@ impl RouteSpecStore {

        // Serialize stream of private routes
        for private_route in private_routes {
+           let mut pr_message = ::capnp::message::Builder::new_default();
+           let mut pr_builder = pr_message.init_root::<veilid_capnp::private_route::Builder>();
+
            encode_private_route(private_route, &mut pr_builder)
                .wrap_err("failed to encode private route")?;

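Moving the capnp message builder inside the loop gives every private route its own fresh message instead of re-initializing the root of one shared builder each iteration. The per-item pattern, sketched with the helpers used above; the packed write_message call is an assumption about the elided tail of this function, mirroring the packed reader on the decode side:

    for private_route in private_routes {
        // One fresh message per route
        let mut pr_message = ::capnp::message::Builder::new_default();
        let mut pr_builder = pr_message.init_root::<veilid_capnp::private_route::Builder>();
        encode_private_route(private_route, &mut pr_builder)
            .wrap_err("failed to encode private route")?;
        capnp::serialize_packed::write_message(&mut buffer, &pr_message)
            .wrap_err("failed to write private route")?;
    }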
@@ -1563,7 +1533,7 @@ impl RouteSpecStore {
        }

        // Deserialize stream of private routes
-       let pr_slice = &blob[1..];
+       let mut pr_slice = &blob[1..];
        let mut out = Vec::with_capacity(pr_count);
        for _ in 0..pr_count {
            let reader = capnp::serialize_packed::read_message(
@@ -1577,7 +1547,7 @@ impl RouteSpecStore {
                .get_root::<veilid_capnp::private_route::Reader>()
                .map_err(RPCError::internal)
                .wrap_err("failed to make reader for private_route")?;
-           let private_route = decode_private_route(&pr_reader, crypto).wrap_err("failed to decode private route")?;
+           let private_route = decode_private_route(&pr_reader, crypto.clone()).wrap_err("failed to decode private route")?;
            out.push(private_route);
        }

@@ -46,7 +46,7 @@ impl RouteSpecStoreCache {
        if !self.hop_cache.insert(cache_key) {
            panic!("route should never be inserted twice");
        }
-       for (pk, rsd) in rssd.iter_route_set() {
+       for (_pk, rsd) in rssd.iter_route_set() {
            for h in &rsd.hops {
                self.used_nodes
                    .entry(*h)
@@ -137,19 +137,25 @@ impl RouteSpecStoreCache {
            self.remote_private_routes_by_key
                .insert(private_route.public_key.key, id.clone());
        }

+       let mut dead = None;
        self.remote_private_route_set_cache
            .insert(id, rprinfo, |dead_id, dead_rpri| {
-               // If anything LRUs out, remove from the by-key table
-               // Follow the same logic as 'remove_remote_private_route' here
-               for dead_private_route in dead_rpri.get_private_routes() {
-                   self.remote_private_routes_by_key
-                       .remove(&dead_private_route.public_key.key)
-                       .unwrap();
-                   self.invalidate_compiled_route_cache(&dead_private_route.public_key.key);
-               }
-               self.dead_remote_routes.push(dead_id);
+               dead = Some((dead_id, dead_rpri));
            });

+       if let Some((dead_id, dead_rpri)) = dead {
+           // If anything LRUs out, remove from the by-key table
+           // Follow the same logic as 'remove_remote_private_route' here
+           for dead_private_route in dead_rpri.get_private_routes() {
+               self.remote_private_routes_by_key
+                   .remove(&dead_private_route.public_key.key)
+                   .unwrap();
+               self.invalidate_compiled_route_cache(&dead_private_route.public_key.key);
+           }
+           self.dead_remote_routes.push(dead_id);
+       }
+
        id
    }

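The rewritten insertion defers eviction handling: the LRU insert callback can no longer do the cleanup itself, because the cache it would mutate is part of the same self that insert already borrows mutably, so the closure only captures the evicted pair and the cleanup runs after the call returns. A self-contained toy showing the same shape (this is not the veilid cache API, just the borrow pattern):

    // Toy container: insert() reports an evicted entry through a callback
    // while it still holds &mut self.
    struct TinyCache {
        slot: Option<(u32, String)>,
    }

    impl TinyCache {
        fn insert<F: FnMut(u32, String)>(&mut self, k: u32, v: String, mut on_evict: F) {
            if let Some((dk, dv)) = self.slot.take() {
                on_evict(dk, dv);
            }
            self.slot = Some((k, v));
        }
    }

    fn main() {
        let mut cache = TinyCache { slot: Some((1, "old".into())) };
        // Capture the evicted entry instead of doing real cleanup inside the
        // callback, then process it once the mutable borrow of `cache` ends.
        let mut dead = None;
        cache.insert(2, "new".into(), |dk, dv| dead = Some((dk, dv)));
        if let Some((dk, dv)) = dead {
            println!("evicted {dk}: {dv}");
        }
    }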
@@ -172,7 +178,7 @@ impl RouteSpecStoreCache {
        cur_ts: Timestamp,
        id: &RouteId,
    ) -> Option<&RemotePrivateRouteInfo> {
-       if let Some(rpri) = self.remote_private_route_set_cache.get(id) {
+       if let Some(rpri) = self.remote_private_route_set_cache.get_mut(id) {
            if !rpri.did_expire(cur_ts) {
                rpri.touch(cur_ts);
                return Some(rpri);
@@ -238,13 +244,9 @@ impl RouteSpecStoreCache {
                rpri.touch(cur_ts);
            }
        } else {
-           let rpri = RemotePrivateRouteInfo {
-               // New remote private route cache entry
-               private_routes,
-               last_seen_our_node_info_ts: Timestamp::new(0),
-               last_touched_ts: cur_ts,
-               stats: RouteStats::new(cur_ts),
-           };
+           // New remote private route cache entry
+           let rpri = RemotePrivateRouteInfo::new(private_routes, cur_ts);
            self.add_remote_private_route(id, rpri);
            if self.peek_remote_private_route_mut(cur_ts, &id).is_none() {
                panic!("remote private route should exist");
@@ -268,7 +270,7 @@ impl RouteSpecStoreCache {
    }

    /// Stores a compiled 'safety + private' route so we don't have to compile it again later
-   pub fn add_to_compiled_route_cache(&self, pr_pubkey: PublicKey, safety_route: SafetyRoute) {
+   pub fn add_to_compiled_route_cache(&mut self, pr_pubkey: PublicKey, safety_route: SafetyRoute) {
        let key = CompiledRouteCacheKey {
            sr_pubkey: safety_route.public_key.key,
            pr_pubkey,
@@ -286,7 +288,7 @@ impl RouteSpecStoreCache {

    /// Looks up an existing compiled route from the safety and private route components
    pub fn lookup_compiled_route_cache(
-       &self,
+       &mut self,
        sr_pubkey: PublicKey,
        pr_pubkey: PublicKey,
    ) -> Option<SafetyRoute> {
@@ -298,7 +300,7 @@ impl RouteSpecStoreCache {
    }

    /// When routes are dropped, they should be removed from the compiled route cache
-   fn invalidate_compiled_route_cache(&self, dead_key: &PublicKey) {
+   fn invalidate_compiled_route_cache(&mut self, dead_key: &PublicKey) {
        let mut dead_entries = Vec::new();
        for (k, _v) in self.compiled_route_cache.iter() {
            if k.sr_pubkey == *dead_key || k.pr_pubkey == *dead_key {
@@ -325,14 +327,14 @@ impl RouteSpecStoreCache {
    /// Resets statistics for when our node info changes
    pub fn reset_remote_private_routes(&mut self) {
        // Restart stats for routes so we test the route again
-       for (_k, v) in self.remote_private_route_set_cache {
+       for (_k, v) in self.remote_private_route_set_cache.iter_mut() {
            v.get_stats_mut().reset();
        }
    }

    /// Roll transfer statistics
    pub fn roll_transfers(&mut self, last_ts: Timestamp, cur_ts: Timestamp) {
-       for (_k, v) in self.remote_private_route_set_cache {
+       for (_k, v) in self.remote_private_route_set_cache.iter_mut() {
            v.get_stats_mut().roll_transfers(last_ts, cur_ts);
        }
    }
@@ -11,6 +11,13 @@ pub struct RouteSpecStoreContent {
 }

 impl RouteSpecStoreContent {
+    pub fn new() -> Self {
+        Self {
+            id_by_key: HashMap::new(),
+            details: HashMap::new(),
+        }
+    }
+
     pub async fn load(routing_table: RoutingTable) -> EyreResult<RouteSpecStoreContent> {
         // Deserialize what we can
         let table_store = routing_table.network_manager().table_store();
@@ -99,11 +106,11 @@ impl RouteSpecStoreContent {
        let rsstdb = table_store.open("RouteSpecStore", 1).await?;
        rsstdb.store_rkyv(0, b"content", self).await?;

-       // // Keep secrets in protected store as well
+       // Keep secrets in protected store as well
        let pstore = routing_table.network_manager().protected_store();

        let mut out: HashMap<PublicKey, SecretKey> = HashMap::new();
-       for (rsid, rssd) in self.details.iter() {
+       for (_rsid, rssd) in self.details.iter() {
            for (pk, rsd) in rssd.iter_route_set() {
                out.insert(*pk, rsd.secret_key);
            }
@@ -287,7 +287,8 @@ impl RoutingDomainDetail for PublicInternetRoutingDomainDetail {
            // No common crypto kinds between these nodes, can't contact
            return ContactMethod::Unreachable;
        };
-       let node_a_id = peer_a.node_ids.get(best_ck).unwrap();
+       //let node_a_id = peer_a.node_ids.get(best_ck).unwrap();
        let node_b_id = peer_b.node_ids.get(best_ck).unwrap();

        // Get the best match dial info for node B if we have it
@@ -298,8 +298,8 @@ impl RoutingTableInner {
            .with_dial_info_filter(dif)
    }

-   fn bucket_depth(index: usize) -> usize {
-       match index {
+   fn bucket_depth(bucket_index: BucketIndex) -> usize {
+       match bucket_index.1 {
            0 => 256,
            1 => 128,
            2 => 64,
@@ -312,13 +312,13 @@ impl RoutingTableInner {
        }
    }

-   pub fn init_buckets(&mut self, routing_table: RoutingTable) {
+   pub fn init_buckets(&mut self) {
        // Size the buckets (one per bit), one bucket set per crypto kind
        self.buckets.clear();
        for ck in VALID_CRYPTO_KINDS {
-           let ckbuckets = Vec::with_capacity(PUBLIC_KEY_LENGTH * 8);
+           let mut ckbuckets = Vec::with_capacity(PUBLIC_KEY_LENGTH * 8);
            for _ in 0..PUBLIC_KEY_LENGTH * 8 {
-               let bucket = Bucket::new(routing_table.clone(), ck);
+               let bucket = Bucket::new(ck);
                ckbuckets.push(bucket);
            }
            self.buckets.insert(ck, ckbuckets);
@@ -356,7 +356,7 @@ impl RoutingTableInner {
            self.bucket_entry_count()
        );
        for ck in VALID_CRYPTO_KINDS {
-           for bucket in &mut self.buckets[&ck] {
+           for bucket in self.buckets.get_mut(&ck).unwrap().iter_mut() {
                bucket.kick(0);
            }
        }
@@ -393,15 +393,15 @@ impl RoutingTableInner {

    /// Attempt to settle buckets and remove entries down to the desired number
    /// which may not be possible due extant NodeRefs
-   pub fn kick_bucket(&mut self, kind: CryptoKind, idx: usize) {
-       let bucket = &mut self.buckets[&kind][idx];
-       let bucket_depth = Self::bucket_depth(idx);
+   pub fn kick_bucket(&mut self, bucket_index: BucketIndex) {
+       let bucket = self.get_bucket_mut(bucket_index);
+       let bucket_depth = Self::bucket_depth(bucket_index);

-       if let Some(dead_node_ids) = bucket.kick(bucket_depth) {
+       if let Some(_dead_node_ids) = bucket.kick(bucket_depth) {
            // Remove expired entries
            self.all_entries.remove_expired();

-           log_rtab!(debug "Bucket {}:{} kicked Routing table now has {} nodes", kind, idx, self.bucket_entry_count());
+           log_rtab!(debug "Bucket {}:{} kicked Routing table now has {} nodes", bucket_index.0, bucket_index.1, self.bucket_entry_count());

            // Now purge the routing table inner vectors
            //let filter = |k: &DHTKey| dead_node_ids.contains(k);
@@ -416,11 +416,11 @@ impl RoutingTableInner {
    pub fn refresh_cached_entry_counts(&mut self) -> EntryCounts {
        self.live_entry_count.clear();
        let cur_ts = get_aligned_timestamp();
-       self.with_entries(cur_ts, BucketEntryState::Unreliable, |rti, entry| {
+       self.with_entries_mut(cur_ts, BucketEntryState::Unreliable, |rti, entry| {
            entry.with_inner(|e| {
                if let Some(rd) = e.best_routing_domain(rti, RoutingDomainSet::all()) {
                    for crypto_kind in e.crypto_kinds() {
-                       self.live_entry_count
+                       rti.live_entry_count
                            .entry((rd, crypto_kind))
                            .and_modify(|x| *x += 1)
                            .or_insert(1);
@@ -491,7 +491,7 @@ impl RoutingTableInner {
        min_state: BucketEntryState,
        mut f: F,
    ) -> Option<T> {
-       for entry in self.all_entries {
+       for entry in &self.all_entries {
            if entry.with_inner(|e| e.state(cur_ts) >= min_state) {
                if let Some(out) = f(self, entry) {
                    return Some(out);
@@ -509,14 +509,17 @@ impl RoutingTableInner {
        min_state: BucketEntryState,
        mut f: F,
    ) -> Option<T> {
-       for entry in self.all_entries {
+       let mut entries = Vec::with_capacity(self.all_entries.len());
+       for entry in self.all_entries.iter() {
            if entry.with_inner(|e| e.state(cur_ts) >= min_state) {
-               if let Some(out) = f(self, entry) {
-                   return Some(out);
-               }
+               entries.push(entry);
            }
        }
-
+       for entry in entries {
+           if let Some(out) = f(self, entry) {
+               return Some(out);
+           }
+       }
        None
    }

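Splitting the loop into a collect pass and a call pass is what lets the closure receive the routing table inner mutably: the iteration borrow over all_entries ends before f(self, entry) runs. A self-contained sketch of the same two-pass shape, with toy stand-ins for the real entry types:

    use std::sync::Arc;

    struct Table {
        entries: Vec<Arc<i64>>,
    }

    impl Table {
        fn with_entries_mut<T, F: FnMut(&mut Table, Arc<i64>) -> Option<T>>(
            &mut self,
            min: i64,
            mut f: F,
        ) -> Option<T> {
            // Pass 1: snapshot matching entries (cheap Arc clones) so the
            // borrow of self.entries ends here.
            let mut picked = Vec::with_capacity(self.entries.len());
            for e in self.entries.iter() {
                if **e >= min {
                    picked.push(e.clone());
                }
            }
            // Pass 2: now the closure may mutate self freely.
            for e in picked {
                if let Some(out) = f(self, e) {
                    return Some(out);
                }
            }
            None
        }
    }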
@ -541,6 +544,7 @@ impl RoutingTableInner {
|
|||||||
// If we need a ping via the normal timing mechanism, then do it
|
// If we need a ping via the normal timing mechanism, then do it
|
||||||
// or if this node is our own relay, then we keep it alive
|
// or if this node is our own relay, then we keep it alive
|
||||||
let is_our_relay = opt_relay
|
let is_our_relay = opt_relay
|
||||||
|
.as_ref()
|
||||||
.map(|nr| nr.same_bucket_entry(&entry))
|
.map(|nr| nr.same_bucket_entry(&entry))
|
||||||
.unwrap_or(false);
|
.unwrap_or(false);
|
||||||
if e.needs_ping(cur_ts, is_our_relay) {
|
if e.needs_ping(cur_ts, is_our_relay) {
|
||||||
@ -574,8 +578,24 @@ impl RoutingTableInner {
|
|||||||
node_refs
|
node_refs
|
||||||
}
|
}
|
||||||
|
|
||||||
|
fn get_bucket_mut(&mut self, bucket_index: BucketIndex) -> &mut Bucket {
|
||||||
|
self.buckets
|
||||||
|
.get_mut(&bucket_index.0)
|
||||||
|
.unwrap()
|
||||||
|
.get_mut(bucket_index.1)
|
||||||
|
.unwrap()
|
||||||
|
}
|
||||||
|
|
||||||
|
fn get_bucket(&self, bucket_index: BucketIndex) -> &Bucket {
|
||||||
|
self.buckets
|
||||||
|
.get(&bucket_index.0)
|
||||||
|
.unwrap()
|
||||||
|
.get(bucket_index.1)
|
||||||
|
.unwrap()
|
||||||
|
}
|
||||||
|
|
||||||
// Update buckets with new node ids we may have learned belong to this entry
|
// Update buckets with new node ids we may have learned belong to this entry
|
||||||
fn update_bucket_entries(&self, entry: Arc<BucketEntry>, node_ids: &[TypedKey]) {
|
fn update_bucket_entries(&mut self, entry: Arc<BucketEntry>, node_ids: &[TypedKey]) {
|
||||||
entry.with_mut_inner(|e| {
|
entry.with_mut_inner(|e| {
|
||||||
let existing_node_ids = e.node_ids();
|
let existing_node_ids = e.node_ids();
|
||||||
for node_id in node_ids {
|
for node_id in node_ids {
|
||||||
@ -583,19 +603,19 @@ impl RoutingTableInner {
|
|||||||
// Add new node id to entry
|
// Add new node id to entry
|
||||||
if let Some(old_node_id) = e.add_node_id(*node_id) {
|
if let Some(old_node_id) = e.add_node_id(*node_id) {
|
||||||
// Remove any old node id for this crypto kind
|
// Remove any old node id for this crypto kind
|
||||||
let (kind, idx) = self.unlocked_inner.calculate_bucket_index(&old_node_id);
|
let bucket_index = self.unlocked_inner.calculate_bucket_index(&old_node_id);
|
||||||
let bucket = &mut self.buckets[&kind][idx];
|
let bucket = self.get_bucket_mut(bucket_index);
|
||||||
bucket.remove_entry(&old_node_id.key);
|
bucket.remove_entry(&old_node_id.key);
|
||||||
self.unlocked_inner.kick_queue.lock().insert((kind, idx));
|
self.unlocked_inner.kick_queue.lock().insert(bucket_index);
|
||||||
}
|
}
|
||||||
|
|
||||||
// Bucket the entry appropriately
|
// Bucket the entry appropriately
|
||||||
let (kind, idx) = self.unlocked_inner.calculate_bucket_index(node_id);
|
let bucket_index = self.unlocked_inner.calculate_bucket_index(node_id);
|
||||||
let bucket = &mut self.buckets[&kind][idx];
|
let bucket = self.get_bucket_mut(bucket_index);
|
||||||
bucket.add_existing_entry(node_id.key, entry.clone());
|
bucket.add_existing_entry(node_id.key, entry.clone());
|
||||||
|
|
||||||
// Kick bucket
|
// Kick bucket
|
||||||
self.unlocked_inner.kick_queue.lock().insert((kind, idx));
|
self.unlocked_inner.kick_queue.lock().insert(bucket_index);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
})
|
})
|
||||||
@ -627,8 +647,8 @@ impl RoutingTableInner {
|
|||||||
log_rtab!(error "can't look up node id with invalid crypto kind");
|
log_rtab!(error "can't look up node id with invalid crypto kind");
|
||||||
return None;
|
return None;
|
||||||
}
|
}
|
||||||
let (kind, idx) = self.unlocked_inner.calculate_bucket_index(node_id);
|
let bucket_index = self.unlocked_inner.calculate_bucket_index(node_id);
|
||||||
let bucket = &self.buckets[&kind][idx];
|
let bucket = self.get_bucket(bucket_index);
|
||||||
if let Some(entry) = bucket.entry(&node_id.key) {
|
if let Some(entry) = bucket.entry(&node_id.key) {
|
||||||
// Best entry is the first one in sorted order that exists from the node id list
|
// Best entry is the first one in sorted order that exists from the node id list
|
||||||
// Everything else that matches will be overwritten in the bucket and the
|
// Everything else that matches will be overwritten in the bucket and the
|
||||||
@ -644,10 +664,10 @@ impl RoutingTableInner {
|
|||||||
// If the entry does exist already, update it
|
// If the entry does exist already, update it
|
||||||
if let Some(best_entry) = best_entry {
|
if let Some(best_entry) = best_entry {
|
||||||
// Update the entry with all of the node ids
|
// Update the entry with all of the node ids
|
||||||
self.update_bucket_entries(best_entry, node_ids);
|
self.update_bucket_entries(best_entry.clone(), node_ids);
|
||||||
|
|
||||||
// Make a noderef to return
|
// Make a noderef to return
|
||||||
let nr = NodeRef::new(outer_self.clone(), best_entry, None);
|
let nr = NodeRef::new(outer_self.clone(), best_entry.clone(), None);
|
||||||
|
|
||||||
// Update the entry with the update func
|
// Update the entry with the update func
|
||||||
best_entry.with_mut_inner(|e| update_func(self, e));
|
best_entry.with_mut_inner(|e| update_func(self, e));
|
||||||
@ -658,16 +678,16 @@ impl RoutingTableInner {
|
|||||||
|
|
||||||
// If no entry exists yet, add the first entry to a bucket, possibly evicting a bucket member
|
// If no entry exists yet, add the first entry to a bucket, possibly evicting a bucket member
|
||||||
let first_node_id = node_ids[0];
|
let first_node_id = node_ids[0];
|
||||||
let (kind, idx) = self.unlocked_inner.calculate_bucket_index(&first_node_id);
|
let bucket_entry = self.unlocked_inner.calculate_bucket_index(&first_node_id);
|
||||||
let bucket = &mut self.buckets[&kind][idx];
|
let bucket = self.get_bucket_mut(bucket_entry);
|
||||||
let new_entry = bucket.add_new_entry(first_node_id.key);
|
let new_entry = bucket.add_new_entry(first_node_id.key);
|
||||||
self.unlocked_inner.kick_queue.lock().insert((kind, idx));
|
self.unlocked_inner.kick_queue.lock().insert(bucket_entry);
|
||||||
|
|
||||||
// Update the other bucket entries with the remaining node ids
|
// Update the other bucket entries with the remaining node ids
|
||||||
self.update_bucket_entries(new_entry, node_ids);
|
self.update_bucket_entries(new_entry.clone(), node_ids);
|
||||||
|
|
||||||
// Make node ref to return
|
// Make node ref to return
|
||||||
let nr = NodeRef::new(outer_self.clone(), new_entry, None);
|
let nr = NodeRef::new(outer_self.clone(), new_entry.clone(), None);
|
||||||
|
|
||||||
// Update the entry with the update func
|
// Update the entry with the update func
|
||||||
new_entry.with_mut_inner(|e| update_func(self, e));
|
new_entry.with_mut_inner(|e| update_func(self, e));
|
||||||
@ -684,9 +704,9 @@ impl RoutingTableInner {
|
|||||||
outer_self: RoutingTable,
|
outer_self: RoutingTable,
|
||||||
node_id_key: PublicKey,
|
node_id_key: PublicKey,
|
||||||
) -> Option<NodeRef> {
|
) -> Option<NodeRef> {
|
||||||
VALID_CRYPTO_KINDS
|
VALID_CRYPTO_KINDS.iter().find_map(|ck| {
|
||||||
.iter()
|
self.lookup_node_ref(outer_self.clone(), TypedKey::new(*ck, node_id_key))
|
||||||
.find_map(|ck| self.lookup_node_ref(outer_self, TypedKey::new(*ck, node_id_key)))
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Resolve an existing routing table entry and return a reference to it
|
/// Resolve an existing routing table entry and return a reference to it
|
||||||
@ -700,8 +720,8 @@ impl RoutingTableInner {
|
|||||||
return None;
|
return None;
|
||||||
}
|
}
|
||||||
|
|
||||||
let (kind, idx) = self.unlocked_inner.calculate_bucket_index(&node_id);
|
let bucket_index = self.unlocked_inner.calculate_bucket_index(&node_id);
|
||||||
let bucket = &self.buckets[&kind][idx];
|
let bucket = self.get_bucket(bucket_index);
|
||||||
bucket
|
bucket
|
||||||
.entry(&node_id.key)
|
.entry(&node_id.key)
|
||||||
.map(|e| NodeRef::new(outer_self, e, None))
|
.map(|e| NodeRef::new(outer_self, e, None))
|
||||||
@ -738,12 +758,9 @@ impl RoutingTableInner {
|
|||||||
log_rtab!(error "can't look up node id with invalid crypto kind");
|
log_rtab!(error "can't look up node id with invalid crypto kind");
|
||||||
return None;
|
return None;
|
||||||
}
|
}
|
||||||
let (kind, idx) = self.unlocked_inner.calculate_bucket_index(&node_id);
|
let bucket_entry = self.unlocked_inner.calculate_bucket_index(&node_id);
|
||||||
let bucket = &self.buckets[&kind][idx];
|
let bucket = self.get_bucket(bucket_entry);
|
||||||
if let Some(e) = bucket.entry(&node_id.key) {
|
bucket.entry(&node_id.key).map(f)
|
||||||
return Some(f(e));
|
|
||||||
}
|
|
||||||
None
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Shortcut function to add a node to our routing table if it doesn't exist
|
/// Shortcut function to add a node to our routing table if it doesn't exist
|
||||||
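The last hunk above collapses an if-let with an early return into Option::map, which is the idiomatic shape when the only thing done with the found entry is to feed it through the caller-supplied closure. A tiny equivalent sketch:

    // Equivalent transformations on Option: both return Some(f(e)) when a
    // value is present and None otherwise.
    fn with_value_verbose<T, R>(v: Option<T>, f: impl FnOnce(T) -> R) -> Option<R> {
        if let Some(e) = v {
            return Some(f(e));
        }
        None
    }

    fn with_value_idiomatic<T, R>(v: Option<T>, f: impl FnOnce(T) -> R) -> Option<R> {
        v.map(f)
    }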
@@ -826,7 +843,7 @@ impl RoutingTableInner {
        let mut dead_entry_count: usize = 0;

        let cur_ts = get_aligned_timestamp();
-       for entry in self.all_entries {
+       for entry in self.all_entries.iter() {
            match entry.with_inner(|e| e.state(cur_ts)) {
                BucketEntryState::Reliable => {
                    reliable_entry_count += 1;
@@ -877,20 +894,21 @@ impl RoutingTableInner {
        node_count: usize,
        mut filters: VecDeque<RoutingTableEntryFilter>,
    ) -> Vec<NodeRef> {
-       let public_node_filter = Box::new(|rti: &RoutingTableInner, v: Option<Arc<BucketEntry>>| {
-           let entry = v.unwrap();
-           entry.with_inner(|e| {
-               // skip nodes on local network
-               if e.node_info(RoutingDomain::LocalNetwork).is_some() {
-                   return false;
-               }
-               // skip nodes not on public internet
-               if e.node_info(RoutingDomain::PublicInternet).is_none() {
-                   return false;
-               }
-               true
-           })
-       }) as RoutingTableEntryFilter;
+       let public_node_filter =
+           Box::new(|_rti: &RoutingTableInner, v: Option<Arc<BucketEntry>>| {
+               let entry = v.unwrap();
+               entry.with_inner(|e| {
+                   // skip nodes on local network
+                   if e.node_info(RoutingDomain::LocalNetwork).is_some() {
+                       return false;
+                   }
+                   // skip nodes not on public internet
+                   if e.node_info(RoutingDomain::PublicInternet).is_none() {
+                       return false;
+                   }
+                   true
+               })
+           }) as RoutingTableEntryFilter;
        filters.push_front(public_node_filter);

        self.find_fastest_nodes(
@@ -1001,7 +1019,7 @@ impl RoutingTableInner {

        // Add filter to remove dead nodes always
        let filter_dead = Box::new(
-           move |rti: &RoutingTableInner, v: Option<Arc<BucketEntry>>| {
+           move |_rti: &RoutingTableInner, v: Option<Arc<BucketEntry>>| {
                if let Some(entry) = &v {
                    // always filter out dead nodes
                    if entry.with_inner(|e| e.state(cur_ts) == BucketEntryState::Dead) {
@@ -1018,7 +1036,7 @@ impl RoutingTableInner {
        filters.push_front(filter_dead);

        // Fastest sort
-       let sort = |rti: &RoutingTableInner,
+       let sort = |_rti: &RoutingTableInner,
                    a_entry: &Option<Arc<BucketEntry>>,
                    b_entry: &Option<Arc<BucketEntry>>| {
            // same nodes are always the same
@@ -1084,7 +1102,7 @@ impl RoutingTableInner {
        &self,
        node_count: usize,
        node_id: TypedKey,
-       filters: VecDeque<RoutingTableEntryFilter>,
+       mut filters: VecDeque<RoutingTableEntryFilter>,
        transform: T,
    ) -> Vec<O>
    where
@@ -1099,7 +1117,7 @@ impl RoutingTableInner {
        // Filter to ensure entries support the crypto kind in use

        let filter = Box::new(
-           move |rti: &RoutingTableInner, opt_entry: Option<Arc<BucketEntry>>| {
+           move |_rti: &RoutingTableInner, opt_entry: Option<Arc<BucketEntry>>| {
                if let Some(entry) = opt_entry {
                    entry.with_inner(|e| e.crypto_kinds().contains(&crypto_kind))
                } else {
@@ -1111,7 +1129,7 @@ impl RoutingTableInner {

        // Closest sort
        // Distance is done using the node id's distance metric which may vary based on crypto system
-       let sort = |rti: &RoutingTableInner,
+       let sort = |_rti: &RoutingTableInner,
                    a_entry: &Option<Arc<BucketEntry>>,
                    b_entry: &Option<Arc<BucketEntry>>| {
            // same nodes are always the same
@@ -12,7 +12,7 @@ pub struct BootstrapRecord {
     dial_info_details: Vec<DialInfoDetail>,
 }
 impl BootstrapRecord {
-    pub fn merge(&mut self, other: &BootstrapRecord) {
+    pub fn merge(&mut self, other: BootstrapRecord) {
         self.node_ids.add_all(&other.node_ids);
         for x in other.envelope_support {
             if !self.envelope_support.contains(&x) {
@@ -20,9 +20,9 @@ impl BootstrapRecord {
                 self.envelope_support.sort();
             }
         }
-        for did in &other.dial_info_details {
-            if !self.dial_info_details.contains(did) {
-                self.dial_info_details.push(did.clone());
+        for did in other.dial_info_details {
+            if !self.dial_info_details.contains(&did) {
+                self.dial_info_details.push(did);
            }
        }
    }
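Taking other by value lets merge move each DialInfoDetail into self instead of cloning it. The same by-value merge in isolation, with a stand-in element type:

    // Stand-in for the DialInfoDetail case: merging owned elements without clones.
    fn merge_unique<T: PartialEq>(dst: &mut Vec<T>, src: Vec<T>) {
        for item in src {
            if !dst.contains(&item) {
                dst.push(item); // moved, not cloned
            }
        }
    }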
@ -48,7 +48,7 @@ impl RoutingTable {
|
|||||||
let mut envelope_support = Vec::new();
|
let mut envelope_support = Vec::new();
|
||||||
for ess in records[1].split(",") {
|
for ess in records[1].split(",") {
|
||||||
let ess = ess.trim();
|
let ess = ess.trim();
|
||||||
let es = match records[1].parse::<u8>() {
|
let es = match ess.parse::<u8>() {
|
||||||
Ok(v) => v,
|
Ok(v) => v,
|
||||||
Err(e) => {
|
Err(e) => {
|
||||||
bail!(
|
bail!(
|
||||||
@ -63,7 +63,7 @@ impl RoutingTable {
|
|||||||
envelope_support.sort();
|
envelope_support.sort();
|
||||||
|
|
||||||
// Node Id
|
// Node Id
|
||||||
let node_ids = TypedKeySet::new();
|
let mut node_ids = TypedKeySet::new();
|
||||||
for node_id_str in records[2].split(",") {
|
for node_id_str in records[2].split(",") {
|
||||||
let node_id_str = node_id_str.trim();
|
let node_id_str = node_id_str.trim();
|
||||||
let node_id = match TypedKey::from_str(&node_id_str) {
|
let node_id = match TypedKey::from_str(&node_id_str) {
|
||||||
@ -76,6 +76,7 @@ impl RoutingTable {
|
|||||||
);
|
);
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
|
node_ids.add(node_id);
|
||||||
}
|
}
|
||||||
|
|
||||||
// If this is our own node id, then we skip it for bootstrap, in case we are a bootstrap node
|
// If this is our own node id, then we skip it for bootstrap, in case we are a bootstrap node
|
||||||
@ -220,7 +221,7 @@ impl RoutingTable {
|
|||||||
if mbr.node_ids.contains_any(&bsrec.node_ids) {
|
if mbr.node_ids.contains_any(&bsrec.node_ids) {
|
||||||
// Merge record, pop this one out
|
// Merge record, pop this one out
|
||||||
let mbr = merged_bootstrap_records.remove(mbi);
|
let mbr = merged_bootstrap_records.remove(mbi);
|
||||||
bsrec.merge(&mbr);
|
bsrec.merge(mbr);
|
||||||
} else {
|
} else {
|
||||||
// No overlap, go to next record
|
// No overlap, go to next record
|
||||||
mbi += 1;
|
mbi += 1;
|
||||||
|
@ -10,13 +10,13 @@ impl RoutingTable {
|
|||||||
_last_ts: Timestamp,
|
_last_ts: Timestamp,
|
||||||
cur_ts: Timestamp,
|
cur_ts: Timestamp,
|
||||||
) -> EyreResult<()> {
|
) -> EyreResult<()> {
|
||||||
let kick_queue: Vec<(CryptoKind, usize)> =
|
let kick_queue: Vec<BucketIndex> =
|
||||||
core::mem::take(&mut *self.unlocked_inner.kick_queue.lock())
|
core::mem::take(&mut *self.unlocked_inner.kick_queue.lock())
|
||||||
.into_iter()
|
.into_iter()
|
||||||
.collect();
|
.collect();
|
||||||
let mut inner = self.inner.write();
|
let mut inner = self.inner.write();
|
||||||
for (ck, idx) in kick_queue {
|
for bucket_index in kick_queue {
|
||||||
inner.kick_bucket(ck, idx)
|
inner.kick_bucket(bucket_index)
|
||||||
}
|
}
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
@ -40,7 +40,7 @@ impl RoutingTable {
|
|||||||
|
|
||||||
let mut filters = VecDeque::new();
|
let mut filters = VecDeque::new();
|
||||||
let filter = Box::new(
|
let filter = Box::new(
|
||||||
move |rti: &RoutingTableInner, opt_entry: Option<Arc<BucketEntry>>| {
|
move |_rti: &RoutingTableInner, opt_entry: Option<Arc<BucketEntry>>| {
|
||||||
// Keep only the entries that contain the crypto kind we're looking for
|
// Keep only the entries that contain the crypto kind we're looking for
|
||||||
if let Some(entry) = opt_entry {
|
if let Some(entry) = opt_entry {
|
||||||
entry.with_inner(|e| e.crypto_kinds().contains(&crypto_kind))
|
entry.with_inner(|e| e.crypto_kinds().contains(&crypto_kind))
|
||||||
|
@ -36,8 +36,8 @@ impl RoutingTable {
|
|||||||
for nr in node_refs {
|
for nr in node_refs {
|
||||||
// If this is our relay, let's check for NAT keepalives
|
// If this is our relay, let's check for NAT keepalives
|
||||||
let mut did_pings = false;
|
let mut did_pings = false;
|
||||||
if let Some(relay_nr) = opt_relay_nr {
|
if let Some(relay_nr) = &opt_relay_nr {
|
||||||
if nr.same_entry(&relay_nr) {
|
if nr.same_entry(relay_nr) {
|
||||||
// Relay nodes get pinged over all protocols we have inbound dialinfo for
|
// Relay nodes get pinged over all protocols we have inbound dialinfo for
|
||||||
// This is so we can preserve the inbound NAT mappings at our router
|
// This is so we can preserve the inbound NAT mappings at our router
|
||||||
for did in &dids {
|
for did in &dids {
|
||||||
|
@@ -15,7 +15,7 @@ pub fn encode_peer_info(
 for (i, nid) in peer_info.node_ids.iter().enumerate() {
 encode_typed_key(
 nid,
-&mut nids_builder.get(
+&mut nids_builder.reborrow().get(
 i.try_into()
 .map_err(RPCError::map_invalid_format("out of bound error"))?,
 ),
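This and the following hunks insert `.reborrow()` before indexing a Cap'n Proto list builder inside a loop. In capnp-rust, element access takes the builder by value, so looping over the builder directly would move it on the first pass; `reborrow()` lends out a shorter-lived builder for a single access instead. A stand-alone analogy with invented types (not the capnp API itself), showing why the pattern is needed:

```rust
// Invented builder types that mimic the capnp ownership rules.
struct ListBuilder<'a> {
    slots: &'a mut Vec<String>,
}

impl<'a> ListBuilder<'a> {
    // Mirrors capnp: hand out a builder that borrows self for a shorter lifetime.
    fn reborrow(&mut self) -> ListBuilder<'_> {
        ListBuilder { slots: &mut *self.slots }
    }
    // Mirrors capnp: element access consumes the (re)borrowed builder.
    fn get(self, index: usize) -> ElementBuilder<'a> {
        let slots = self.slots;
        ElementBuilder { slot: &mut slots[index] }
    }
}

struct ElementBuilder<'a> {
    slot: &'a mut String,
}

impl<'a> ElementBuilder<'a> {
    fn set(&mut self, value: &str) {
        *self.slot = value.to_string();
    }
}

fn encode_all(mut list: ListBuilder<'_>, values: &[&str]) {
    for (i, v) in values.iter().copied().enumerate() {
        // Without reborrow(), `list` would be moved into get() on i == 0 and the
        // second iteration would not compile.
        let mut element = list.reborrow().get(i);
        element.set(v);
    }
}
```

The same one-line fix repeats across the peer info, signature, and typed key/signature encoders below.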
@@ -39,7 +39,7 @@ pub fn decode_peer_info(
 .reborrow()
 .get_signed_node_info()
 .map_err(RPCError::protocol)?;
-let node_ids = TypedKeySet::with_capacity(nids_reader.len() as usize);
+let mut node_ids = TypedKeySet::with_capacity(nids_reader.len() as usize);
 for nid_reader in nids_reader.iter() {
 node_ids.add(decode_typed_key(&nid_reader)?);
 }
@@ -22,7 +22,7 @@ pub fn encode_signed_direct_node_info(
 for (i, typed_signature) in signed_direct_node_info.signatures.iter().enumerate() {
 encode_typed_signature(
 typed_signature,
-&mut sigs_builder.get(
+&mut sigs_builder.reborrow().get(
 i.try_into()
 .map_err(RPCError::map_invalid_format("out of bound error"))?,
 ),
@@ -18,7 +18,7 @@ pub fn encode_signed_relayed_node_info(
 for (i, typed_key) in signed_relayed_node_info.relay_ids.iter().enumerate() {
 encode_typed_key(
 typed_key,
-&mut rids_builder.get(
+&mut rids_builder.reborrow().get(
 i.try_into()
 .map_err(RPCError::map_invalid_format("out of bound error"))?,
 ),
@@ -42,7 +42,7 @@ pub fn encode_signed_relayed_node_info(
 for (i, typed_signature) in signed_relayed_node_info.signatures.iter().enumerate() {
 encode_typed_signature(
 typed_signature,
-&mut sigs_builder.get(
+&mut sigs_builder.reborrow().get(
 i.try_into()
 .map_err(RPCError::map_invalid_format("out of bound error"))?,
 ),
@@ -81,7 +81,7 @@ pub fn decode_signed_relayed_node_info(
 .reborrow()
 .get_relay_info()
 .map_err(RPCError::protocol)?;
-let relay_info = decode_signed_direct_node_info(&ri_reader, crypto, &mut relay_ids)?;
+let relay_info = decode_signed_direct_node_info(&ri_reader, crypto.clone(), &mut relay_ids)?;

 // Ensure the relay info for the node has a superset of the crypto kinds of the node it is relaying
 if common_crypto_kinds(
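`crypto` is cloned before being handed to the nested decode so the caller can keep using it for the checks that follow. A sketch of that ownership pattern, assuming the crypto handle is a thin reference-counted wrapper (the type below is a stand-in, not Veilid's actual `Crypto`):

```rust
use std::sync::Arc;

#[derive(Clone)]
struct Crypto {
    _inner: Arc<()>, // stand-in for shared crypto state
}

fn decode_inner(_crypto: Crypto) { /* consumes its own handle */ }

fn decode_outer(crypto: Crypto) {
    decode_inner(crypto.clone()); // bumps a refcount, no deep copy
    let _crypto_still_usable = &crypto; // e.g. verify signatures afterwards
}
```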
@@ -14,6 +14,6 @@ pub fn decode_typed_key(typed_key: &veilid_capnp::typed_key::Reader) -> Result<T

 pub fn encode_typed_key(typed_key: &TypedKey, builder: &mut veilid_capnp::typed_key::Builder) {
 builder.set_kind(u32::from_be_bytes(typed_key.kind.0));
-let mut key_builder = builder.init_key();
+let mut key_builder = builder.reborrow().init_key();
 encode_key256(&typed_key.key, &mut key_builder);
 }
@@ -19,6 +19,6 @@ pub fn encode_typed_signature(
 builder: &mut veilid_capnp::typed_signature::Builder,
 ) {
 builder.set_kind(u32::from_be_bytes(typed_signature.kind.0));
-let mut sig_builder = builder.init_signature();
+let mut sig_builder = builder.reborrow().init_signature();
 encode_signature512(&typed_signature.signature, &mut sig_builder);
 }
@@ -621,7 +621,7 @@ impl RPCProcessor {
 // Get the actual destination node id accounting for relays
 let (node_ref, destination_node_ref) = if let Destination::Relay {
 relay: _,
-target: ref target,
+ref target,
 safety_selection: _,
 } = dest
 {
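`target: ref target` and `ref target` are the same pattern; the shorthand just avoids repeating the field name. A tiny illustration with a stand-in enum (the real `Destination` lives in the RPC processor):

```rust
enum Destination {
    Relay { relay: u8, target: String, safety_selection: u8 },
    Direct { target: String },
}

fn relay_target(dest: Destination) -> Option<String> {
    // `ref target` borrows the field out of `dest`; spelling it
    // `target: ref target` means exactly the same thing.
    if let Destination::Relay { relay: _, ref target, safety_selection: _ } = dest {
        Some(target.clone())
    } else {
        None
    }
}
```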
@@ -1392,7 +1392,7 @@ impl RPCProcessor {
 }

 #[instrument(level = "trace", skip(self, body), err)]
-pub fn enqueue_safety_routed_message(
+fn enqueue_safety_routed_message(
 &self,
 direct: RPCMessageHeaderDetailDirect,
 remote_safety_route: PublicKey,
@@ -1423,7 +1423,7 @@ impl RPCProcessor {
 }

 #[instrument(level = "trace", skip(self, body), err)]
-pub fn enqueue_private_routed_message(
+fn enqueue_private_routed_message(
 &self,
 direct: RPCMessageHeaderDetailDirect,
 remote_safety_route: PublicKey,
@@ -4,7 +4,7 @@ use clap::{Arg, ArgMatches, Command};
 use std::ffi::OsStr;
 use std::path::Path;
 use std::str::FromStr;
-use veilid_core::{SecretKey, TypedKey};
+use veilid_core::{SecretKey, TypedKeySet};

 fn do_clap_matches(default_config_path: &OsStr) -> Result<clap::ArgMatches, clap::Error> {
 let matches = Command::new("veilid-server")
@@ -236,19 +236,14 @@ pub fn process_command_line() -> EyreResult<(Settings, ArgMatches)> {
 settingsrw.logging.terminal.enabled = false;

 // Split or get secret
-let (k, s) = if let Some((k, s)) = v.split_once(':') {
-let k =
-TypedKey::try_decode(k).wrap_err("failed to decode node id from command line")?;
-let s = SecretKey::try_decode(s)?;
-(k, s)
-} else {
-let k = TypedKey::try_decode(v)?;
-let buffer = rpassword::prompt_password("Enter secret key (will not echo): ")
-.wrap_err("invalid secret key")?;
-let buffer = buffer.trim().to_string();
-let s = SecretKey::try_decode(&buffer)?;
-(k, s)
-};
+let tks =
+TypedKeySet::from_str(v).wrap_err("failed to decode node id set from command line")?;
+let buffer = rpassword::prompt_password("Enter secret key (will not echo): ")
+.wrap_err("invalid secret key")?;
+let buffer = buffer.trim().to_string();
+let s = SecretKey::try_decode(&buffer)?;
 settingsrw.core.network.node_id = Some(k);
 settingsrw.core.network.node_id_secret = Some(s);
 }
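The command line now takes a whole typed-key set for the node id and always prompts for the secret instead of accepting an inline `key:secret` pair. A sketch of that flow, assuming `TypedKeySet::from_str` and `SecretKey::try_decode` are exported as the hunk uses them; error handling is simplified and the helper is illustrative:

```rust
use std::str::FromStr;
use veilid_core::{SecretKey, TypedKeySet};

fn read_node_id(arg: &str) -> eyre::Result<(TypedKeySet, SecretKey)> {
    // One argument can now carry a node id for every supported crypto kind.
    let tks = TypedKeySet::from_str(arg)
        .map_err(|_| eyre::eyre!("failed to decode node id set from command line"))?;
    // The secret is always prompted for rather than passed on the command line.
    let buffer = rpassword::prompt_password("Enter secret key (will not echo): ")?;
    let s = SecretKey::try_decode(buffer.trim())
        .map_err(|_| eyre::eyre!("invalid secret key"))?;
    Ok((tks, s))
}
```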
@@ -64,10 +64,10 @@ core:
 client_whitelist_timeout_ms: 300000
 reverse_connection_receipt_time_ms: 5000
 hole_punch_receipt_time_ms: 5000
-node_id: null
-node_id_secret: null
-bootstrap: ['bootstrap.dev.veilid.net']
 routing_table:
+node_id: null
+node_id_secret: null
+bootstrap: ['bootstrap.dev.veilid.net']
 limit_over_attached: 64
 limit_fully_attached: 32
 limit_attached_strong: 16
@@ -516,6 +516,9 @@ pub struct Dht {

 #[derive(Debug, Deserialize, Serialize)]
 pub struct RoutingTable {
+pub node_id: Option<veilid_core::TypedKeySet>,
+pub node_id_secret: Option<veilid_core::SecretKey>,
+pub bootstrap: Vec<String>,
 pub limit_over_attached: u32,
 pub limit_fully_attached: u32,
 pub limit_attached_strong: u32,
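The node id, node id secret, and bootstrap list move from `core.network` into `core.network.routing_table`, and `node_id` becomes an optional key set. A trimmed-down mirror of the structs, deserialized with `serde_yaml` purely to exercise the new nesting; the field names come from the hunk, everything else (including the simplified field types) is a stand-in for the real settings machinery:

```rust
use serde::Deserialize;

#[derive(Debug, Deserialize)]
struct RoutingTable {
    node_id: Option<Vec<String>>, // stand-in for Option<veilid_core::TypedKeySet>
    node_id_secret: Option<String>,
    bootstrap: Vec<String>,
    limit_over_attached: u32,
}

#[derive(Debug, Deserialize)]
struct Network {
    hole_punch_receipt_time_ms: u32,
    routing_table: RoutingTable, // node_id and friends now live one level down
}

fn main() -> Result<(), serde_yaml::Error> {
    let yaml = r#"
hole_punch_receipt_time_ms: 5000
routing_table:
  node_id: null
  node_id_secret: null
  bootstrap: ['bootstrap.dev.veilid.net']
  limit_over_attached: 64
"#;
    let network: Network = serde_yaml::from_str(yaml)?;
    println!("{:?}", network.routing_table.bootstrap);
    Ok(())
}
```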
@@ -534,9 +537,6 @@ pub struct Network {
 pub client_whitelist_timeout_ms: u32,
 pub reverse_connection_receipt_time_ms: u32,
 pub hole_punch_receipt_time_ms: u32,
-pub node_id: Option<veilid_core::TypedKey>,
-pub node_id_secret: Option<veilid_core::SecretKey>,
-pub bootstrap: Vec<String>,
 pub routing_table: RoutingTable,
 pub rpc: Rpc,
 pub dht: Dht,
@@ -902,9 +902,9 @@ impl Settings {
 set_config_value!(inner.core.network.client_whitelist_timeout_ms, value);
 set_config_value!(inner.core.network.reverse_connection_receipt_time_ms, value);
 set_config_value!(inner.core.network.hole_punch_receipt_time_ms, value);
-set_config_value!(inner.core.network.node_id, value);
-set_config_value!(inner.core.network.node_id_secret, value);
-set_config_value!(inner.core.network.bootstrap, value);
+set_config_value!(inner.core.network.routing_table.node_id, value);
+set_config_value!(inner.core.network.routing_table.node_id_secret, value);
+set_config_value!(inner.core.network.routing_table.bootstrap, value);
 set_config_value!(inner.core.network.routing_table.limit_over_attached, value);
 set_config_value!(inner.core.network.routing_table.limit_fully_attached, value);
 set_config_value!(
@@ -1056,9 +1056,15 @@ impl Settings {
 "network.hole_punch_receipt_time_ms" => {
 Ok(Box::new(inner.core.network.hole_punch_receipt_time_ms))
 }
-"network.node_id" => Ok(Box::new(inner.core.network.node_id)),
-"network.node_id_secret" => Ok(Box::new(inner.core.network.node_id_secret)),
-"network.bootstrap" => Ok(Box::new(inner.core.network.bootstrap.clone())),
+"network.routing_table.node_id" => {
+Ok(Box::new(inner.core.network.routing_table.node_id))
+}
+"network.routing_table.node_id_secret" => {
+Ok(Box::new(inner.core.network.routing_table.node_id_secret))
+}
+"network.routing_table.bootstrap" => {
+Ok(Box::new(inner.core.network.routing_table.bootstrap.clone()))
+}
 "network.routing_table.limit_over_attached" => Ok(Box::new(
 inner.core.network.routing_table.limit_over_attached,
 )),
@@ -1415,11 +1421,11 @@ mod tests {
 assert_eq!(s.core.network.client_whitelist_timeout_ms, 300_000u32);
 assert_eq!(s.core.network.reverse_connection_receipt_time_ms, 5_000u32);
 assert_eq!(s.core.network.hole_punch_receipt_time_ms, 5_000u32);
-assert_eq!(s.core.network.node_id, None);
-assert_eq!(s.core.network.node_id_secret, None);
+assert_eq!(s.core.network.routing_table.node_id, None);
+assert_eq!(s.core.network.routing_table.node_id_secret, None);
 //
 assert_eq!(
-s.core.network.bootstrap,
+s.core.network.routing_table.bootstrap,
 vec!["bootstrap.dev.veilid.net".to_owned()]
 );
 //