Mirror of https://gitlab.com/veilid/veilid.git
commit 064e6c018c
parent 1f62d3836c

    more refactor
@@ -519,6 +519,8 @@ impl NetworkManager {
         let routing_table = self.routing_table();

         // Generate receipt and serialized form to return
+        xxx add 'preferred_kind' and propagate envelope changes so we can make recent_peers work with cryptokind
+
         let nonce = Crypto::get_random_nonce();
         let receipt = Receipt::try_new(0, nonce, routing_table.node_id(), extra_data)?;
         let out = receipt
@@ -649,7 +651,7 @@ impl NetworkManager {
         let rpc = self.rpc_processor();

         // Add the peer info to our routing table
-        let peer_nr = match routing_table.register_node_with_signed_node_info(
+        let peer_nr = match routing_table.register_node_with_peer_info(
             RoutingDomain::PublicInternet,
             peer_info.node_id.key,
             peer_info.signed_node_info,
@@ -673,7 +675,7 @@ impl NetworkManager {
         let rpc = self.rpc_processor();

         // Add the peer info to our routing table
-        let mut peer_nr = match routing_table.register_node_with_signed_node_info(
+        let mut peer_nr = match routing_table.register_node_with_peer_info(
             RoutingDomain::PublicInternet,
             peer_info.node_id.key,
             peer_info.signed_node_info,
@@ -1326,7 +1328,7 @@ impl NetworkManager {
        }

        // Decode envelope header (may fail signature validation)
-        let envelope = match Envelope::from_signed_data(data) {
+        let envelope = match Envelope::from_signed_data(self.crypto(), data) {
            Ok(v) => v,
            Err(e) => {
                log_net!(debug "envelope failed to decode: {}", e);
@@ -1370,8 +1372,8 @@ impl NetworkManager {

        // Peek at header and see if we need to relay this
        // If the recipient id is not our node id, then it needs relaying
-        let sender_id = envelope.get_sender_id();
-        let recipient_id = envelope.get_recipient_id();
+        let sender_id = TypedKey::new(envelope.get_crypto_kind(), envelope.get_sender_id());
+        let recipient_id = TypedKey::new(envelope.get_crypto_kind(), envelope.get_recipient_id());
        if recipient_id != routing_table.node_id() {
            // See if the source node is allowed to resolve nodes
            // This is a costly operation, so only outbound-relay permitted
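Note (illustrative sketch, not part of this commit): the hunk above tags the raw envelope sender/recipient keys with the envelope's crypto kind, so identical key bytes under different cryptosystems no longer compare equal. A minimal stand-alone model of that idea, using toy stand-ins rather than Veilid's real CryptoKind/PublicKey types:

```rust
// Toy stand-ins for illustration only; Veilid's real types differ.
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
struct CryptoKind([u8; 4]);

#[derive(Clone, Copy, PartialEq, Eq, Debug)]
struct PublicKey([u8; 32]);

/// A key tagged with the cryptosystem it belongs to.
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
struct TypedKey {
    kind: CryptoKind,
    key: PublicKey,
}

impl TypedKey {
    fn new(kind: CryptoKind, key: PublicKey) -> Self {
        Self { kind, key }
    }
}

fn main() {
    let vld0 = CryptoKind(*b"VLD0");
    let other = CryptoKind(*b"XXX0");
    let raw = PublicKey([7u8; 32]);

    // Same raw key bytes, different kinds: the typed keys are not equal.
    assert_ne!(TypedKey::new(vld0, raw), TypedKey::new(other, raw));
    // Same kind and same bytes: equal, as before.
    assert_eq!(TypedKey::new(vld0, raw), TypedKey::new(vld0, raw));
    println!("typed key comparison ok");
}
```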
@@ -1537,7 +1539,7 @@ impl NetworkManager {
            if let Some(nr) = routing_table.lookup_node_ref(k) {
                let peer_stats = nr.peer_stats();
                let peer = PeerTableData {
-                    node_id: k,
+                    node_ids: k,
                    peer_address: v.last_connection.remote(),
                    peer_stats,
                };
@@ -12,15 +12,15 @@ pub(super) type EntriesIter<'a> =

 #[derive(Debug, RkyvArchive, RkyvSerialize, RkyvDeserialize)]
 #[archive_attr(repr(C), derive(CheckBytes))]
-struct BucketEntryData {
+struct SerializedBucketEntryData {
     key: PublicKey,
-    value: Vec<u8>,
+    value: u32, // index into serialized entries list
 }

 #[derive(Debug, RkyvArchive, RkyvSerialize, RkyvDeserialize)]
 #[archive_attr(repr(C), derive(CheckBytes))]
-struct BucketData {
-    entries: Vec<BucketEntryData>,
+struct SerializedBucketData {
+    entries: Vec<SerializedBucketEntryData>,
     newest_entry: Option<PublicKey>,
 }

@@ -41,29 +41,41 @@ impl Bucket {
         }
     }

-    pub(super) fn load_bucket(&mut self, data: Vec<u8>) -> EyreResult<()> {
-        let bucket_data: BucketData = from_rkyv(data)?;
+    pub(super) fn load_bucket(
+        &mut self,
+        data: Vec<u8>,
+        all_entries: &[Arc<BucketEntry>],
+    ) -> EyreResult<()> {
+        let bucket_data: SerializedBucketData = from_rkyv(data)?;

         for e in bucket_data.entries {
-            let entryinner = from_rkyv(e.value).wrap_err("failed to deserialize bucket entry")?;
             self.entries
-                .insert(e.key, Arc::new(BucketEntry::new_with_inner(entryinner)));
+                .insert(e.key, all_entries[e.value as usize].clone());
         }

         self.newest_entry = bucket_data.newest_entry;

         Ok(())
     }
-    pub(super) fn save_bucket(&self) -> EyreResult<Vec<u8>> {
+
+    pub(super) fn save_bucket(
+        &self,
+        all_entries: &mut Vec<Arc<BucketEntry>>,
+        entry_map: &mut HashMap<*const BucketEntry, u32>,
+    ) -> EyreResult<Vec<u8>> {
         let mut entries = Vec::new();
         for (k, v) in &self.entries {
-            let entry_bytes = v.with_inner(|e| to_rkyv(e))?;
-            entries.push(BucketEntryData {
+            let entry_index = entry_map.entry(Arc::as_ptr(v)).or_insert_with(|| {
+                let entry_index = all_entries.len();
+                all_entries.push(v.clone());
+                entry_index as u32
+            });
+            entries.push(SerializedBucketEntryData {
                 key: *k,
-                value: entry_bytes,
+                value: *entry_index,
             });
         }
-        let bucket_data = BucketData {
+        let bucket_data = SerializedBucketData {
             entries,
             newest_entry: self.newest_entry.clone(),
         };
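Note (illustrative sketch, not part of this commit): save_bucket now records an index into a shared entry list instead of serializing each entry inline, so an entry shared by buckets of several crypto kinds is serialized only once. The pointer-keyed deduplication pattern in isolation, with simplified stand-in types:

```rust
use std::collections::HashMap;
use std::sync::Arc;

// Simplified stand-in for the real bucket entry type.
struct BucketEntry {
    name: &'static str,
}

/// Assign each unique Arc<BucketEntry> a stable index, reusing the index
/// when the same allocation is seen again (e.g. from another bucket).
fn index_for(
    entry: &Arc<BucketEntry>,
    all_entries: &mut Vec<Arc<BucketEntry>>,
    entry_map: &mut HashMap<*const BucketEntry, u32>,
) -> u32 {
    *entry_map.entry(Arc::as_ptr(entry)).or_insert_with(|| {
        let idx = all_entries.len() as u32;
        all_entries.push(entry.clone());
        idx
    })
}

fn main() {
    let shared = Arc::new(BucketEntry { name: "shared" });
    let solo = Arc::new(BucketEntry { name: "solo" });

    let mut all_entries = Vec::new();
    let mut entry_map = HashMap::new();

    // The same entry referenced from two buckets serializes to one slot.
    assert_eq!(index_for(&shared, &mut all_entries, &mut entry_map), 0);
    assert_eq!(index_for(&solo, &mut all_entries, &mut entry_map), 1);
    assert_eq!(index_for(&shared, &mut all_entries, &mut entry_map), 0);
    assert_eq!(all_entries.len(), 2);
    println!("first entry: {}, unique entries: {}", all_entries[0].name, all_entries.len());
}
```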
@@ -82,9 +82,10 @@ pub struct VersionRange
 #[derive(Debug, RkyvArchive, RkyvSerialize, RkyvDeserialize)]
 #[archive_attr(repr(C), derive(CheckBytes))]
 pub struct BucketEntryInner {
-    /// The minimum and maximum range of cryptography versions supported by the node,
-    /// inclusive of the requirements of any relay the node may be using
-    min_max_version: Option<VersionRange>,
+    /// The node ids matching this bucket entry, with the cryptography versions supported by this node as the 'kind' field
+    node_ids: Vec<TypedKey>,
+    /// The set of envelope versions supported by the node inclusive of the requirements of any relay the node may be using
+    envelope_support: Vec<u8>,
     /// If this node has updated it's SignedNodeInfo since our network
     /// and dial info has last changed, for example when our IP address changes
     /// Used to determine if we should make this entry 'live' again when we receive a signednodeinfo update that
@@ -131,6 +132,11 @@ impl BucketEntryInner {
         self.node_ref_tracks.remove(&track_id);
     }

+    // Node ids
+    pub fn node_ids(&self) -> Vec<TypedKey> {
+        self.node_ids.clone()
+    }
+
     // Less is faster
     pub fn cmp_fastest(e1: &Self, e2: &Self) -> std::cmp::Ordering {
         // Lower latency to the front
@@ -310,13 +316,13 @@ impl BucketEntryInner {
         opt_current_sni.as_ref().map(|s| s.as_ref())
     }

-    pub fn make_peer_info(&self, key: PublicKey, routing_domain: RoutingDomain) -> Option<PeerInfo> {
+    pub fn make_peer_info(&self, routing_domain: RoutingDomain) -> Option<PeerInfo> {
         let opt_current_sni = match routing_domain {
             RoutingDomain::LocalNetwork => &self.local_network.signed_node_info,
             RoutingDomain::PublicInternet => &self.public_internet.signed_node_info,
         };
         opt_current_sni.as_ref().map(|s| PeerInfo {
-            node_id: NodeId::new(key),
+            node_ids: self.node_ids.clone(),
             signed_node_info: *s.clone(),
         })
     }
|
@ -73,7 +73,7 @@ pub(super) struct RoutingTableUnlockedInner {
|
|||||||
/// The current node's public DHT keys and secrets
|
/// The current node's public DHT keys and secrets
|
||||||
node_id_keypairs: BTreeMap<CryptoKind, KeyPair>,
|
node_id_keypairs: BTreeMap<CryptoKind, KeyPair>,
|
||||||
/// Buckets to kick on our next kick task
|
/// Buckets to kick on our next kick task
|
||||||
kick_queue: Mutex<BTreeSet<usize>>,
|
kick_queue: Mutex<BTreeSet<(CryptoKind, usize)>>,
|
||||||
/// Background process for computing statistics
|
/// Background process for computing statistics
|
||||||
rolling_transfers_task: TickTask<EyreReport>,
|
rolling_transfers_task: TickTask<EyreReport>,
|
||||||
/// Background process to purge dead routing table entries when necessary
|
/// Background process to purge dead routing table entries when necessary
|
||||||
@ -90,6 +90,56 @@ pub(super) struct RoutingTableUnlockedInner {
|
|||||||
private_route_management_task: TickTask<EyreReport>,
|
private_route_management_task: TickTask<EyreReport>,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
impl RoutingTableUnlockedInner {
|
||||||
|
pub fn network_manager(&self) -> NetworkManager {
|
||||||
|
self.network_manager.clone()
|
||||||
|
}
|
||||||
|
pub fn crypto(&self) -> Crypto {
|
||||||
|
self.network_manager().crypto()
|
||||||
|
}
|
||||||
|
pub fn rpc_processor(&self) -> RPCProcessor {
|
||||||
|
self.network_manager().rpc_processor()
|
||||||
|
}
|
||||||
|
pub fn update_callback(&self) -> UpdateCallback {
|
||||||
|
self.network_manager().update_callback()
|
||||||
|
}
|
||||||
|
pub fn with_config<F, R>(&self, f: F) -> R
|
||||||
|
where
|
||||||
|
F: FnOnce(&VeilidConfigInner) -> R,
|
||||||
|
{
|
||||||
|
f(&*self.config.get())
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn node_id(&self, kind: CryptoKind) -> PublicKey {
|
||||||
|
self.node_id_keypairs.get(&kind).unwrap().key
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn node_id_secret(&self, kind: CryptoKind) -> SecretKey {
|
||||||
|
self.node_id_keypairs.get(&kind).unwrap().secret
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn matches_own_node_id(&self, node_ids: &[TypedKey]) -> bool {
|
||||||
|
for ni in node_ids {
|
||||||
|
if let Some(v) = self.node_id_keypairs.get(&ni.kind) {
|
||||||
|
if v.key == ni.key {
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
false
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn find_bucket_index(&self, node_id: TypedKey) -> Option<(CryptoKind, usize)> {
|
||||||
|
let crypto = self.crypto();
|
||||||
|
let self_node_id = self.node_id_keypairs.get(&node_id.kind)?.key;
|
||||||
|
let vcrypto = crypto.get(node_id.kind)?;
|
||||||
|
vcrypto
|
||||||
|
.distance(&node_id.key, &self_node_id)
|
||||||
|
.first_nonzero_bit()
|
||||||
|
.map(|x| (node_id.kind, x))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
#[derive(Clone)]
|
#[derive(Clone)]
|
||||||
pub struct RoutingTable {
|
pub struct RoutingTable {
|
||||||
inner: Arc<RwLock<RoutingTableInner>>,
|
inner: Arc<RwLock<RoutingTableInner>>,
|
||||||
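Note (illustrative sketch, not part of this commit): with one keypair per crypto kind, a bucket index becomes a (CryptoKind, usize) pair: the XOR distance is taken against our own key of the same kind, and an unsupported kind simply yields None. A rough self-contained model of that lookup, using toy key types and a plain byte-wise XOR distance:

```rust
use std::collections::BTreeMap;

type CryptoKind = [u8; 4]; // toy stand-in
type Key = [u8; 32];       // toy stand-in

/// First non-zero bit of the XOR distance picks the bucket, as in Kademlia.
fn first_nonzero_bit(a: &Key, b: &Key) -> Option<usize> {
    for (i, (x, y)) in a.iter().zip(b.iter()).enumerate() {
        let d = x ^ y;
        if d != 0 {
            return Some(i * 8 + d.leading_zeros() as usize);
        }
    }
    None // identical keys: this is our own node id, no bucket
}

/// Map a typed node id to (kind, bucket index), skipping kinds we don't run.
fn find_bucket_index(
    own_keys: &BTreeMap<CryptoKind, Key>,
    kind: CryptoKind,
    node_id: Key,
) -> Option<(CryptoKind, usize)> {
    let own = own_keys.get(&kind)?;
    first_nonzero_bit(own, &node_id).map(|bit| (kind, bit))
}

fn main() {
    let mut own_keys = BTreeMap::new();
    own_keys.insert(*b"VLD0", [0u8; 32]);

    let mut peer = [0u8; 32];
    peer[0] = 0b0001_0000; // differs in bit 3 of the first byte

    assert_eq!(find_bucket_index(&own_keys, *b"VLD0", peer), Some((*b"VLD0", 3)));
    assert_eq!(find_bucket_index(&own_keys, *b"XXX0", peer), None);
    println!("bucket index lookup ok");
}
```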
@@ -142,37 +192,6 @@ impl RoutingTable {
         this
     }

-    pub fn network_manager(&self) -> NetworkManager {
-        self.unlocked_inner.network_manager.clone()
-    }
-    pub fn crypto(&self) -> Crypto {
-        self.network_manager().crypto()
-    }
-    pub fn rpc_processor(&self) -> RPCProcessor {
-        self.network_manager().rpc_processor()
-    }
-    pub fn update_callback(&self) -> UpdateCallback {
-        self.network_manager().update_callback()
-    }
-    pub fn with_config<F, R>(&self, f: F) -> R
-    where
-        F: FnOnce(&VeilidConfigInner) -> R,
-    {
-        f(&*self.unlocked_inner.config.get())
-    }
-
-    pub fn node_id(&self, kind: CryptoKind) -> PublicKey {
-        self.unlocked_inner.node_id_keypairs.get(&kind).unwrap().key
-    }
-
-    pub fn node_id_secret(&self, kind: CryptoKind) -> SecretKey {
-        self.unlocked_inner
-            .node_id_keypairs
-            .get(&kind)
-            .unwrap()
-            .secret
-    }
-
     /////////////////////////////////////
     /// Initialization

@@ -245,56 +264,93 @@ impl RoutingTable {
         debug!("finished routing table terminate");
     }

+    /// Serialize routing table to table store
     async fn save_buckets(&self) -> EyreResult<()> {
-        // Serialize all entries
-        let mut bucketvec: Vec<Vec<u8>> = Vec::new();
+        // Since entries are shared by multiple buckets per cryptokind
+        // we need to get the list of all unique entries when serializing
+        let mut all_entries: Vec<Arc<BucketEntry>> = Vec::new();
+
+        // Serialize all buckets and get map of entries
+        let mut serialized_bucket_map: BTreeMap<CryptoKind, Vec<Vec<u8>>> = BTreeMap::new();
         {
+            let mut entry_map: HashMap<*const BucketEntry, u32> = HashMap::new();
             let inner = &*self.inner.read();
-            for bucket in &inner.buckets {
-                bucketvec.push(bucket.save_bucket()?)
+            for ck in VALID_CRYPTO_KINDS {
+                let buckets = inner.buckets.get(&ck).unwrap();
+                let mut serialized_buckets = Vec::new();
+                for bucket in buckets.iter() {
+                    serialized_buckets.push(bucket.save_bucket(&mut all_entries, &mut entry_map)?)
+                }
+                serialized_bucket_map.insert(ck, serialized_buckets);
             }
         }
-        let table_store = self.network_manager().table_store();
+
+        // Serialize all the entries
+        let mut all_entry_bytes = Vec::with_capacity(all_entries.len());
+        for entry in all_entries {
+            // Serialize entry
+            let entry_bytes = entry.with_inner(|e| to_rkyv(e))?;
+            all_entry_bytes.push(entry_bytes);
+        }
+
+        let table_store = self.unlocked_inner.network_manager().table_store();
         let tdb = table_store.open("routing_table", 1).await?;
-        let bucket_count = bucketvec.len();
         let dbx = tdb.transact();
-        if let Err(e) = dbx.store_rkyv(0, b"bucket_count", &bucket_count) {
+        if let Err(e) = dbx.store_rkyv(0, b"serialized_bucket_map", &serialized_bucket_map) {
             dbx.rollback();
             return Err(e);
         }
-
-        for (n, b) in bucketvec.iter().enumerate() {
-            dbx.store(0, format!("bucket_{}", n).as_bytes(), b)
+        if let Err(e) = dbx.store_rkyv(0, b"all_entry_bytes", &all_entry_bytes) {
+            dbx.rollback();
+            return Err(e);
         }
         dbx.commit().await?;
         Ok(())
     }

+    /// Deserialize routing table from table store
     async fn load_buckets(&self) -> EyreResult<()> {
-        // Deserialize all entries
-        let tstore = self.network_manager().table_store();
+        // Deserialize bucket map and all entries from the table store
+        let tstore = self.unlocked_inner.network_manager().table_store();
         let tdb = tstore.open("routing_table", 1).await?;
-        let Some(bucket_count): Option<usize> = tdb.load_rkyv(0, b"bucket_count")? else {
-            log_rtab!(debug "no bucket count in saved routing table");
+        let Some(serialized_bucket_map): Option<BTreeMap<CryptoKind, Vec<Vec<u8>>>> = tdb.load_rkyv(0, b"serialized_bucket_map")? else {
+            log_rtab!(debug "no bucket map in saved routing table");
             return Ok(());
         };
-        let inner = &mut *self.inner.write();
-        if bucket_count != inner.buckets.len() {
-            // Must have the same number of buckets
-            warn!("bucket count is different, not loading routing table");
+        let Some(all_entry_bytes): Option<Vec<Vec<u8>>> = tdb.load_rkyv(0, b"all_entry_bytes")? else {
+            log_rtab!(debug "no all_entry_bytes in saved routing table");
             return Ok(());
+        };
+
+        // Reconstruct all entries
+        let mut all_entries: Vec<Arc<BucketEntry>> = Vec::with_capacity(all_entry_bytes.len());
+        for entry_bytes in all_entry_bytes {
+            let entryinner =
+                from_rkyv(entry_bytes).wrap_err("failed to deserialize bucket entry")?;
+            all_entries.push(Arc::new(BucketEntry::new_with_inner(entryinner)));
         }
-        let mut bucketdata_vec: Vec<Vec<u8>> = Vec::new();
-        for n in 0..bucket_count {
-            let Some(bucketdata): Option<Vec<u8>> =
-                tdb.load(0, format!("bucket_{}", n).as_bytes())? else {
-                warn!("bucket data not loading, skipping loading routing table");
+
+        // Validate serialized bucket map
+        for (k, v) in &serialized_bucket_map {
+            if !VALID_CRYPTO_KINDS.contains(k) {
+                warn!("crypto kind is not valid, not loading routing table");
                 return Ok(());
-            };
-            bucketdata_vec.push(bucketdata);
+            }
+            if v.len() != PUBLIC_KEY_LENGTH * 8 {
+                warn!("bucket count is different, not loading routing table");
+                return Ok(());
+            }
         }
-        for (n, bucketdata) in bucketdata_vec.into_iter().enumerate() {
-            inner.buckets[n].load_bucket(bucketdata)?;
+
+        // Recreate buckets
+        let inner = &mut *self.inner.write();
+
+        for (k, v) in serialized_bucket_map {
+            let buckets = inner.buckets.get_mut(&k).unwrap();
+
+            for n in 0..v.len() {
+                buckets[n].load_bucket(v[n].clone(), &all_entries)?;
+            }
         }

         Ok(())
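Note (illustrative sketch, not part of this commit): the routing table is now persisted as two records, a per-kind map of serialized buckets plus one flat list of serialized entries that buckets reference by index, and load_buckets validates the map before rebuilding. A toy model of that layout and the validation step, using plain structs instead of the table store and rkyv:

```rust
use std::collections::BTreeMap;

type CryptoKind = [u8; 4]; // toy stand-in

/// Toy model of what save_buckets writes under the "routing_table" table:
/// two records instead of one record per bucket.
struct PersistedRoutingTable {
    /// "serialized_bucket_map": per crypto kind, one serialized blob per bucket
    serialized_bucket_map: BTreeMap<CryptoKind, Vec<Vec<u8>>>,
    /// "all_entry_bytes": each unique entry serialized once; buckets refer
    /// to entries by index into this list
    all_entry_bytes: Vec<Vec<u8>>,
}

/// Mirrors the load-time checks: only known crypto kinds, and the expected
/// number of buckets (one per key bit) for every kind.
fn validate(p: &PersistedRoutingTable, valid_kinds: &[CryptoKind], buckets_per_kind: usize) -> bool {
    p.serialized_bucket_map
        .iter()
        .all(|(k, v)| valid_kinds.contains(k) && v.len() == buckets_per_kind)
}

fn main() {
    let p = PersistedRoutingTable {
        serialized_bucket_map: BTreeMap::from([(*b"VLD0", vec![Vec::new(); 32 * 8])]),
        all_entry_bytes: Vec::new(),
    };
    assert!(validate(&p, &[*b"VLD0"], 32 * 8));
    println!("layout ok: {} serialized entries", p.all_entry_bytes.len());
}
```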
@@ -469,14 +525,6 @@ impl RoutingTable {
         self.inner.write().purge_last_connections();
     }

-    fn find_bucket_index(&self, node_id: TypedKey) -> usize {
-        let crypto = self.crypto().get(node_id.kind).unwrap();
-        crypto
-            .distance(&node_id, &self.unlocked_inner.node_id)
-            .first_nonzero_bit()
-            .unwrap()
-    }
-
     pub fn get_entry_count(
         &self,
         routing_domain_set: RoutingDomainSet,
@@ -502,23 +550,11 @@ impl RoutingTable {
         inner.get_all_nodes(self.clone(), cur_ts)
     }

-    fn queue_bucket_kick(&self, node_id: PublicKey) {
-        let idx = self.find_bucket_index(node_id);
+    fn queue_bucket_kick(&self, node_id: TypedKey) {
+        let idx = self.unlocked_inner.find_bucket_index(node_id).unwrap();
         self.unlocked_inner.kick_queue.lock().insert(idx);
     }

-    /// Create a node reference, possibly creating a bucket entry
-    /// the 'update_func' closure is called on the node, and, if created,
-    /// in a locked fashion as to ensure the bucket entry state is always valid
-    pub fn create_node_ref<F>(&self, node_id: PublicKey, update_func: F) -> Option<NodeRef>
-    where
-        F: FnOnce(&mut RoutingTableInner, &mut BucketEntryInner),
-    {
-        self.inner
-            .write()
-            .create_node_ref(self.clone(), node_id, update_func)
-    }
-
     /// Resolve an existing routing table entry and return a reference to it
     pub fn lookup_node_ref(&self, node_id: PublicKey) -> Option<NodeRef> {
         self.inner.read().lookup_node_ref(self.clone(), node_id)
pub fn lookup_node_ref(&self, node_id: PublicKey) -> Option<NodeRef> {
|
pub fn lookup_node_ref(&self, node_id: PublicKey) -> Option<NodeRef> {
|
||||||
self.inner.read().lookup_node_ref(self.clone(), node_id)
|
self.inner.read().lookup_node_ref(self.clone(), node_id)
|
||||||
@ -542,18 +578,16 @@ impl RoutingTable {
|
|||||||
/// Shortcut function to add a node to our routing table if it doesn't exist
|
/// Shortcut function to add a node to our routing table if it doesn't exist
|
||||||
/// and add the dial info we have for it. Returns a noderef filtered to
|
/// and add the dial info we have for it. Returns a noderef filtered to
|
||||||
/// the routing domain in which this node was registered for convenience.
|
/// the routing domain in which this node was registered for convenience.
|
||||||
pub fn register_node_with_signed_node_info(
|
pub fn register_node_with_peer_info(
|
||||||
&self,
|
&self,
|
||||||
routing_domain: RoutingDomain,
|
routing_domain: RoutingDomain,
|
||||||
node_id: PublicKey,
|
peer_info: PeerInfo,
|
||||||
signed_node_info: SignedNodeInfo,
|
|
||||||
allow_invalid: bool,
|
allow_invalid: bool,
|
||||||
) -> Option<NodeRef> {
|
) -> Option<NodeRef> {
|
||||||
self.inner.write().register_node_with_signed_node_info(
|
self.inner.write().register_node_with_peer_info(
|
||||||
self.clone(),
|
self.clone(),
|
||||||
routing_domain,
|
routing_domain,
|
||||||
node_id,
|
peer_info,
|
||||||
signed_node_info,
|
|
||||||
allow_invalid,
|
allow_invalid,
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
@ -844,30 +878,13 @@ impl RoutingTable {
|
|||||||
|
|
||||||
#[instrument(level = "trace", skip(self), ret)]
|
#[instrument(level = "trace", skip(self), ret)]
|
||||||
pub fn register_find_node_answer(&self, peers: Vec<PeerInfo>) -> Vec<NodeRef> {
|
pub fn register_find_node_answer(&self, peers: Vec<PeerInfo>) -> Vec<NodeRef> {
|
||||||
let node_id = self.node_id();
|
|
||||||
|
|
||||||
// register nodes we'd found
|
// register nodes we'd found
|
||||||
let mut out = Vec::<NodeRef>::with_capacity(peers.len());
|
let mut out = Vec::<NodeRef>::with_capacity(peers.len());
|
||||||
for p in peers {
|
for p in peers {
|
||||||
// if our own node if is in the list then ignore it, as we don't add ourselves to our own routing table
|
|
||||||
if p.node_id.key == node_id {
|
|
||||||
continue;
|
|
||||||
}
|
|
||||||
|
|
||||||
// node can not be its own relay
|
|
||||||
if let Some(rid) = &p.signed_node_info.relay_id() {
|
|
||||||
if rid.key == p.node_id.key {
|
|
||||||
continue;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// register the node if it's new
|
// register the node if it's new
|
||||||
if let Some(nr) = self.register_node_with_signed_node_info(
|
if let Some(nr) =
|
||||||
RoutingDomain::PublicInternet,
|
self.register_node_with_peer_info(RoutingDomain::PublicInternet, p, false)
|
||||||
p.node_id.key,
|
{
|
||||||
p.signed_node_info.clone(),
|
|
||||||
false,
|
|
||||||
) {
|
|
||||||
out.push(nr);
|
out.push(nr);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -6,7 +6,6 @@ use alloc::fmt;
|
|||||||
|
|
||||||
pub struct NodeRefBaseCommon {
|
pub struct NodeRefBaseCommon {
|
||||||
routing_table: RoutingTable,
|
routing_table: RoutingTable,
|
||||||
node_id: PublicKey,
|
|
||||||
entry: Arc<BucketEntry>,
|
entry: Arc<BucketEntry>,
|
||||||
filter: Option<NodeRefFilter>,
|
filter: Option<NodeRefFilter>,
|
||||||
sequencing: Sequencing,
|
sequencing: Sequencing,
|
||||||
@ -99,8 +98,8 @@ pub trait NodeRefBase: Sized {
|
|||||||
fn routing_table(&self) -> RoutingTable {
|
fn routing_table(&self) -> RoutingTable {
|
||||||
self.common().routing_table.clone()
|
self.common().routing_table.clone()
|
||||||
}
|
}
|
||||||
fn node_id(&self) -> PublicKey {
|
fn node_ids(&self) -> Vec<TypedKey> {
|
||||||
self.common().node_id
|
self.operate(|_rti, e| e.node_ids())
|
||||||
}
|
}
|
||||||
fn has_updated_since_last_network_change(&self) -> bool {
|
fn has_updated_since_last_network_change(&self) -> bool {
|
||||||
self.operate(|_rti, e| e.has_updated_since_last_network_change())
|
self.operate(|_rti, e| e.has_updated_since_last_network_change())
|
||||||
@ -128,7 +127,7 @@ pub trait NodeRefBase: Sized {
|
|||||||
|
|
||||||
// Per-RoutingDomain accessors
|
// Per-RoutingDomain accessors
|
||||||
fn make_peer_info(&self, routing_domain: RoutingDomain) -> Option<PeerInfo> {
|
fn make_peer_info(&self, routing_domain: RoutingDomain) -> Option<PeerInfo> {
|
||||||
self.operate(|_rti, e| e.make_peer_info(self.node_id(), routing_domain))
|
self.operate(|_rti, e| e.make_peer_info(routing_domain))
|
||||||
}
|
}
|
||||||
fn node_info(&self, routing_domain: RoutingDomain) -> Option<NodeInfo> {
|
fn node_info(&self, routing_domain: RoutingDomain) -> Option<NodeInfo> {
|
||||||
self.operate(|_rti, e| e.node_info(routing_domain).cloned())
|
self.operate(|_rti, e| e.node_info(routing_domain).cloned())
|
||||||
@ -180,19 +179,18 @@ pub trait NodeRefBase: Sized {
|
|||||||
self.operate_mut(|rti, e| {
|
self.operate_mut(|rti, e| {
|
||||||
e.signed_node_info(routing_domain)
|
e.signed_node_info(routing_domain)
|
||||||
.and_then(|n| n.relay_peer_info())
|
.and_then(|n| n.relay_peer_info())
|
||||||
.and_then(|t| {
|
.and_then(|rpi| {
|
||||||
// If relay is ourselves, then return None, because we can't relay through ourselves
|
// If relay is ourselves, then return None, because we can't relay through ourselves
|
||||||
// and to contact this node we should have had an existing inbound connection
|
// and to contact this node we should have had an existing inbound connection
|
||||||
if t.node_id.key == rti.unlocked_inner.node_id {
|
if rti.unlocked_inner.matches_own_node_id(&rpi.node_ids) {
|
||||||
return None;
|
return None;
|
||||||
}
|
}
|
||||||
|
|
||||||
// Register relay node and return noderef
|
// Register relay node and return noderef
|
||||||
rti.register_node_with_signed_node_info(
|
rti.register_node_with_peer_info(
|
||||||
self.routing_table(),
|
self.routing_table(),
|
||||||
routing_domain,
|
routing_domain,
|
||||||
t.node_id.key,
|
rpi,
|
||||||
t.signed_node_info,
|
|
||||||
false,
|
false,
|
||||||
)
|
)
|
||||||
})
|
})
|
||||||
@@ -346,7 +344,6 @@ pub struct NodeRef {
 impl NodeRef {
     pub fn new(
         routing_table: RoutingTable,
-        node_id: PublicKey,
         entry: Arc<BucketEntry>,
         filter: Option<NodeRefFilter>,
     ) -> Self {
@@ -355,7 +352,6 @@ impl NodeRef {
         Self {
             common: NodeRefBaseCommon {
                 routing_table,
-                node_id,
                 entry,
                 filter,
                 sequencing: Sequencing::NoPreference,
@@ -415,7 +411,6 @@ impl Clone for NodeRef {
         Self {
             common: NodeRefBaseCommon {
                 routing_table: self.common.routing_table.clone(),
-                node_id: self.common.node_id,
                 entry: self.common.entry.clone(),
                 filter: self.common.filter.clone(),
                 sequencing: self.common.sequencing,
@@ -32,13 +32,6 @@ pub struct CompiledRoute {
     pub first_hop: NodeRef,
 }

-#[derive(Clone, Debug, RkyvArchive, RkyvSerialize, RkyvDeserialize)]
-#[archive_attr(repr(C), derive(CheckBytes))]
-pub struct KeyPair {
-    key: PublicKey,
-    secret: SecretKey,
-}
-
 #[derive(Clone, Debug, Default, RkyvArchive, RkyvSerialize, RkyvDeserialize)]
 #[archive_attr(repr(C), derive(CheckBytes))]
 pub struct RouteStats {
@@ -1377,7 +1370,7 @@ impl RouteSpecStore {

         let opt_first_hop = match pr_first_hop_node {
             RouteNode::NodeId(id) => rti.lookup_node_ref(routing_table.clone(), id.key),
-            RouteNode::PeerInfo(pi) => rti.register_node_with_signed_node_info(
+            RouteNode::PeerInfo(pi) => rti.register_node_with_peer_info(
                 routing_table.clone(),
                 RoutingDomain::PublicInternet,
                 pi.node_id.key,
@@ -13,8 +13,8 @@ pub struct RecentPeersEntry {
 pub struct RoutingTableInner {
     /// Extra pointer to unlocked members to simplify access
     pub(super) unlocked_inner: Arc<RoutingTableUnlockedInner>,
-    /// Routing table buckets that hold entries
-    pub(super) buckets: Vec<Bucket>,
+    /// Routing table buckets that hold references to entries, per crypto kind
+    pub(super) buckets: BTreeMap<CryptoKind, Vec<Bucket>>,
     /// A fast counter for the number of entries in the table, total
     pub(super) bucket_entry_count: usize,
     /// The public internet routing domain
@@ -37,7 +37,7 @@ impl RoutingTableInner {
     pub(super) fn new(unlocked_inner: Arc<RoutingTableUnlockedInner>) -> RoutingTableInner {
         RoutingTableInner {
             unlocked_inner,
-            buckets: Vec::new(),
+            buckets: BTreeMap::new(),
             public_internet_routing_domain: PublicInternetRoutingDomainDetail::default(),
             local_network_routing_domain: LocalNetworkRoutingDomainDetail::default(),
             bucket_entry_count: 0,
@@ -49,28 +49,6 @@ impl RoutingTableInner {
         }
     }

-    pub fn network_manager(&self) -> NetworkManager {
-        self.unlocked_inner.network_manager.clone()
-    }
-    pub fn crypto(&self) -> Crypto {
-        self.network_manager().crypto()
-    }
-    pub fn rpc_processor(&self) -> RPCProcessor {
-        self.network_manager().rpc_processor()
-    }
-
-    pub fn node_id(&self, kind: CryptoKind) -> PublicKey {
-        self.unlocked_inner.node_id
-    }
-
-    pub fn node_id_secret(&self, kind: CryptoKind) -> SecretKey {
-        self.unlocked_inner.node_id_secret
-    }
-
-    pub fn config(&self) -> VeilidConfig {
-        self.unlocked_inner.config.clone()
-    }
-
     pub fn transfer_stats_accounting(&mut self) -> &mut TransferStatsAccounting {
         &mut self.self_transfer_stats_accounting
     }
@@ -327,12 +305,15 @@ impl RoutingTableInner {
     }

     pub fn init_buckets(&mut self, routing_table: RoutingTable) {
-        // Size the buckets (one per bit)
+        // Size the buckets (one per bit), one bucket set per crypto kind
         self.buckets.clear();
-        self.buckets.reserve(PUBLIC_KEY_LENGTH * 8);
-        for _ in 0..PUBLIC_KEY_LENGTH * 8 {
-            let bucket = Bucket::new(routing_table.clone());
-            self.buckets.push(bucket);
+        for ck in VALID_CRYPTO_KINDS {
+            let mut ckbuckets = Vec::with_capacity(PUBLIC_KEY_LENGTH * 8);
+            for _ in 0..PUBLIC_KEY_LENGTH * 8 {
+                let bucket = Bucket::new(routing_table.clone());
+                ckbuckets.push(bucket);
+            }
+            self.buckets.insert(ck, ckbuckets);
         }
     }

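Note (illustrative sketch, not part of this commit): buckets are now kept per crypto kind, one bucket per key bit for every kind in VALID_CRYPTO_KINDS. A stand-alone sketch of that initialization with placeholder types:

```rust
use std::collections::BTreeMap;

type CryptoKind = [u8; 4]; // toy stand-in
const PUBLIC_KEY_LENGTH: usize = 32;
const VALID_CRYPTO_KINDS: [CryptoKind; 1] = [*b"VLD0"];

struct Bucket; // placeholder for the real bucket type

/// One bucket per key bit, one bucket set per supported crypto kind.
fn init_buckets() -> BTreeMap<CryptoKind, Vec<Bucket>> {
    let mut buckets = BTreeMap::new();
    for ck in VALID_CRYPTO_KINDS {
        let mut ckbuckets = Vec::with_capacity(PUBLIC_KEY_LENGTH * 8);
        for _ in 0..PUBLIC_KEY_LENGTH * 8 {
            ckbuckets.push(Bucket);
        }
        buckets.insert(ck, ckbuckets);
    }
    buckets
}

fn main() {
    let buckets = init_buckets();
    assert_eq!(buckets[b"VLD0"].len(), 256);
    println!("{} crypto kind(s), 256 buckets each", buckets.len());
}
```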
@@ -415,12 +396,6 @@ impl RoutingTableInner {
         }
     }

-    pub fn find_bucket_index(&self, node_id: PublicKey) -> usize {
-        distance(&node_id, &self.unlocked_inner.node_id)
-            .first_nonzero_bit()
-            .unwrap()
-    }
-
     pub fn get_entry_count(
         &self,
         routing_domain_set: RoutingDomainSet,
@@ -547,23 +522,25 @@ impl RoutingTableInner {
     /// Create a node reference, possibly creating a bucket entry
     /// the 'update_func' closure is called on the node, and, if created,
     /// in a locked fashion as to ensure the bucket entry state is always valid
-    pub fn create_node_ref<F>(
+    fn create_node_ref<F>(
         &mut self,
         outer_self: RoutingTable,
-        node_id: PublicKey,
+        node_ids: &[TypedKey],
         update_func: F,
     ) -> Option<NodeRef>
     where
         F: FnOnce(&mut RoutingTableInner, &mut BucketEntryInner),
     {
         // Ensure someone isn't trying register this node itself
-        if node_id == self.node_id() {
+        if self.unlocked_inner.matches_own_node_id(node_ids) {
             log_rtab!(debug "can't register own node");
             return None;
         }

         // Look up existing entry
-        let idx = self.find_bucket_index(node_id);
+        let idx = node_ids
+            .iter()
+            .find_map(|x| self.unlocked_inner.find_bucket_index(x));
         let noderef = {
             let bucket = &self.buckets[idx];
             let entry = bucket.entry(&node_id);
@@ -608,7 +585,7 @@ impl RoutingTableInner {
             log_rtab!(error "can't look up own node id in routing table");
             return None;
         }
-        let idx = self.find_bucket_index(node_id);
+        let idx = self.unlocked_inner.find_bucket_index(node_id);
         let bucket = &self.buckets[idx];
         bucket
             .entry(&node_id)
@@ -642,7 +619,7 @@ impl RoutingTableInner {
             log_rtab!(error "can't look up own node id in routing table");
             return None;
         }
-        let idx = self.find_bucket_index(node_id);
+        let idx = self.unlocked_inner.find_bucket_index(node_id);
         let bucket = &self.buckets[idx];
         if let Some(e) = bucket.entry(&node_id) {
             return Some(f(e));
@@ -653,41 +630,44 @@ impl RoutingTableInner {
     /// Shortcut function to add a node to our routing table if it doesn't exist
     /// and add the dial info we have for it. Returns a noderef filtered to
     /// the routing domain in which this node was registered for convenience.
-    pub fn register_node_with_signed_node_info(
+    pub fn register_node_with_peer_info(
         &mut self,
         outer_self: RoutingTable,
         routing_domain: RoutingDomain,
-        node_ids: Vec<TypedKey>,
-        signed_node_info: SignedNodeInfo,
+        peer_info: PeerInfo,
         allow_invalid: bool,
     ) -> Option<NodeRef> {
-        // validate signed node info is not something malicious
-        if node_id == self.node_id() {
+        // if our own node if is in the list then ignore it, as we don't add ourselves to our own routing table
+        if self.unlocked_inner.matches_own_node_id(&peer_info.node_ids) {
             log_rtab!(debug "can't register own node id in routing table");
             return None;
         }
-        if let Some(relay_id) = signed_node_info.relay_id() {
-            if relay_id.key == node_id {
-                log_rtab!(debug "node can not be its own relay");
-                return None;
-            }
+        // node can not be its own relay
+        let rids = peer_info.signed_node_info.relay_ids();
+        if self.unlocked_inner.matches_own_node_id(&rids) {
+            log_rtab!(debug "node can not be its own relay");
+            return None;
         }

         if !allow_invalid {
             // verify signature
-            if !signed_node_info.has_any_signature() {
-                log_rtab!(debug "signed node info for {} has invalid signature", node_id);
+            if !peer_info.signed_node_info.has_any_signature() {
+                log_rtab!(debug "signed node info for {:?} has invalid signature", &peer_info.node_ids);
                 return None;
            }
             // verify signed node info is valid in this routing domain
-            if !self.signed_node_info_is_valid_in_routing_domain(routing_domain, &signed_node_info)
-            {
-                log_rtab!(debug "signed node info for {} not valid in the {:?} routing domain", node_id, routing_domain);
+            if !self.signed_node_info_is_valid_in_routing_domain(
+                routing_domain,
+                &peer_info.signed_node_info,
+            ) {
+                log_rtab!(debug "signed node info for {:?} not valid in the {:?} routing domain", peer_info.node_ids, routing_domain);
                 return None;
             }
         }

-        self.create_node_ref(outer_self, node_id, |_rti, e| {
-            e.update_signed_node_info(routing_domain, signed_node_info);
+        self.create_node_ref(outer_self, &peer_info.node_ids, |_rti, e| {
+            e.update_signed_node_info(routing_domain, peer_info.signed_node_info);
         })
         .map(|mut nr| {
             nr.set_filter(Some(
@@ -205,7 +205,7 @@ impl RoutingTable {
         for pi in peer_info {
             let k = pi.node_id.key;
             // Register the node
-            if let Some(nr) = self.register_node_with_signed_node_info(
+            if let Some(nr) = self.register_node_with_peer_info(
                 RoutingDomain::PublicInternet,
                 k,
                 pi.signed_node_info,
@@ -301,7 +301,7 @@ impl RoutingTable {
         log_rtab!("--- bootstrapping {} with {:?}", k.encode(), &v);

         // Make invalid signed node info (no signature)
-        if let Some(nr) = self.register_node_with_signed_node_info(
+        if let Some(nr) = self.register_node_with_peer_info(
             RoutingDomain::PublicInternet,
             k,
             SignedNodeInfo::Direct(SignedDirectNodeInfo::with_no_signature(NodeInfo {
@@ -51,7 +51,7 @@ impl RoutingTable {
         // The outbound relay is the host of the PWA
         if let Some(outbound_relay_peerinfo) = intf::get_outbound_relay_peer().await {
             // Register new outbound relay
-            if let Some(nr) = self.register_node_with_signed_node_info(
+            if let Some(nr) = self.register_node_with_peer_info(
                 RoutingDomain::PublicInternet,
                 outbound_relay_peerinfo.node_id.key,
                 outbound_relay_peerinfo.signed_node_info,
@@ -1218,7 +1218,7 @@ impl RPCProcessor {
                 "sender signednodeinfo has invalid peer scope",
             ));
         }
-        opt_sender_nr = self.routing_table().register_node_with_signed_node_info(
+        opt_sender_nr = self.routing_table().register_node_with_peer_info(
             routing_domain,
             sender_node_id,
             sender_node_info.clone(),
@@ -37,7 +37,7 @@ impl RPCProcessor {
             RouteNode::PeerInfo(pi) => {
                 //
                 let Some(nr) = self.routing_table
-                    .register_node_with_signed_node_info(
+                    .register_node_with_peer_info(
                         RoutingDomain::PublicInternet,
                         pi.node_id.key,
                         pi.signed_node_info,
@@ -98,7 +98,7 @@ impl RPCProcessor {
             RouteNode::PeerInfo(pi) => {
                 //
                 self.routing_table
-                    .register_node_with_signed_node_info(
+                    .register_node_with_peer_info(
                         RoutingDomain::PublicInternet,
                         pi.node_id.key,
                         pi.signed_node_info.clone(),
|
@ -256,7 +256,7 @@ pub struct VeilidStateAttachment {
|
|||||||
)]
|
)]
|
||||||
#[archive_attr(repr(C), derive(CheckBytes))]
|
#[archive_attr(repr(C), derive(CheckBytes))]
|
||||||
pub struct PeerTableData {
|
pub struct PeerTableData {
|
||||||
pub node_id: TypedKey,
|
pub node_ids: Vec<TypedKey>,
|
||||||
pub peer_address: PeerAddress,
|
pub peer_address: PeerAddress,
|
||||||
pub peer_stats: PeerStats,
|
pub peer_stats: PeerStats,
|
||||||
}
|
}
|
||||||