Mirror of https://gitlab.com/veilid/veilid.git, synced 2024-10-01 01:26:08 -04:00
fix loose node refs
commit ce36df5cad
parent 7ca202440b

@@ -12,6 +12,7 @@ crate-type = ["cdylib", "staticlib", "rlib"]
[features]
android_tests = []
ios_tests = [ "simplelog", "backtrace" ]
tracking = [ "backtrace" ]

[dependencies]
capnp = { version = "^0", default_features = false }
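
The new `tracking` feature pulls in the optional `backtrace` dependency; the rest of this commit gates the tracking code behind it. A minimal sketch of that gating (hypothetical function, not part of this commit):

    // Only compiled when the crate is built with `--features tracking`,
    // which also enables the optional backtrace dependency declared above.
    #[cfg(feature = "tracking")]
    fn capture_callsite() -> backtrace::Backtrace {
        backtrace::Backtrace::new_unresolved()
    }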

@@ -36,6 +37,7 @@ once_cell = "^1"
json = "^0"
flume = { version = "^0", features = ["async"] }
enumset = { version= "^1", features = ["serde"] }
backtrace = { version = "^0", optional = true }

ed25519-dalek = { version = "^1", default_features = false, features = ["alloc", "u64_backend"] }
x25519-dalek = { package = "x25519-dalek-ng", version = "^1", default_features = false, features = ["u64_backend"] }

@@ -135,7 +137,6 @@ windows-permissions = "^0"
# Dependencies for iOS
[target.'cfg(target_os = "ios")'.dependencies]
simplelog = { version = "^0", optional = true }
backtrace = { version = "^0", optional = true }

# Rusqlite configuration to ensure platforms that don't come with sqlite get it bundled
# Except WASM which doesn't use sqlite

@@ -503,6 +503,11 @@ impl Network {
        let network_manager = self.network_manager();
        let routing_table = self.routing_table();

        // Cancel all tasks
        if let Err(e) = self.unlocked_inner.update_network_class_task.cancel().await {
            warn!("update_network_class_task not cancelled: {}", e);
        }

        // Drop all dial info
        routing_table.clear_dial_info_details(RoutingDomain::PublicInternet);
        routing_table.clear_dial_info_details(RoutingDomain::LocalNetwork);
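
Network::shutdown now cancels the background network-class detection task before dial info is dropped. Veilid's own task wrapper is not shown in this hunk; the sketch below reproduces the same cancel-then-warn shape with a hypothetical wrapper built on tokio, purely for illustration:

    use std::time::Duration;
    use tokio::{task::JoinHandle, time};

    // Hypothetical stand-in for a periodically ticked background task.
    struct BackgroundTask {
        handle: JoinHandle<()>,
    }

    impl BackgroundTask {
        fn spawn() -> Self {
            let handle = tokio::spawn(async {
                loop {
                    // ... periodic work, e.g. re-detecting the network class ...
                    time::sleep(Duration::from_secs(1)).await;
                }
            });
            Self { handle }
        }

        // Request cancellation and wait for the task to wind down.
        async fn cancel(self) -> Result<(), String> {
            self.handle.abort();
            match self.handle.await {
                // A normal exit or a cancellation error both mean the task is gone.
                Ok(()) => Ok(()),
                Err(e) if e.is_cancelled() => Ok(()),
                Err(e) => Err(format!("task failed to stop cleanly: {}", e)),
            }
        }
    }

    async fn shutdown(task: BackgroundTask) {
        // Mirrors the shape used in the diff: warn, but keep shutting down.
        if let Err(e) = task.cancel().await {
            eprintln!("background task not cancelled: {}", e);
        }
        // ... then drop dial info and perform the rest of the teardown ...
    }

    #[tokio::main]
    async fn main() {
        let task = BackgroundTask::spawn();
        shutdown(task).await;
    }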

@@ -270,6 +270,14 @@ impl NetworkManager {
    pub async fn shutdown(&self) {
        trace!("NetworkManager::shutdown begin");

        // Cancel all tasks
        if let Err(e) = self.unlocked_inner.rolling_transfers_task.cancel().await {
            warn!("rolling_transfers_task not cancelled: {}", e);
        }
        if let Err(e) = self.unlocked_inner.relay_management_task.cancel().await {
            warn!("relay_management_task not cancelled: {}", e);
        }

        // Shutdown network components if they started up
        let components = self.inner.lock().components.clone();
        if let Some(components) = components {

@@ -45,6 +45,10 @@ pub struct BucketEntry {
    peer_stats: PeerStats,
    latency_stats_accounting: LatencyStatsAccounting,
    transfer_stats_accounting: TransferStatsAccounting,
    #[cfg(feature = "tracking")]
    next_track_id: usize,
    #[cfg(feature = "tracking")]
    node_ref_tracks: HashMap<usize, backtrace::Backtrace>,
}

impl BucketEntry {

@@ -57,8 +61,6 @@ impl BucketEntry {
            last_connection: None,
            opt_signed_node_info: None,
            opt_local_node_info: None,
            latency_stats_accounting: LatencyStatsAccounting::new(),
            transfer_stats_accounting: TransferStatsAccounting::new(),
            peer_stats: PeerStats {
                time_added: now,
                rpc_stats: RPCStats::default(),

@@ -66,9 +68,29 @@ impl BucketEntry {
                transfer: TransferStatsDownUp::default(),
                status: None,
            },
            latency_stats_accounting: LatencyStatsAccounting::new(),
            transfer_stats_accounting: TransferStatsAccounting::new(),
            #[cfg(feature = "tracking")]
            next_track_id: 0,
            #[cfg(feature = "tracking")]
            node_ref_tracks: HashMap::new(),
        }
    }

    #[cfg(feature = "tracking")]
    pub fn track(&mut self) -> usize {
        let track_id = self.next_track_id;
        self.next_track_id += 1;
        self.node_ref_tracks
            .insert(track_id, backtrace::Backtrace::new_unresolved());
        track_id
    }

    #[cfg(feature = "tracking")]
    pub fn untrack(&mut self, track_id: usize) {
        self.node_ref_tracks.remove(&track_id);
    }

    pub fn sort_fastest(e1: &Self, e2: &Self) -> std::cmp::Ordering {
        // Lower latency to the front
        if let Some(e1_latency) = &e1.peer_stats.latency {
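
The track/untrack pair added to BucketEntry captures one unresolved backtrace per outstanding NodeRef, keyed by a monotonically increasing id. A self-contained sketch of the same bookkeeping (hypothetical RefTracker type, using the backtrace crate):

    use std::collections::HashMap;

    // Stand-alone version of the bookkeeping BucketEntry gains in this commit.
    struct RefTracker {
        next_track_id: usize,
        tracks: HashMap<usize, backtrace::Backtrace>,
    }

    impl RefTracker {
        fn new() -> Self {
            Self {
                next_track_id: 0,
                tracks: HashMap::new(),
            }
        }

        // Record where a reference was taken; capturing unresolved frames is cheap,
        // and symbol resolution is deferred until a leak actually needs printing.
        fn track(&mut self) -> usize {
            let track_id = self.next_track_id;
            self.next_track_id += 1;
            self.tracks
                .insert(track_id, backtrace::Backtrace::new_unresolved());
            track_id
        }

        fn untrack(&mut self, track_id: usize) {
            self.tracks.remove(&track_id);
        }

        // Resolve and print the capture site of every reference still outstanding.
        fn report_leaks(&mut self) {
            for (id, bt) in &mut self.tracks {
                bt.resolve();
                println!("Id: {}\n----------------\n{:#?}", id, bt);
            }
        }
    }

    fn main() {
        let mut tracker = RefTracker::new();
        let id = tracker.track();
        tracker.untrack(id);
        tracker.report_leaks(); // prints nothing: no outstanding references
    }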

@@ -410,6 +432,15 @@ impl BucketEntry {
impl Drop for BucketEntry {
    fn drop(&mut self) {
        if self.ref_count != 0 {
            #[cfg(feature = "tracking")]
            {
                println!("NodeRef Tracking");
                for (id, bt) in &mut self.node_ref_tracks {
                    bt.resolve();
                    println!("Id: {}\n----------------\n{:#?}", id, bt);
                }
            }

            panic!(
                "bucket entry dropped with non-zero refcount: {:#?}",
                self.node_info()
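
The Drop impl is the enforcement point: an entry that still has live references when it is dropped dumps any tracked backtraces and then panics. A stripped-down sketch of that check (hypothetical Entry type):

    struct Entry {
        ref_count: u32,
    }

    impl Drop for Entry {
        fn drop(&mut self) {
            if self.ref_count != 0 {
                // The real code first prints one resolved backtrace per tracked
                // NodeRef when the "tracking" feature is enabled.
                panic!("entry dropped with non-zero refcount: {}", self.ref_count);
            }
        }
    }

    fn main() {
        let e = Entry { ref_count: 0 };
        drop(e); // fine; a non-zero count here would panic
    }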

@@ -364,6 +364,29 @@ impl RoutingTable {
    }

    pub async fn terminate(&self) {
        // Cancel all tasks being ticked
        if let Err(e) = self.unlocked_inner.rolling_transfers_task.cancel().await {
            warn!("rolling_transfers_task not cancelled: {}", e);
        }
        if let Err(e) = self.unlocked_inner.bootstrap_task.cancel().await {
            warn!("bootstrap_task not cancelled: {}", e);
        }
        if let Err(e) = self.unlocked_inner.peer_minimum_refresh_task.cancel().await {
            warn!("peer_minimum_refresh_task not cancelled: {}", e);
        }
        if let Err(e) = self.unlocked_inner.ping_validator_task.cancel().await {
            warn!("ping_validator_task not cancelled: {}", e);
        }
        if self
            .unlocked_inner
            .node_info_update_single_future
            .cancel()
            .await
            .is_err()
        {
            warn!("node_info_update_single_future not cancelled");
        }

        *self.inner.lock() = Self::new_inner(self.network_manager());
    }
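
terminate() finishes by replacing the routing table's inner state wholesale rather than clearing it field by field, so nothing from the old run can linger. A minimal sketch of that reset-under-a-lock pattern (hypothetical types, std Mutex standing in for Veilid's lock):

    use std::sync::Mutex;

    #[derive(Default)]
    struct Inner {
        buckets: Vec<u32>,
    }

    struct Table {
        inner: Mutex<Inner>,
    }

    impl Table {
        fn terminate(&self) {
            // Background tasks are cancelled before this point.
            *self.inner.lock().unwrap() = Inner::default();
        }
    }

    fn main() {
        let t = Table { inner: Mutex::new(Inner { buckets: vec![1, 2, 3] }) };
        t.terminate();
        assert!(t.inner.lock().unwrap().buckets.is_empty());
    }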

@@ -10,6 +10,8 @@ pub struct NodeRef {
    routing_table: RoutingTable,
    node_id: DHTKey,
    filter: Option<DialInfoFilter>,
    #[cfg(feature = "tracking")]
    track_id: usize,
}

impl NodeRef {

@@ -20,10 +22,13 @@ impl NodeRef {
        filter: Option<DialInfoFilter>,
    ) -> Self {
        entry.ref_count += 1;

        Self {
            routing_table,
            node_id: key,
            filter,
            #[cfg(feature = "tracking")]
            track_id: entry.track(),
        }
    }

@@ -255,12 +260,15 @@ impl Clone for NodeRef {
    fn clone(&self) -> Self {
        self.operate(move |e| {
            e.ref_count += 1;
        });
        Self {
            routing_table: self.routing_table.clone(),
            node_id: self.node_id,
            filter: self.filter.clone(),
        }

            Self {
                routing_table: self.routing_table.clone(),
                node_id: self.node_id,
                filter: self.filter.clone(),
                #[cfg(feature = "tracking")]
                track_id: e.track(),
            }
        })
    }
}
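
This hunk is the core of the fix: the earlier Clone bumped ref_count inside operate() but built the new NodeRef outside the closure, so a clone never received its own track id. Constructing the value inside the closure lets every clone call track() on the entry. A self-contained sketch of the intended pairing (hypothetical Entry/Handle types, Rc/RefCell standing in for the routing table's storage):

    use std::cell::RefCell;
    use std::collections::HashSet;
    use std::rc::Rc;

    #[derive(Default)]
    struct Entry {
        ref_count: u32,
        live_tracks: HashSet<usize>,
        next_track_id: usize,
    }

    impl Entry {
        fn track(&mut self) -> usize {
            let id = self.next_track_id;
            self.next_track_id += 1;
            self.live_tracks.insert(id);
            id
        }
        fn untrack(&mut self, id: usize) {
            self.live_tracks.remove(&id);
        }
    }

    struct Handle {
        entry: Rc<RefCell<Entry>>,
        track_id: usize,
    }

    impl Handle {
        fn new(entry: Rc<RefCell<Entry>>) -> Self {
            let track_id = {
                let mut e = entry.borrow_mut();
                e.ref_count += 1;
                e.track()
            };
            Self { entry, track_id }
        }
    }

    impl Clone for Handle {
        // Each clone is a distinct reference: bump the count and take a fresh id.
        fn clone(&self) -> Self {
            let mut e = self.entry.borrow_mut();
            e.ref_count += 1;
            let track_id = e.track();
            drop(e);
            Self { entry: self.entry.clone(), track_id }
        }
    }

    impl Drop for Handle {
        // Dropping a handle releases exactly the id it was given at creation.
        fn drop(&mut self) {
            let mut e = self.entry.borrow_mut();
            e.untrack(self.track_id);
            e.ref_count -= 1;
        }
    }

    fn main() {
        let entry = Rc::new(RefCell::new(Entry::default()));
        let a = Handle::new(entry.clone());
        let b = a.clone();
        assert_eq!(entry.borrow().ref_count, 2);
        assert_eq!(entry.borrow().live_tracks.len(), 2); // each handle owns its own id
        drop(a);
        drop(b);
        assert_eq!(entry.borrow().ref_count, 0);
        assert!(entry.borrow().live_tracks.is_empty());
    }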

@@ -272,23 +280,25 @@ impl PartialEq for NodeRef {

impl Eq for NodeRef {}

impl fmt::Display for NodeRef {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "{}", self.node_id.encode())
    }
}

impl fmt::Debug for NodeRef {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        if f.alternate() {
            write!(
                f,
                "{{\n id: {}\n filter: {:?}\n}}",
                self.node_id.encode(),
                self.filter
            )
        } else {
            write!(f, "{}", self.node_id.encode())
        }
        f.debug_struct("NodeRef")
            .field("node_id", &self.node_id)
            .field("filter", &self.filter)
            .finish()
    }
}

impl Drop for NodeRef {
    fn drop(&mut self) {
        #[cfg(feature = "tracking")]
        self.operate(|e| e.untrack(self.track_id));
        self.routing_table.drop_node_ref(self.node_id);
    }
}
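
The Debug impl moves from hand-rolled alternate-mode formatting to the standard builder, while a separate Display impl keeps the bare encoded node id. A small example of the builder approach on a hypothetical type:

    use std::fmt;

    struct NodeRefLike {
        node_id: u64,
        filter: Option<String>,
    }

    impl fmt::Debug for NodeRefLike {
        fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
            // debug_struct honours the `{:#?}` alternate flag by itself, which is
            // what the removed f.alternate() branch was emulating by hand.
            f.debug_struct("NodeRefLike")
                .field("node_id", &self.node_id)
                .field("filter", &self.filter)
                .finish()
        }
    }

    fn main() {
        let n = NodeRefLike { node_id: 42, filter: None };
        println!("{:?}", n);  // NodeRefLike { node_id: 42, filter: None }
        println!("{:#?}", n); // multi-line, indented form
    }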

@@ -52,3 +52,6 @@ serial_test = "^0"

[build-dependencies]
capnpc = "^0"

[features]
tracking = ["veilid-core/tracking"]