Mirror of https://github.com/comit-network/xmr-btc-swap.git (synced 2025-08-24 14:15:55 -04:00)

feat(monero): Remote node load balancing (#420)

commit ff5e1c02bc (parent a201c13b5d)
55 changed files with 4537 additions and 154 deletions
.gitignore | 5 (vendored)
@@ -3,3 +3,8 @@ target/
.claude/settings.local.json
.DS_Store
build/
release-build.sh
cn_macos
target-check
monero-rpc-pool/temp_db.sqlite
monero-rpc-pool/temp.db
.vscode/settings.json | 9 (vendored)
@@ -69,6 +69,13 @@
    "unordered_set": "cpp",
    "variant": "cpp",
    "algorithm": "cpp",
    "*.rs": "rust"
    "*.rs": "rust",
    "shared_mutex": "cpp",
    "source_location": "cpp",
    "strstream": "cpp",
    "typeindex": "cpp"
  },
  "rust-analyzer.cargo.extraEnv": {
    "CARGO_TARGET_DIR": "target-check"
  }
}
Cargo.lock | 185 (generated)
@@ -586,6 +586,28 @@ dependencies = [
|
|||
"windows-sys 0.59.0",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "async-stream"
|
||||
version = "0.3.6"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "0b5a71a6f37880a80d1d7f19efd781e4b5de42c88f0722cc13bcb6cc2cfe8476"
|
||||
dependencies = [
|
||||
"async-stream-impl",
|
||||
"futures-core",
|
||||
"pin-project-lite",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "async-stream-impl"
|
||||
version = "0.3.6"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "c7c24de15d275a1ecfd47a380fb4d5ec9bfe0933f309ed5e705b775596a3574d"
|
||||
dependencies = [
|
||||
"proc-macro2",
|
||||
"quote",
|
||||
"syn 2.0.101",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "async-task"
|
||||
version = "4.7.1"
|
||||
|
@ -725,6 +747,73 @@ version = "1.4.0"
|
|||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "ace50bade8e6234aa140d9a2f552bbee1db4d353f69b8217bc503490fc1a9f26"
|
||||
|
||||
[[package]]
|
||||
name = "axum"
|
||||
version = "0.7.9"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "edca88bc138befd0323b20752846e6587272d3b03b0343c8ea28a6f819e6e71f"
|
||||
dependencies = [
|
||||
"async-trait",
|
||||
"axum-core",
|
||||
"axum-macros",
|
||||
"bytes",
|
||||
"futures-util",
|
||||
"http 1.3.1",
|
||||
"http-body 1.0.1",
|
||||
"http-body-util",
|
||||
"hyper 1.6.0",
|
||||
"hyper-util",
|
||||
"itoa 1.0.15",
|
||||
"matchit",
|
||||
"memchr",
|
||||
"mime",
|
||||
"percent-encoding",
|
||||
"pin-project-lite",
|
||||
"rustversion",
|
||||
"serde",
|
||||
"serde_json",
|
||||
"serde_path_to_error",
|
||||
"serde_urlencoded",
|
||||
"sync_wrapper",
|
||||
"tokio",
|
||||
"tower 0.5.2",
|
||||
"tower-layer",
|
||||
"tower-service",
|
||||
"tracing",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "axum-core"
|
||||
version = "0.4.5"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "09f2bd6146b97ae3359fa0cc6d6b376d9539582c7b4220f041a33ec24c226199"
|
||||
dependencies = [
|
||||
"async-trait",
|
||||
"bytes",
|
||||
"futures-util",
|
||||
"http 1.3.1",
|
||||
"http-body 1.0.1",
|
||||
"http-body-util",
|
||||
"mime",
|
||||
"pin-project-lite",
|
||||
"rustversion",
|
||||
"sync_wrapper",
|
||||
"tower-layer",
|
||||
"tower-service",
|
||||
"tracing",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "axum-macros"
|
||||
version = "0.4.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "57d123550fa8d071b7255cb0cc04dc302baa6c8c4a79f55701552684d8399bce"
|
||||
dependencies = [
|
||||
"proc-macro2",
|
||||
"quote",
|
||||
"syn 2.0.101",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "backoff"
|
||||
version = "0.4.0"
|
||||
|
@ -1572,6 +1661,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
|
|||
checksum = "ed93b9805f8ba930df42c2590f05453d5ec36cbb85d018868a5b24d31f6ac000"
|
||||
dependencies = [
|
||||
"clap_builder",
|
||||
"clap_derive",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
|
@ -1586,6 +1676,18 @@ dependencies = [
|
|||
"strsim 0.11.1",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "clap_derive"
|
||||
version = "4.5.32"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "09176aae279615badda0765c0c0b3f6ed53f4709118af73cf4655d85d1530cd7"
|
||||
dependencies = [
|
||||
"heck 0.5.0",
|
||||
"proc-macro2",
|
||||
"quote",
|
||||
"syn 2.0.101",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "clap_lex"
|
||||
version = "0.7.4"
|
||||
|
@ -5589,6 +5691,12 @@ version = "0.1.10"
|
|||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "2532096657941c2fea9c289d370a250971c689d4f143798ff67113ec042024a5"
|
||||
|
||||
[[package]]
|
||||
name = "matchit"
|
||||
version = "0.7.3"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "0e7465ac9959cc2b1404e8e2367b43684a6d13790fe23056cc8c6c5a6b7bcb94"
|
||||
|
||||
[[package]]
|
||||
name = "md-5"
|
||||
version = "0.10.6"
|
||||
|
@ -5815,6 +5923,35 @@ dependencies = [
|
|||
"tracing",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "monero-rpc-pool"
|
||||
version = "0.1.0"
|
||||
dependencies = [
|
||||
"anyhow",
|
||||
"axum",
|
||||
"chrono",
|
||||
"clap 4.5.38",
|
||||
"dirs 5.0.1",
|
||||
"futures",
|
||||
"monero",
|
||||
"monero-rpc",
|
||||
"rand 0.8.5",
|
||||
"regex",
|
||||
"reqwest",
|
||||
"serde",
|
||||
"serde_json",
|
||||
"sqlx",
|
||||
"tokio",
|
||||
"tokio-test",
|
||||
"tower 0.4.13",
|
||||
"tower-http 0.5.2",
|
||||
"tracing",
|
||||
"tracing-subscriber",
|
||||
"typeshare",
|
||||
"url",
|
||||
"uuid",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "monero-sys"
|
||||
version = "0.1.0"
|
||||
|
@ -8599,6 +8736,16 @@ dependencies = [
|
|||
"serde",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "serde_path_to_error"
|
||||
version = "0.1.17"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "59fab13f937fa393d08645bf3a84bdfe86e296747b506ada67bb15f10f218b2a"
|
||||
dependencies = [
|
||||
"itoa 1.0.15",
|
||||
"serde",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "serde_repr"
|
||||
version = "0.1.20"
|
||||
|
@ -9087,6 +9234,7 @@ checksum = "ee6798b1838b6a0f69c007c133b8df5866302197e404e8b6ee8ed3e3a5e68dc6"
|
|||
dependencies = [
|
||||
"base64 0.22.1",
|
||||
"bytes",
|
||||
"chrono",
|
||||
"crc",
|
||||
"crossbeam-queue",
|
||||
"either",
|
||||
|
@ -9164,6 +9312,7 @@ dependencies = [
|
|||
"bitflags 2.9.1",
|
||||
"byteorder",
|
||||
"bytes",
|
||||
"chrono",
|
||||
"crc",
|
||||
"digest 0.10.7",
|
||||
"dotenvy",
|
||||
|
@ -9205,6 +9354,7 @@ dependencies = [
|
|||
"base64 0.22.1",
|
||||
"bitflags 2.9.1",
|
||||
"byteorder",
|
||||
"chrono",
|
||||
"crc",
|
||||
"dotenvy",
|
||||
"etcetera",
|
||||
|
@ -9239,6 +9389,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
|
|||
checksum = "c2d12fe70b2c1b4401038055f90f151b78208de1f9f89a7dbfd41587a10c3eea"
|
||||
dependencies = [
|
||||
"atoi",
|
||||
"chrono",
|
||||
"flume",
|
||||
"futures-channel",
|
||||
"futures-core",
|
||||
|
@ -9483,6 +9634,7 @@ dependencies = [
|
|||
"monero",
|
||||
"monero-harness",
|
||||
"monero-rpc",
|
||||
"monero-rpc-pool",
|
||||
"monero-sys",
|
||||
"once_cell",
|
||||
"pem",
|
||||
|
@ -9518,7 +9670,7 @@ dependencies = [
|
|||
"toml",
|
||||
"tor-rtcompat",
|
||||
"tower 0.4.13",
|
||||
"tower-http",
|
||||
"tower-http 0.3.5",
|
||||
"tracing",
|
||||
"tracing-appender",
|
||||
"tracing-subscriber",
|
||||
|
@ -10442,6 +10594,19 @@ dependencies = [
|
|||
"xattr",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "tokio-test"
|
||||
version = "0.4.4"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "2468baabc3311435b55dd935f702f42cd1b8abb7e754fb7dfb16bd36aa88f9f7"
|
||||
dependencies = [
|
||||
"async-stream",
|
||||
"bytes",
|
||||
"futures-core",
|
||||
"tokio",
|
||||
"tokio-stream",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "tokio-tungstenite"
|
||||
version = "0.15.0"
|
||||
|
@ -11528,6 +11693,7 @@ dependencies = [
|
|||
"tokio",
|
||||
"tower-layer",
|
||||
"tower-service",
|
||||
"tracing",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
|
@ -11560,6 +11726,22 @@ dependencies = [
|
|||
"uuid",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "tower-http"
|
||||
version = "0.5.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "1e9cd434a998747dd2c4276bc96ee2e0c7a2eadf3cae88e52be55a05fa9053f5"
|
||||
dependencies = [
|
||||
"bitflags 2.9.1",
|
||||
"bytes",
|
||||
"http 1.3.1",
|
||||
"http-body 1.0.1",
|
||||
"http-body-util",
|
||||
"pin-project-lite",
|
||||
"tower-layer",
|
||||
"tower-service",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "tower-layer"
|
||||
version = "0.3.3"
|
||||
|
@ -11962,6 +12144,7 @@ name = "unstoppableswap-gui-rs"
|
|||
version = "2.2.0-beta.2"
|
||||
dependencies = [
|
||||
"anyhow",
|
||||
"monero-rpc-pool",
|
||||
"rustls 0.23.27",
|
||||
"serde",
|
||||
"serde_json",
|
||||
|
Cargo.toml (workspace)
@@ -1,6 +1,6 @@
[workspace]
resolver = "2"
members = ["monero-rpc", "monero-sys", "src-tauri", "swap"]
members = ["monero-rpc", "monero-rpc-pool", "monero-sys", "src-tauri", "swap"]

[profile.release]
opt-level = 0
dev_scripts/bump-version.sh | 26 (new executable file)
@@ -0,0 +1,26 @@
#!/bin/bash
set -eu

if [ "$#" -ne 1 ]; then
    echo "Usage: $0 <version>"
    exit 1
fi

VERSION=$1
TODAY=$(date +%Y-%m-%d)
echo "Bumping version to $VERSION"

# Using sed and assuming GNU sed syntax as this is for the github workflow.

# Update version in tauri.conf.json
sed -i 's/"version": "[^"]*"/"version": "'"$VERSION"'"/' src-tauri/tauri.conf.json

# Update version in Cargo.toml files
sed -i -E 's/^version = "[0-9]+\.[0-9]+\.[0-9]+"/version = "'"$VERSION"'"/' swap/Cargo.toml src-tauri/Cargo.toml

# Update changelog
sed -i "s/^## \\[Unreleased\\]/## [$VERSION] - $TODAY/" CHANGELOG.md
# Add a new [Unreleased] section at the top
sed -i '3i## [Unreleased]\n' CHANGELOG.md

echo "Updated all files to version $VERSION."
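For reference, the script takes a single version argument and is run from the repository root, e.g. `./dev_scripts/bump-version.sh 1.2.3` (the version number here is only an example); it rewrites src-tauri/tauri.conf.json, the swap and src-tauri Cargo.toml files, and CHANGELOG.md in place.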
justfile | 5
@@ -101,3 +101,8 @@ docker-prune-network:
# Install dependencies required for building monero-sys
prepare_mac_os_brew_dependencies:
    cd dev_scripts && chmod +x ./brew_dependencies_install.sh && ./brew_dependencies_install.sh

# Takes a crate (e.g. monero-rpc-pool) and uses code2prompt to copy it to the clipboard
# E.g. code2prompt . --exclude "*.lock" --exclude ".sqlx/*" --exclude "target"
code2prompt_single_crate crate:
    cd {{crate}} && code2prompt . --exclude "*.lock" --exclude ".sqlx/*" --exclude "target"
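With the recipe above, a command such as `just code2prompt_single_crate monero-rpc-pool` (the crate name is just an example argument) changes into that crate's directory and invokes code2prompt with the listed excludes.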
monero-rpc-pool/.sqlx/query-132666c849bf0db14e50ef41f429e17b7c1afd21031edf3af40fadfb79ef2597.json | 26 (generated, new file)
@@ -0,0 +1,26 @@
|
|||
{
|
||||
"db_name": "SQLite",
|
||||
"query": "\n SELECT \n CAST(SUM(CASE WHEN hc.was_successful THEN 1 ELSE 0 END) AS INTEGER) as \"successful!: i64\",\n CAST(SUM(CASE WHEN NOT hc.was_successful THEN 1 ELSE 0 END) AS INTEGER) as \"unsuccessful!: i64\"\n FROM health_checks hc\n JOIN monero_nodes n ON hc.node_id = n.id\n WHERE n.network = ?\n ",
|
||||
"describe": {
|
||||
"columns": [
|
||||
{
|
||||
"name": "successful!: i64",
|
||||
"ordinal": 0,
|
||||
"type_info": "Integer"
|
||||
},
|
||||
{
|
||||
"name": "unsuccessful!: i64",
|
||||
"ordinal": 1,
|
||||
"type_info": "Integer"
|
||||
}
|
||||
],
|
||||
"parameters": {
|
||||
"Right": 1
|
||||
},
|
||||
"nullable": [
|
||||
true,
|
||||
true
|
||||
]
|
||||
},
|
||||
"hash": "132666c849bf0db14e50ef41f429e17b7c1afd21031edf3af40fadfb79ef2597"
|
||||
}
|
monero-rpc-pool/.sqlx/query-3e8f39a6ec4443cec6497672891d12bbf7c1d0aca061827740af88ced863ae23.json | 56 (generated, new file)
@@ -0,0 +1,56 @@
|
|||
{
|
||||
"db_name": "SQLite",
|
||||
"query": "\n SELECT \n id as \"id!: i64\",\n scheme,\n host,\n port,\n full_url,\n network as \"network!: String\",\n first_seen_at\n FROM monero_nodes \n ORDER BY id\n ",
|
||||
"describe": {
|
||||
"columns": [
|
||||
{
|
||||
"name": "id!: i64",
|
||||
"ordinal": 0,
|
||||
"type_info": "Integer"
|
||||
},
|
||||
{
|
||||
"name": "scheme",
|
||||
"ordinal": 1,
|
||||
"type_info": "Text"
|
||||
},
|
||||
{
|
||||
"name": "host",
|
||||
"ordinal": 2,
|
||||
"type_info": "Text"
|
||||
},
|
||||
{
|
||||
"name": "port",
|
||||
"ordinal": 3,
|
||||
"type_info": "Integer"
|
||||
},
|
||||
{
|
||||
"name": "full_url",
|
||||
"ordinal": 4,
|
||||
"type_info": "Text"
|
||||
},
|
||||
{
|
||||
"name": "network!: String",
|
||||
"ordinal": 5,
|
||||
"type_info": "Text"
|
||||
},
|
||||
{
|
||||
"name": "first_seen_at",
|
||||
"ordinal": 6,
|
||||
"type_info": "Text"
|
||||
}
|
||||
],
|
||||
"parameters": {
|
||||
"Right": 0
|
||||
},
|
||||
"nullable": [
|
||||
false,
|
||||
false,
|
||||
false,
|
||||
false,
|
||||
false,
|
||||
false,
|
||||
false
|
||||
]
|
||||
},
|
||||
"hash": "3e8f39a6ec4443cec6497672891d12bbf7c1d0aca061827740af88ced863ae23"
|
||||
}
|
monero-rpc-pool/.sqlx/query-549f5ef13ec7bf5d987dcb893753a9c903edcafa3a66bd82965b40a9e7f238b6.json | 116 (generated, new file)
@@ -0,0 +1,116 @@
|
|||
{
|
||||
"db_name": "SQLite",
|
||||
"query": "\n SELECT \n n.id as \"id!: i64\",\n n.scheme,\n n.host,\n n.port,\n n.full_url,\n n.network,\n n.first_seen_at,\n CAST(COALESCE(stats.success_count, 0) AS INTEGER) as \"success_count!: i64\",\n CAST(COALESCE(stats.failure_count, 0) AS INTEGER) as \"failure_count!: i64\",\n stats.last_success as \"last_success?: String\",\n stats.last_failure as \"last_failure?: String\",\n stats.last_checked as \"last_checked?: String\",\n CAST(CASE WHEN reliable_nodes.node_id IS NOT NULL THEN 1 ELSE 0 END AS INTEGER) as \"is_reliable!: i64\",\n stats.avg_latency_ms as \"avg_latency_ms?: f64\",\n stats.min_latency_ms as \"min_latency_ms?: f64\",\n stats.max_latency_ms as \"max_latency_ms?: f64\",\n stats.last_latency_ms as \"last_latency_ms?: f64\"\n FROM monero_nodes n\n LEFT JOIN (\n SELECT \n node_id,\n SUM(CASE WHEN was_successful THEN 1 ELSE 0 END) as success_count,\n SUM(CASE WHEN NOT was_successful THEN 1 ELSE 0 END) as failure_count,\n MAX(CASE WHEN was_successful THEN timestamp END) as last_success,\n MAX(CASE WHEN NOT was_successful THEN timestamp END) as last_failure,\n MAX(timestamp) as last_checked,\n AVG(CASE WHEN was_successful AND latency_ms IS NOT NULL THEN latency_ms END) as avg_latency_ms,\n MIN(CASE WHEN was_successful AND latency_ms IS NOT NULL THEN latency_ms END) as min_latency_ms,\n MAX(CASE WHEN was_successful AND latency_ms IS NOT NULL THEN latency_ms END) as max_latency_ms,\n (SELECT latency_ms FROM health_checks hc2 WHERE hc2.node_id = health_checks.node_id ORDER BY timestamp DESC LIMIT 1) as last_latency_ms\n FROM health_checks \n GROUP BY node_id\n ) stats ON n.id = stats.node_id\n LEFT JOIN (\n SELECT DISTINCT node_id FROM (\n SELECT \n n2.id as node_id,\n COALESCE(s2.success_count, 0) as success_count,\n COALESCE(s2.failure_count, 0) as failure_count,\n s2.avg_latency_ms,\n (CAST(COALESCE(s2.success_count, 0) AS REAL) / CAST(COALESCE(s2.success_count, 0) + COALESCE(s2.failure_count, 0) AS REAL)) * \n (MIN(COALESCE(s2.success_count, 0) + COALESCE(s2.failure_count, 0), 200) / 200.0) * 0.8 +\n CASE \n WHEN s2.avg_latency_ms IS NOT NULL THEN (1.0 - (MIN(s2.avg_latency_ms, 2000) / 2000.0)) * 0.2\n ELSE 0.0 \n END as reliability_score\n FROM monero_nodes n2\n LEFT JOIN (\n SELECT \n node_id,\n SUM(CASE WHEN was_successful THEN 1 ELSE 0 END) as success_count,\n SUM(CASE WHEN NOT was_successful THEN 1 ELSE 0 END) as failure_count,\n AVG(CASE WHEN was_successful AND latency_ms IS NOT NULL THEN latency_ms END) as avg_latency_ms\n FROM health_checks \n GROUP BY node_id\n ) s2 ON n2.id = s2.node_id\n WHERE n2.network = ? AND (COALESCE(s2.success_count, 0) + COALESCE(s2.failure_count, 0)) > 0\n ORDER BY reliability_score DESC\n LIMIT 4\n )\n ) reliable_nodes ON n.id = reliable_nodes.node_id\n WHERE n.network = ? AND (COALESCE(stats.success_count, 0) + COALESCE(stats.failure_count, 0)) > 0\n ORDER BY \n (CAST(COALESCE(stats.success_count, 0) AS REAL) / CAST(COALESCE(stats.success_count, 0) + COALESCE(stats.failure_count, 0) AS REAL)) DESC,\n stats.avg_latency_ms ASC\n LIMIT ?\n ",
|
||||
"describe": {
|
||||
"columns": [
|
||||
{
|
||||
"name": "id!: i64",
|
||||
"ordinal": 0,
|
||||
"type_info": "Integer"
|
||||
},
|
||||
{
|
||||
"name": "scheme",
|
||||
"ordinal": 1,
|
||||
"type_info": "Text"
|
||||
},
|
||||
{
|
||||
"name": "host",
|
||||
"ordinal": 2,
|
||||
"type_info": "Text"
|
||||
},
|
||||
{
|
||||
"name": "port",
|
||||
"ordinal": 3,
|
||||
"type_info": "Integer"
|
||||
},
|
||||
{
|
||||
"name": "full_url",
|
||||
"ordinal": 4,
|
||||
"type_info": "Text"
|
||||
},
|
||||
{
|
||||
"name": "network",
|
||||
"ordinal": 5,
|
||||
"type_info": "Text"
|
||||
},
|
||||
{
|
||||
"name": "first_seen_at",
|
||||
"ordinal": 6,
|
||||
"type_info": "Text"
|
||||
},
|
||||
{
|
||||
"name": "success_count!: i64",
|
||||
"ordinal": 7,
|
||||
"type_info": "Null"
|
||||
},
|
||||
{
|
||||
"name": "failure_count!: i64",
|
||||
"ordinal": 8,
|
||||
"type_info": "Null"
|
||||
},
|
||||
{
|
||||
"name": "last_success?: String",
|
||||
"ordinal": 9,
|
||||
"type_info": "Null"
|
||||
},
|
||||
{
|
||||
"name": "last_failure?: String",
|
||||
"ordinal": 10,
|
||||
"type_info": "Null"
|
||||
},
|
||||
{
|
||||
"name": "last_checked?: String",
|
||||
"ordinal": 11,
|
||||
"type_info": "Null"
|
||||
},
|
||||
{
|
||||
"name": "is_reliable!: i64",
|
||||
"ordinal": 12,
|
||||
"type_info": "Null"
|
||||
},
|
||||
{
|
||||
"name": "avg_latency_ms?: f64",
|
||||
"ordinal": 13,
|
||||
"type_info": "Null"
|
||||
},
|
||||
{
|
||||
"name": "min_latency_ms?: f64",
|
||||
"ordinal": 14,
|
||||
"type_info": "Null"
|
||||
},
|
||||
{
|
||||
"name": "max_latency_ms?: f64",
|
||||
"ordinal": 15,
|
||||
"type_info": "Null"
|
||||
},
|
||||
{
|
||||
"name": "last_latency_ms?: f64",
|
||||
"ordinal": 16,
|
||||
"type_info": "Float"
|
||||
}
|
||||
],
|
||||
"parameters": {
|
||||
"Right": 3
|
||||
},
|
||||
"nullable": [
|
||||
true,
|
||||
false,
|
||||
false,
|
||||
false,
|
||||
false,
|
||||
false,
|
||||
false,
|
||||
null,
|
||||
null,
|
||||
null,
|
||||
null,
|
||||
null,
|
||||
null,
|
||||
null,
|
||||
null,
|
||||
null,
|
||||
true
|
||||
]
|
||||
},
|
||||
"hash": "549f5ef13ec7bf5d987dcb893753a9c903edcafa3a66bd82965b40a9e7f238b6"
|
||||
}
|
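The long query above (and the similar cached queries that follow) embeds a reliability score used to flag the top four nodes per network as "reliable". A minimal Rust sketch of that expression follows; the function name and signature are illustrative and do not come from the crate's source, but the weights mirror the SQL (a success ratio scaled by sample volume, worth up to 0.8, plus a latency bonus worth up to 0.2):

fn reliability_score(success: u64, failure: u64, avg_latency_ms: Option<f64>) -> f64 {
    let total = (success + failure) as f64;
    if total == 0.0 {
        return 0.0; // nodes with no recorded health checks are excluded by the SQL
    }
    let success_ratio = success as f64 / total;
    let volume_weight = total.min(200.0) / 200.0; // saturates after 200 recorded checks
    let latency_bonus = match avg_latency_ms {
        // faster average latency (capped at 2000 ms) earns up to 0.2
        Some(ms) => (1.0 - ms.min(2000.0) / 2000.0) * 0.2,
        None => 0.0,
    };
    success_ratio * volume_weight * 0.8 + latency_bonus
}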
monero-rpc-pool/.sqlx/query-56549d93f0e2106297b85565a52b2d9ac64d5b50fb7aa6028be3fcf266fc1d5d.json | 12 (generated, new file)
@@ -0,0 +1,12 @@
|
|||
{
|
||||
"db_name": "SQLite",
|
||||
"query": "\n INSERT INTO health_checks (node_id, timestamp, was_successful, latency_ms)\n VALUES (?, ?, ?, ?)\n ",
|
||||
"describe": {
|
||||
"columns": [],
|
||||
"parameters": {
|
||||
"Right": 4
|
||||
},
|
||||
"nullable": []
|
||||
},
|
||||
"hash": "56549d93f0e2106297b85565a52b2d9ac64d5b50fb7aa6028be3fcf266fc1d5d"
|
||||
}
|
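This cached statement records one health-check sample per node. A hedged sketch of how it could be issued through sqlx is shown below; the function name and error handling are illustrative only, since the actual code lives in monero-rpc-pool/src/database.rs (only partially shown in this excerpt). The .sqlx cache is what lets sqlx::query! compile without a live DATABASE_URL.

use sqlx::SqlitePool;

async fn record_health_check(
    pool: &SqlitePool,
    node_id: i64,
    was_successful: bool,
    latency_ms: Option<f64>,
) -> anyhow::Result<()> {
    let timestamp = chrono::Utc::now().to_rfc3339();
    // Checked against the .sqlx offline cache at compile time.
    sqlx::query!(
        "INSERT INTO health_checks (node_id, timestamp, was_successful, latency_ms) VALUES (?, ?, ?, ?)",
        node_id,
        timestamp,
        was_successful,
        latency_ms
    )
    .execute(pool)
    .await?;
    Ok(())
}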
monero-rpc-pool/.sqlx/query-5736de2aac47eb69d7f6835d266aa28732b02a5e8e055ffaebcb452ed1b5044c.json | 12 (generated, new file)
@@ -0,0 +1,12 @@
|
|||
{
|
||||
"db_name": "SQLite",
|
||||
"query": "\n UPDATE monero_nodes \n SET network = ?, updated_at = ?\n WHERE full_url = ?\n ",
|
||||
"describe": {
|
||||
"columns": [],
|
||||
"parameters": {
|
||||
"Right": 3
|
||||
},
|
||||
"nullable": []
|
||||
},
|
||||
"hash": "5736de2aac47eb69d7f6835d266aa28732b02a5e8e055ffaebcb452ed1b5044c"
|
||||
}
|
monero-rpc-pool/.sqlx/query-5798d9589772742f074e0ecc2551a40d943bfb7ed2e295f09f12d77cb65ce821.json | 20 (generated, new file)
@@ -0,0 +1,20 @@
|
|||
{
|
||||
"db_name": "SQLite",
|
||||
"query": "\n INSERT INTO monero_nodes (scheme, host, port, full_url, network, first_seen_at, updated_at)\n VALUES (?, ?, ?, ?, ?, ?, ?)\n ON CONFLICT(full_url) DO UPDATE SET\n network = excluded.network,\n updated_at = excluded.updated_at\n RETURNING id\n ",
|
||||
"describe": {
|
||||
"columns": [
|
||||
{
|
||||
"name": "id",
|
||||
"ordinal": 0,
|
||||
"type_info": "Integer"
|
||||
}
|
||||
],
|
||||
"parameters": {
|
||||
"Right": 7
|
||||
},
|
||||
"nullable": [
|
||||
false
|
||||
]
|
||||
},
|
||||
"hash": "5798d9589772742f074e0ecc2551a40d943bfb7ed2e295f09f12d77cb65ce821"
|
||||
}
|
monero-rpc-pool/.sqlx/query-5a25c95c04b11a60a04ad97b5fb684e9a0cc2eb5daf64f33e924f0c38a2edfec.json | 116 (generated, new file)
@@ -0,0 +1,116 @@
|
|||
{
|
||||
"db_name": "SQLite",
|
||||
"query": "\n SELECT \n n.id as \"id!: i64\",\n n.scheme,\n n.host,\n n.port,\n n.full_url,\n n.network,\n n.first_seen_at,\n CAST(COALESCE(stats.success_count, 0) AS INTEGER) as \"success_count!: i64\",\n CAST(COALESCE(stats.failure_count, 0) AS INTEGER) as \"failure_count!: i64\",\n stats.last_success as \"last_success?: String\",\n stats.last_failure as \"last_failure?: String\",\n stats.last_checked as \"last_checked?: String\",\n CAST(CASE WHEN reliable_nodes.node_id IS NOT NULL THEN 1 ELSE 0 END AS INTEGER) as \"is_reliable!: i64\",\n stats.avg_latency_ms as \"avg_latency_ms?: f64\",\n stats.min_latency_ms as \"min_latency_ms?: f64\",\n stats.max_latency_ms as \"max_latency_ms?: f64\",\n stats.last_latency_ms as \"last_latency_ms?: f64\"\n FROM monero_nodes n\n LEFT JOIN (\n SELECT \n node_id,\n SUM(CASE WHEN was_successful THEN 1 ELSE 0 END) as success_count,\n SUM(CASE WHEN NOT was_successful THEN 1 ELSE 0 END) as failure_count,\n MAX(CASE WHEN was_successful THEN timestamp END) as last_success,\n MAX(CASE WHEN NOT was_successful THEN timestamp END) as last_failure,\n MAX(timestamp) as last_checked,\n AVG(CASE WHEN was_successful AND latency_ms IS NOT NULL THEN latency_ms END) as avg_latency_ms,\n MIN(CASE WHEN was_successful AND latency_ms IS NOT NULL THEN latency_ms END) as min_latency_ms,\n MAX(CASE WHEN was_successful AND latency_ms IS NOT NULL THEN latency_ms END) as max_latency_ms,\n (SELECT latency_ms FROM health_checks hc2 WHERE hc2.node_id = health_checks.node_id ORDER BY timestamp DESC LIMIT 1) as last_latency_ms\n FROM health_checks \n GROUP BY node_id\n ) stats ON n.id = stats.node_id\n LEFT JOIN (\n SELECT DISTINCT node_id FROM (\n SELECT \n n2.id as node_id,\n COALESCE(s2.success_count, 0) as success_count,\n COALESCE(s2.failure_count, 0) as failure_count,\n s2.avg_latency_ms,\n (CAST(COALESCE(s2.success_count, 0) AS REAL) / CAST(COALESCE(s2.success_count, 0) + COALESCE(s2.failure_count, 0) AS REAL)) * \n (MIN(COALESCE(s2.success_count, 0) + COALESCE(s2.failure_count, 0), 200) / 200.0) * 0.8 +\n CASE \n WHEN s2.avg_latency_ms IS NOT NULL THEN (1.0 - (MIN(s2.avg_latency_ms, 2000) / 2000.0)) * 0.2\n ELSE 0.0 \n END as reliability_score\n FROM monero_nodes n2\n LEFT JOIN (\n SELECT \n node_id,\n SUM(CASE WHEN was_successful THEN 1 ELSE 0 END) as success_count,\n SUM(CASE WHEN NOT was_successful THEN 1 ELSE 0 END) as failure_count,\n AVG(CASE WHEN was_successful AND latency_ms IS NOT NULL THEN latency_ms END) as avg_latency_ms\n FROM health_checks \n GROUP BY node_id\n ) s2 ON n2.id = s2.node_id\n WHERE n2.network = ? AND (COALESCE(s2.success_count, 0) + COALESCE(s2.failure_count, 0)) > 0\n ORDER BY reliability_score DESC\n LIMIT 4\n )\n ) reliable_nodes ON n.id = reliable_nodes.node_id\n WHERE n.network = ?\n ORDER BY RANDOM()\n LIMIT ?\n ",
|
||||
"describe": {
|
||||
"columns": [
|
||||
{
|
||||
"name": "id!: i64",
|
||||
"ordinal": 0,
|
||||
"type_info": "Integer"
|
||||
},
|
||||
{
|
||||
"name": "scheme",
|
||||
"ordinal": 1,
|
||||
"type_info": "Text"
|
||||
},
|
||||
{
|
||||
"name": "host",
|
||||
"ordinal": 2,
|
||||
"type_info": "Text"
|
||||
},
|
||||
{
|
||||
"name": "port",
|
||||
"ordinal": 3,
|
||||
"type_info": "Integer"
|
||||
},
|
||||
{
|
||||
"name": "full_url",
|
||||
"ordinal": 4,
|
||||
"type_info": "Text"
|
||||
},
|
||||
{
|
||||
"name": "network",
|
||||
"ordinal": 5,
|
||||
"type_info": "Text"
|
||||
},
|
||||
{
|
||||
"name": "first_seen_at",
|
||||
"ordinal": 6,
|
||||
"type_info": "Text"
|
||||
},
|
||||
{
|
||||
"name": "success_count!: i64",
|
||||
"ordinal": 7,
|
||||
"type_info": "Null"
|
||||
},
|
||||
{
|
||||
"name": "failure_count!: i64",
|
||||
"ordinal": 8,
|
||||
"type_info": "Null"
|
||||
},
|
||||
{
|
||||
"name": "last_success?: String",
|
||||
"ordinal": 9,
|
||||
"type_info": "Null"
|
||||
},
|
||||
{
|
||||
"name": "last_failure?: String",
|
||||
"ordinal": 10,
|
||||
"type_info": "Null"
|
||||
},
|
||||
{
|
||||
"name": "last_checked?: String",
|
||||
"ordinal": 11,
|
||||
"type_info": "Null"
|
||||
},
|
||||
{
|
||||
"name": "is_reliable!: i64",
|
||||
"ordinal": 12,
|
||||
"type_info": "Null"
|
||||
},
|
||||
{
|
||||
"name": "avg_latency_ms?: f64",
|
||||
"ordinal": 13,
|
||||
"type_info": "Null"
|
||||
},
|
||||
{
|
||||
"name": "min_latency_ms?: f64",
|
||||
"ordinal": 14,
|
||||
"type_info": "Null"
|
||||
},
|
||||
{
|
||||
"name": "max_latency_ms?: f64",
|
||||
"ordinal": 15,
|
||||
"type_info": "Null"
|
||||
},
|
||||
{
|
||||
"name": "last_latency_ms?: f64",
|
||||
"ordinal": 16,
|
||||
"type_info": "Float"
|
||||
}
|
||||
],
|
||||
"parameters": {
|
||||
"Right": 3
|
||||
},
|
||||
"nullable": [
|
||||
true,
|
||||
false,
|
||||
false,
|
||||
false,
|
||||
false,
|
||||
false,
|
||||
false,
|
||||
null,
|
||||
null,
|
||||
null,
|
||||
null,
|
||||
null,
|
||||
null,
|
||||
null,
|
||||
null,
|
||||
null,
|
||||
true
|
||||
]
|
||||
},
|
||||
"hash": "5a25c95c04b11a60a04ad97b5fb684e9a0cc2eb5daf64f33e924f0c38a2edfec"
|
||||
}
|
monero-rpc-pool/.sqlx/query-5ff27bdd9b6e7aadc8dd4936e0ee7e6a611aaef28697a0e9535dfb30d1c4861d.json | 116 (generated, new file)
@@ -0,0 +1,116 @@
|
|||
{
|
||||
"db_name": "SQLite",
|
||||
"query": "\n SELECT \n n.id as \"id!: i64\",\n n.scheme,\n n.host,\n n.port,\n n.full_url,\n n.network,\n n.first_seen_at,\n CAST(COALESCE(stats.success_count, 0) AS INTEGER) as \"success_count!: i64\",\n CAST(COALESCE(stats.failure_count, 0) AS INTEGER) as \"failure_count!: i64\",\n stats.last_success as \"last_success?: String\",\n stats.last_failure as \"last_failure?: String\",\n stats.last_checked as \"last_checked?: String\",\n CAST(CASE WHEN reliable_nodes.node_id IS NOT NULL THEN 1 ELSE 0 END AS INTEGER) as \"is_reliable!: i64\",\n stats.avg_latency_ms as \"avg_latency_ms?: f64\",\n stats.min_latency_ms as \"min_latency_ms?: f64\",\n stats.max_latency_ms as \"max_latency_ms?: f64\",\n stats.last_latency_ms as \"last_latency_ms?: f64\"\n FROM monero_nodes n\n LEFT JOIN (\n SELECT \n node_id,\n SUM(CASE WHEN was_successful THEN 1 ELSE 0 END) as success_count,\n SUM(CASE WHEN NOT was_successful THEN 1 ELSE 0 END) as failure_count,\n MAX(CASE WHEN was_successful THEN timestamp END) as last_success,\n MAX(CASE WHEN NOT was_successful THEN timestamp END) as last_failure,\n MAX(timestamp) as last_checked,\n AVG(CASE WHEN was_successful AND latency_ms IS NOT NULL THEN latency_ms END) as avg_latency_ms,\n MIN(CASE WHEN was_successful AND latency_ms IS NOT NULL THEN latency_ms END) as min_latency_ms,\n MAX(CASE WHEN was_successful AND latency_ms IS NOT NULL THEN latency_ms END) as max_latency_ms,\n (SELECT latency_ms FROM health_checks hc2 WHERE hc2.node_id = health_checks.node_id ORDER BY timestamp DESC LIMIT 1) as last_latency_ms\n FROM health_checks \n GROUP BY node_id\n ) stats ON n.id = stats.node_id\n LEFT JOIN (\n SELECT DISTINCT node_id FROM (\n SELECT \n n2.id as node_id,\n COALESCE(s2.success_count, 0) as success_count,\n COALESCE(s2.failure_count, 0) as failure_count,\n s2.avg_latency_ms,\n (CAST(COALESCE(s2.success_count, 0) AS REAL) / CAST(COALESCE(s2.success_count, 0) + COALESCE(s2.failure_count, 0) AS REAL)) * \n (MIN(COALESCE(s2.success_count, 0) + COALESCE(s2.failure_count, 0), 200) / 200.0) * 0.8 +\n CASE \n WHEN s2.avg_latency_ms IS NOT NULL THEN (1.0 - (MIN(s2.avg_latency_ms, 2000) / 2000.0)) * 0.2\n ELSE 0.0 \n END as reliability_score\n FROM monero_nodes n2\n LEFT JOIN (\n SELECT \n node_id,\n SUM(CASE WHEN was_successful THEN 1 ELSE 0 END) as success_count,\n SUM(CASE WHEN NOT was_successful THEN 1 ELSE 0 END) as failure_count,\n AVG(CASE WHEN was_successful AND latency_ms IS NOT NULL THEN latency_ms END) as avg_latency_ms\n FROM health_checks \n GROUP BY node_id\n ) s2 ON n2.id = s2.node_id\n WHERE n2.network = ? AND (COALESCE(s2.success_count, 0) + COALESCE(s2.failure_count, 0)) > 0\n ORDER BY reliability_score DESC\n LIMIT 4\n )\n ) reliable_nodes ON n.id = reliable_nodes.node_id\n WHERE n.network = ?\n ORDER BY stats.avg_latency_ms ASC, stats.success_count DESC\n ",
|
||||
"describe": {
|
||||
"columns": [
|
||||
{
|
||||
"name": "id!: i64",
|
||||
"ordinal": 0,
|
||||
"type_info": "Integer"
|
||||
},
|
||||
{
|
||||
"name": "scheme",
|
||||
"ordinal": 1,
|
||||
"type_info": "Text"
|
||||
},
|
||||
{
|
||||
"name": "host",
|
||||
"ordinal": 2,
|
||||
"type_info": "Text"
|
||||
},
|
||||
{
|
||||
"name": "port",
|
||||
"ordinal": 3,
|
||||
"type_info": "Integer"
|
||||
},
|
||||
{
|
||||
"name": "full_url",
|
||||
"ordinal": 4,
|
||||
"type_info": "Text"
|
||||
},
|
||||
{
|
||||
"name": "network",
|
||||
"ordinal": 5,
|
||||
"type_info": "Text"
|
||||
},
|
||||
{
|
||||
"name": "first_seen_at",
|
||||
"ordinal": 6,
|
||||
"type_info": "Text"
|
||||
},
|
||||
{
|
||||
"name": "success_count!: i64",
|
||||
"ordinal": 7,
|
||||
"type_info": "Null"
|
||||
},
|
||||
{
|
||||
"name": "failure_count!: i64",
|
||||
"ordinal": 8,
|
||||
"type_info": "Null"
|
||||
},
|
||||
{
|
||||
"name": "last_success?: String",
|
||||
"ordinal": 9,
|
||||
"type_info": "Null"
|
||||
},
|
||||
{
|
||||
"name": "last_failure?: String",
|
||||
"ordinal": 10,
|
||||
"type_info": "Null"
|
||||
},
|
||||
{
|
||||
"name": "last_checked?: String",
|
||||
"ordinal": 11,
|
||||
"type_info": "Null"
|
||||
},
|
||||
{
|
||||
"name": "is_reliable!: i64",
|
||||
"ordinal": 12,
|
||||
"type_info": "Null"
|
||||
},
|
||||
{
|
||||
"name": "avg_latency_ms?: f64",
|
||||
"ordinal": 13,
|
||||
"type_info": "Null"
|
||||
},
|
||||
{
|
||||
"name": "min_latency_ms?: f64",
|
||||
"ordinal": 14,
|
||||
"type_info": "Null"
|
||||
},
|
||||
{
|
||||
"name": "max_latency_ms?: f64",
|
||||
"ordinal": 15,
|
||||
"type_info": "Null"
|
||||
},
|
||||
{
|
||||
"name": "last_latency_ms?: f64",
|
||||
"ordinal": 16,
|
||||
"type_info": "Float"
|
||||
}
|
||||
],
|
||||
"parameters": {
|
||||
"Right": 2
|
||||
},
|
||||
"nullable": [
|
||||
true,
|
||||
false,
|
||||
false,
|
||||
false,
|
||||
false,
|
||||
false,
|
||||
false,
|
||||
null,
|
||||
null,
|
||||
null,
|
||||
null,
|
||||
null,
|
||||
null,
|
||||
null,
|
||||
null,
|
||||
null,
|
||||
true
|
||||
]
|
||||
},
|
||||
"hash": "5ff27bdd9b6e7aadc8dd4936e0ee7e6a611aaef28697a0e9535dfb30d1c4861d"
|
||||
}
|
monero-rpc-pool/.sqlx/query-a032eb9773d4553aeaff4fb15ed99dbaef7d16d48750ee7bd4ab83233a9a732b.json | 116 (generated, new file)
@@ -0,0 +1,116 @@
|
|||
{
|
||||
"db_name": "SQLite",
|
||||
"query": "\n SELECT \n n.id as \"id!: i64\",\n n.scheme,\n n.host,\n n.port,\n n.full_url,\n n.network,\n n.first_seen_at,\n CAST(COALESCE(stats.success_count, 0) AS INTEGER) as \"success_count!: i64\",\n CAST(COALESCE(stats.failure_count, 0) AS INTEGER) as \"failure_count!: i64\",\n stats.last_success as \"last_success?: String\",\n stats.last_failure as \"last_failure?: String\",\n stats.last_checked as \"last_checked?: String\",\n CAST(1 AS INTEGER) as \"is_reliable!: i64\",\n stats.avg_latency_ms as \"avg_latency_ms?: f64\",\n stats.min_latency_ms as \"min_latency_ms?: f64\",\n stats.max_latency_ms as \"max_latency_ms?: f64\",\n stats.last_latency_ms as \"last_latency_ms?: f64\"\n FROM monero_nodes n\n LEFT JOIN (\n SELECT \n node_id,\n SUM(CASE WHEN was_successful THEN 1 ELSE 0 END) as success_count,\n SUM(CASE WHEN NOT was_successful THEN 1 ELSE 0 END) as failure_count,\n MAX(CASE WHEN was_successful THEN timestamp END) as last_success,\n MAX(CASE WHEN NOT was_successful THEN timestamp END) as last_failure,\n MAX(timestamp) as last_checked,\n AVG(CASE WHEN was_successful AND latency_ms IS NOT NULL THEN latency_ms END) as avg_latency_ms,\n MIN(CASE WHEN was_successful AND latency_ms IS NOT NULL THEN latency_ms END) as min_latency_ms,\n MAX(CASE WHEN was_successful AND latency_ms IS NOT NULL THEN latency_ms END) as max_latency_ms,\n (SELECT latency_ms FROM health_checks hc2 WHERE hc2.node_id = health_checks.node_id ORDER BY timestamp DESC LIMIT 1) as last_latency_ms\n FROM health_checks \n GROUP BY node_id\n ) stats ON n.id = stats.node_id\n WHERE n.network = ? AND (COALESCE(stats.success_count, 0) + COALESCE(stats.failure_count, 0)) > 0\n ORDER BY \n (CAST(COALESCE(stats.success_count, 0) AS REAL) / CAST(COALESCE(stats.success_count, 0) + COALESCE(stats.failure_count, 0) AS REAL)) * \n (MIN(COALESCE(stats.success_count, 0) + COALESCE(stats.failure_count, 0), 200) / 200.0) * 0.8 +\n CASE \n WHEN stats.avg_latency_ms IS NOT NULL THEN (1.0 - (MIN(stats.avg_latency_ms, 2000) / 2000.0)) * 0.2\n ELSE 0.0 \n END DESC\n LIMIT 4\n ",
|
||||
"describe": {
|
||||
"columns": [
|
||||
{
|
||||
"name": "id!: i64",
|
||||
"ordinal": 0,
|
||||
"type_info": "Integer"
|
||||
},
|
||||
{
|
||||
"name": "scheme",
|
||||
"ordinal": 1,
|
||||
"type_info": "Text"
|
||||
},
|
||||
{
|
||||
"name": "host",
|
||||
"ordinal": 2,
|
||||
"type_info": "Text"
|
||||
},
|
||||
{
|
||||
"name": "port",
|
||||
"ordinal": 3,
|
||||
"type_info": "Integer"
|
||||
},
|
||||
{
|
||||
"name": "full_url",
|
||||
"ordinal": 4,
|
||||
"type_info": "Text"
|
||||
},
|
||||
{
|
||||
"name": "network",
|
||||
"ordinal": 5,
|
||||
"type_info": "Text"
|
||||
},
|
||||
{
|
||||
"name": "first_seen_at",
|
||||
"ordinal": 6,
|
||||
"type_info": "Text"
|
||||
},
|
||||
{
|
||||
"name": "success_count!: i64",
|
||||
"ordinal": 7,
|
||||
"type_info": "Null"
|
||||
},
|
||||
{
|
||||
"name": "failure_count!: i64",
|
||||
"ordinal": 8,
|
||||
"type_info": "Null"
|
||||
},
|
||||
{
|
||||
"name": "last_success?: String",
|
||||
"ordinal": 9,
|
||||
"type_info": "Null"
|
||||
},
|
||||
{
|
||||
"name": "last_failure?: String",
|
||||
"ordinal": 10,
|
||||
"type_info": "Null"
|
||||
},
|
||||
{
|
||||
"name": "last_checked?: String",
|
||||
"ordinal": 11,
|
||||
"type_info": "Null"
|
||||
},
|
||||
{
|
||||
"name": "is_reliable!: i64",
|
||||
"ordinal": 12,
|
||||
"type_info": "Null"
|
||||
},
|
||||
{
|
||||
"name": "avg_latency_ms?: f64",
|
||||
"ordinal": 13,
|
||||
"type_info": "Null"
|
||||
},
|
||||
{
|
||||
"name": "min_latency_ms?: f64",
|
||||
"ordinal": 14,
|
||||
"type_info": "Null"
|
||||
},
|
||||
{
|
||||
"name": "max_latency_ms?: f64",
|
||||
"ordinal": 15,
|
||||
"type_info": "Null"
|
||||
},
|
||||
{
|
||||
"name": "last_latency_ms?: f64",
|
||||
"ordinal": 16,
|
||||
"type_info": "Float"
|
||||
}
|
||||
],
|
||||
"parameters": {
|
||||
"Right": 1
|
||||
},
|
||||
"nullable": [
|
||||
true,
|
||||
false,
|
||||
false,
|
||||
false,
|
||||
false,
|
||||
false,
|
||||
false,
|
||||
null,
|
||||
null,
|
||||
null,
|
||||
null,
|
||||
null,
|
||||
null,
|
||||
null,
|
||||
null,
|
||||
null,
|
||||
true
|
||||
]
|
||||
},
|
||||
"hash": "a032eb9773d4553aeaff4fb15ed99dbaef7d16d48750ee7bd4ab83233a9a732b"
|
||||
}
|
monero-rpc-pool/.sqlx/query-ba231efaf208a42fa857f716ef296b428c937f2eb7c8ce9c631f7f721e914c14.json | 116 (generated, new file)
@@ -0,0 +1,116 @@
|
|||
{
|
||||
"db_name": "SQLite",
|
||||
"query": "\n SELECT \n n.id as \"id!: i64\",\n n.scheme,\n n.host,\n n.port,\n n.full_url,\n n.network,\n n.first_seen_at,\n CAST(COALESCE(stats.success_count, 0) AS INTEGER) as \"success_count!: i64\",\n CAST(COALESCE(stats.failure_count, 0) AS INTEGER) as \"failure_count!: i64\",\n stats.last_success as \"last_success?: String\",\n stats.last_failure as \"last_failure?: String\",\n stats.last_checked as \"last_checked?: String\",\n CAST(CASE WHEN reliable_nodes.node_id IS NOT NULL THEN 1 ELSE 0 END AS INTEGER) as \"is_reliable!: i64\",\n stats.avg_latency_ms as \"avg_latency_ms?: f64\",\n stats.min_latency_ms as \"min_latency_ms?: f64\",\n stats.max_latency_ms as \"max_latency_ms?: f64\",\n stats.last_latency_ms as \"last_latency_ms?: f64\"\n FROM monero_nodes n\n LEFT JOIN (\n SELECT \n node_id,\n SUM(CASE WHEN was_successful THEN 1 ELSE 0 END) as success_count,\n SUM(CASE WHEN NOT was_successful THEN 1 ELSE 0 END) as failure_count,\n MAX(CASE WHEN was_successful THEN timestamp END) as last_success,\n MAX(CASE WHEN NOT was_successful THEN timestamp END) as last_failure,\n MAX(timestamp) as last_checked,\n AVG(CASE WHEN was_successful AND latency_ms IS NOT NULL THEN latency_ms END) as avg_latency_ms,\n MIN(CASE WHEN was_successful AND latency_ms IS NOT NULL THEN latency_ms END) as min_latency_ms,\n MAX(CASE WHEN was_successful AND latency_ms IS NOT NULL THEN latency_ms END) as max_latency_ms,\n (SELECT latency_ms FROM health_checks hc2 WHERE hc2.node_id = health_checks.node_id ORDER BY timestamp DESC LIMIT 1) as last_latency_ms\n FROM health_checks \n GROUP BY node_id\n ) stats ON n.id = stats.node_id\n LEFT JOIN (\n SELECT DISTINCT node_id FROM (\n SELECT \n n2.id as node_id,\n COALESCE(s2.success_count, 0) as success_count,\n COALESCE(s2.failure_count, 0) as failure_count,\n s2.avg_latency_ms,\n (CAST(COALESCE(s2.success_count, 0) AS REAL) / CAST(COALESCE(s2.success_count, 0) + COALESCE(s2.failure_count, 0) AS REAL)) * \n (MIN(COALESCE(s2.success_count, 0) + COALESCE(s2.failure_count, 0), 200) / 200.0) * 0.8 +\n CASE \n WHEN s2.avg_latency_ms IS NOT NULL THEN (1.0 - (MIN(s2.avg_latency_ms, 2000) / 2000.0)) * 0.2\n ELSE 0.0 \n END as reliability_score\n FROM monero_nodes n2\n LEFT JOIN (\n SELECT \n node_id,\n SUM(CASE WHEN was_successful THEN 1 ELSE 0 END) as success_count,\n SUM(CASE WHEN NOT was_successful THEN 1 ELSE 0 END) as failure_count,\n AVG(CASE WHEN was_successful AND latency_ms IS NOT NULL THEN latency_ms END) as avg_latency_ms\n FROM health_checks \n GROUP BY node_id\n ) s2 ON n2.id = s2.node_id\n WHERE n2.network = ? AND (COALESCE(s2.success_count, 0) + COALESCE(s2.failure_count, 0)) > 0\n ORDER BY reliability_score DESC\n LIMIT 4\n )\n ) reliable_nodes ON n.id = reliable_nodes.node_id\n WHERE n.network = ?\n ORDER BY RANDOM()\n LIMIT ?\n ",
|
||||
"describe": {
|
||||
"columns": [
|
||||
{
|
||||
"name": "id!: i64",
|
||||
"ordinal": 0,
|
||||
"type_info": "Integer"
|
||||
},
|
||||
{
|
||||
"name": "scheme",
|
||||
"ordinal": 1,
|
||||
"type_info": "Text"
|
||||
},
|
||||
{
|
||||
"name": "host",
|
||||
"ordinal": 2,
|
||||
"type_info": "Text"
|
||||
},
|
||||
{
|
||||
"name": "port",
|
||||
"ordinal": 3,
|
||||
"type_info": "Integer"
|
||||
},
|
||||
{
|
||||
"name": "full_url",
|
||||
"ordinal": 4,
|
||||
"type_info": "Text"
|
||||
},
|
||||
{
|
||||
"name": "network",
|
||||
"ordinal": 5,
|
||||
"type_info": "Text"
|
||||
},
|
||||
{
|
||||
"name": "first_seen_at",
|
||||
"ordinal": 6,
|
||||
"type_info": "Text"
|
||||
},
|
||||
{
|
||||
"name": "success_count!: i64",
|
||||
"ordinal": 7,
|
||||
"type_info": "Null"
|
||||
},
|
||||
{
|
||||
"name": "failure_count!: i64",
|
||||
"ordinal": 8,
|
||||
"type_info": "Null"
|
||||
},
|
||||
{
|
||||
"name": "last_success?: String",
|
||||
"ordinal": 9,
|
||||
"type_info": "Null"
|
||||
},
|
||||
{
|
||||
"name": "last_failure?: String",
|
||||
"ordinal": 10,
|
||||
"type_info": "Null"
|
||||
},
|
||||
{
|
||||
"name": "last_checked?: String",
|
||||
"ordinal": 11,
|
||||
"type_info": "Null"
|
||||
},
|
||||
{
|
||||
"name": "is_reliable!: i64",
|
||||
"ordinal": 12,
|
||||
"type_info": "Null"
|
||||
},
|
||||
{
|
||||
"name": "avg_latency_ms?: f64",
|
||||
"ordinal": 13,
|
||||
"type_info": "Null"
|
||||
},
|
||||
{
|
||||
"name": "min_latency_ms?: f64",
|
||||
"ordinal": 14,
|
||||
"type_info": "Null"
|
||||
},
|
||||
{
|
||||
"name": "max_latency_ms?: f64",
|
||||
"ordinal": 15,
|
||||
"type_info": "Null"
|
||||
},
|
||||
{
|
||||
"name": "last_latency_ms?: f64",
|
||||
"ordinal": 16,
|
||||
"type_info": "Float"
|
||||
}
|
||||
],
|
||||
"parameters": {
|
||||
"Right": 3
|
||||
},
|
||||
"nullable": [
|
||||
true,
|
||||
false,
|
||||
false,
|
||||
false,
|
||||
false,
|
||||
false,
|
||||
false,
|
||||
null,
|
||||
null,
|
||||
null,
|
||||
null,
|
||||
null,
|
||||
null,
|
||||
null,
|
||||
null,
|
||||
null,
|
||||
true
|
||||
]
|
||||
},
|
||||
"hash": "ba231efaf208a42fa857f716ef296b428c937f2eb7c8ce9c631f7f721e914c14"
|
||||
}
|
monero-rpc-pool/.sqlx/query-e0865335c2dcb040a34e3f1305fe1a823d6fcde4a061def602cba30971817781.json | 20 (generated, new file)
@@ -0,0 +1,20 @@
|
|||
{
|
||||
"db_name": "SQLite",
|
||||
"query": "SELECT id FROM monero_nodes WHERE full_url = ?",
|
||||
"describe": {
|
||||
"columns": [
|
||||
{
|
||||
"name": "id",
|
||||
"ordinal": 0,
|
||||
"type_info": "Integer"
|
||||
}
|
||||
],
|
||||
"parameters": {
|
||||
"Right": 1
|
||||
},
|
||||
"nullable": [
|
||||
true
|
||||
]
|
||||
},
|
||||
"hash": "e0865335c2dcb040a34e3f1305fe1a823d6fcde4a061def602cba30971817781"
|
||||
}
|
monero-rpc-pool/.sqlx/query-fac12e3ca6ac1db1a4812a5390a333ec95a2e5e2cd554c169ceecc61b7ff2864.json | 116 (generated, new file)
@@ -0,0 +1,116 @@
|
|||
{
|
||||
"db_name": "SQLite",
|
||||
"query": "\n SELECT \n n.id as \"id!: i64\",\n n.scheme,\n n.host,\n n.port,\n n.full_url,\n n.network,\n n.first_seen_at,\n CAST(COALESCE(stats.success_count, 0) AS INTEGER) as \"success_count!: i64\",\n CAST(COALESCE(stats.failure_count, 0) AS INTEGER) as \"failure_count!: i64\",\n stats.last_success as \"last_success?: String\",\n stats.last_failure as \"last_failure?: String\",\n stats.last_checked as \"last_checked?: String\",\n CAST(CASE WHEN reliable_nodes.node_id IS NOT NULL THEN 1 ELSE 0 END AS INTEGER) as \"is_reliable!: i64\",\n stats.avg_latency_ms as \"avg_latency_ms?: f64\",\n stats.min_latency_ms as \"min_latency_ms?: f64\",\n stats.max_latency_ms as \"max_latency_ms?: f64\",\n stats.last_latency_ms as \"last_latency_ms?: f64\"\n FROM monero_nodes n\n LEFT JOIN (\n SELECT \n node_id,\n SUM(CASE WHEN was_successful THEN 1 ELSE 0 END) as success_count,\n SUM(CASE WHEN NOT was_successful THEN 1 ELSE 0 END) as failure_count,\n MAX(CASE WHEN was_successful THEN timestamp END) as last_success,\n MAX(CASE WHEN NOT was_successful THEN timestamp END) as last_failure,\n MAX(timestamp) as last_checked,\n AVG(CASE WHEN was_successful AND latency_ms IS NOT NULL THEN latency_ms END) as avg_latency_ms,\n MIN(CASE WHEN was_successful AND latency_ms IS NOT NULL THEN latency_ms END) as min_latency_ms,\n MAX(CASE WHEN was_successful AND latency_ms IS NOT NULL THEN latency_ms END) as max_latency_ms,\n (SELECT latency_ms FROM health_checks hc2 WHERE hc2.node_id = health_checks.node_id ORDER BY timestamp DESC LIMIT 1) as last_latency_ms\n FROM health_checks \n GROUP BY node_id\n ) stats ON n.id = stats.node_id\n LEFT JOIN (\n SELECT DISTINCT node_id FROM (\n SELECT \n n2.id as node_id,\n COALESCE(s2.success_count, 0) as success_count,\n COALESCE(s2.failure_count, 0) as failure_count,\n s2.avg_latency_ms,\n (CAST(COALESCE(s2.success_count, 0) AS REAL) / CAST(COALESCE(s2.success_count, 0) + COALESCE(s2.failure_count, 0) AS REAL)) * \n (MIN(COALESCE(s2.success_count, 0) + COALESCE(s2.failure_count, 0), 200) / 200.0) * 0.8 +\n CASE \n WHEN s2.avg_latency_ms IS NOT NULL THEN (1.0 - (MIN(s2.avg_latency_ms, 2000) / 2000.0)) * 0.2\n ELSE 0.0 \n END as reliability_score\n FROM monero_nodes n2\n LEFT JOIN (\n SELECT \n node_id,\n SUM(CASE WHEN was_successful THEN 1 ELSE 0 END) as success_count,\n SUM(CASE WHEN NOT was_successful THEN 1 ELSE 0 END) as failure_count,\n AVG(CASE WHEN was_successful AND latency_ms IS NOT NULL THEN latency_ms END) as avg_latency_ms\n FROM health_checks \n GROUP BY node_id\n ) s2 ON n2.id = s2.node_id\n WHERE n2.network = ? AND (COALESCE(s2.success_count, 0) + COALESCE(s2.failure_count, 0)) > 0\n ORDER BY reliability_score DESC\n LIMIT 4\n )\n ) reliable_nodes ON n.id = reliable_nodes.node_id\n WHERE n.network = ? AND stats.success_count > 0\n ORDER BY stats.avg_latency_ms ASC, stats.success_count DESC\n ",
|
||||
"describe": {
|
||||
"columns": [
|
||||
{
|
||||
"name": "id!: i64",
|
||||
"ordinal": 0,
|
||||
"type_info": "Integer"
|
||||
},
|
||||
{
|
||||
"name": "scheme",
|
||||
"ordinal": 1,
|
||||
"type_info": "Text"
|
||||
},
|
||||
{
|
||||
"name": "host",
|
||||
"ordinal": 2,
|
||||
"type_info": "Text"
|
||||
},
|
||||
{
|
||||
"name": "port",
|
||||
"ordinal": 3,
|
||||
"type_info": "Integer"
|
||||
},
|
||||
{
|
||||
"name": "full_url",
|
||||
"ordinal": 4,
|
||||
"type_info": "Text"
|
||||
},
|
||||
{
|
||||
"name": "network",
|
||||
"ordinal": 5,
|
||||
"type_info": "Text"
|
||||
},
|
||||
{
|
||||
"name": "first_seen_at",
|
||||
"ordinal": 6,
|
||||
"type_info": "Text"
|
||||
},
|
||||
{
|
||||
"name": "success_count!: i64",
|
||||
"ordinal": 7,
|
||||
"type_info": "Null"
|
||||
},
|
||||
{
|
||||
"name": "failure_count!: i64",
|
||||
"ordinal": 8,
|
||||
"type_info": "Null"
|
||||
},
|
||||
{
|
||||
"name": "last_success?: String",
|
||||
"ordinal": 9,
|
||||
"type_info": "Null"
|
||||
},
|
||||
{
|
||||
"name": "last_failure?: String",
|
||||
"ordinal": 10,
|
||||
"type_info": "Null"
|
||||
},
|
||||
{
|
||||
"name": "last_checked?: String",
|
||||
"ordinal": 11,
|
||||
"type_info": "Null"
|
||||
},
|
||||
{
|
||||
"name": "is_reliable!: i64",
|
||||
"ordinal": 12,
|
||||
"type_info": "Null"
|
||||
},
|
||||
{
|
||||
"name": "avg_latency_ms?: f64",
|
||||
"ordinal": 13,
|
||||
"type_info": "Null"
|
||||
},
|
||||
{
|
||||
"name": "min_latency_ms?: f64",
|
||||
"ordinal": 14,
|
||||
"type_info": "Null"
|
||||
},
|
||||
{
|
||||
"name": "max_latency_ms?: f64",
|
||||
"ordinal": 15,
|
||||
"type_info": "Null"
|
||||
},
|
||||
{
|
||||
"name": "last_latency_ms?: f64",
|
||||
"ordinal": 16,
|
||||
"type_info": "Float"
|
||||
}
|
||||
],
|
||||
"parameters": {
|
||||
"Right": 2
|
||||
},
|
||||
"nullable": [
|
||||
true,
|
||||
false,
|
||||
false,
|
||||
false,
|
||||
false,
|
||||
false,
|
||||
false,
|
||||
null,
|
||||
null,
|
||||
null,
|
||||
null,
|
||||
null,
|
||||
null,
|
||||
null,
|
||||
null,
|
||||
null,
|
||||
true
|
||||
]
|
||||
},
|
||||
"hash": "fac12e3ca6ac1db1a4812a5390a333ec95a2e5e2cd554c169ceecc61b7ff2864"
|
||||
}
|
monero-rpc-pool/.sqlx/query-ffa1b76d20c86d6bea02bd03e5e7de159adbb7c7c0ef585ce4df9ec648bea7f8.json | 32 (generated, new file)
@@ -0,0 +1,32 @@
|
|||
{
|
||||
"db_name": "SQLite",
|
||||
"query": "\n SELECT \n COUNT(*) as total,\n CAST(SUM(CASE WHEN stats.success_count > 0 THEN 1 ELSE 0 END) AS INTEGER) as \"reachable!: i64\",\n CAST((SELECT COUNT(*) FROM (\n SELECT n2.id\n FROM monero_nodes n2\n LEFT JOIN (\n SELECT \n node_id,\n SUM(CASE WHEN was_successful THEN 1 ELSE 0 END) as success_count,\n SUM(CASE WHEN NOT was_successful THEN 1 ELSE 0 END) as failure_count,\n AVG(CASE WHEN was_successful AND latency_ms IS NOT NULL THEN latency_ms END) as avg_latency_ms\n FROM health_checks \n GROUP BY node_id\n ) s2 ON n2.id = s2.node_id\n WHERE n2.network = ? AND (COALESCE(s2.success_count, 0) + COALESCE(s2.failure_count, 0)) > 0\n ORDER BY \n (CAST(COALESCE(s2.success_count, 0) AS REAL) / CAST(COALESCE(s2.success_count, 0) + COALESCE(s2.failure_count, 0) AS REAL)) * \n (MIN(COALESCE(s2.success_count, 0) + COALESCE(s2.failure_count, 0), 200) / 200.0) * 0.8 +\n CASE \n WHEN s2.avg_latency_ms IS NOT NULL THEN (1.0 - (MIN(s2.avg_latency_ms, 2000) / 2000.0)) * 0.2\n ELSE 0.0 \n END DESC\n LIMIT 4\n )) AS INTEGER) as \"reliable!: i64\"\n FROM monero_nodes n\n LEFT JOIN (\n SELECT \n node_id,\n SUM(CASE WHEN was_successful THEN 1 ELSE 0 END) as success_count,\n SUM(CASE WHEN NOT was_successful THEN 1 ELSE 0 END) as failure_count\n FROM health_checks \n GROUP BY node_id\n ) stats ON n.id = stats.node_id\n WHERE n.network = ?\n ",
|
||||
"describe": {
|
||||
"columns": [
|
||||
{
|
||||
"name": "total",
|
||||
"ordinal": 0,
|
||||
"type_info": "Integer"
|
||||
},
|
||||
{
|
||||
"name": "reachable!: i64",
|
||||
"ordinal": 1,
|
||||
"type_info": "Integer"
|
||||
},
|
||||
{
|
||||
"name": "reliable!: i64",
|
||||
"ordinal": 2,
|
||||
"type_info": "Integer"
|
||||
}
|
||||
],
|
||||
"parameters": {
|
||||
"Right": 2
|
||||
},
|
||||
"nullable": [
|
||||
false,
|
||||
true,
|
||||
false
|
||||
]
|
||||
},
|
||||
"hash": "ffa1b76d20c86d6bea02bd03e5e7de159adbb7c7c0ef585ce4df9ec648bea7f8"
|
||||
}
|
monero-rpc-pool/Cargo.toml | 36 (new file)
@@ -0,0 +1,36 @@
[package]
name = "monero-rpc-pool"
version = "0.1.0"
authors = ["UnstoppableSwap Team <help@unstoppableswap.net>"]
edition = "2021"

[[bin]]
name = "monero-rpc-pool"
path = "src/main.rs"

[dependencies]
anyhow = "1"
axum = { version = "0.7", features = ["macros"] }
chrono = { version = "0.4", features = ["serde"] }
clap = { version = "4.0", features = ["derive"] }
dirs = "5.0"
futures = "0.3"
monero = { version = "0.12", features = ["serde_support"] }
monero-rpc = { path = "../monero-rpc" }
rand = "0.8"
regex = "1.0"
reqwest = { version = "0.12", default-features = false, features = ["json", "rustls-tls"] }
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"
sqlx = { version = "0.8", features = ["runtime-tokio-rustls", "sqlite", "chrono", "migrate"] }
tokio = { version = "1", features = ["full"] }
tower = "0.4"
tower-http = { version = "0.5", features = ["cors"] }
tracing = "0.1"
tracing-subscriber = { version = "0.3", features = ["env-filter"] }
typeshare = "1.0.3"
url = "2.0"
uuid = { version = "1.0", features = ["v4"] }

[dev-dependencies]
tokio-test = "0.4"
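Because the crate is registered as a workspace member and declares its own [[bin]] target, it can be built and started from the repository root with `cargo build -p monero-rpc-pool` or `cargo run -p monero-rpc-pool`; whatever command-line flags it accepts come from the clap-based parser in src/main.rs, which is not part of this excerpt.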
monero-rpc-pool/migrations/20250618212026_initial_schema.sql | 30 (new file)
@@ -0,0 +1,30 @@
-- Add migration script here

-- Create monero_nodes table - stores node identity and current state
CREATE TABLE IF NOT EXISTS monero_nodes (
    id INTEGER PRIMARY KEY AUTOINCREMENT,
    scheme TEXT NOT NULL,
    host TEXT NOT NULL,
    port INTEGER NOT NULL,
    full_url TEXT NOT NULL UNIQUE,
    network TEXT NOT NULL, -- mainnet/stagenet/testnet - always known at insertion time
    first_seen_at TEXT NOT NULL,
    created_at TEXT NOT NULL DEFAULT (datetime('now')),
    updated_at TEXT NOT NULL DEFAULT (datetime('now'))
);

-- Create health_checks table - stores raw event data
CREATE TABLE IF NOT EXISTS health_checks (
    id INTEGER PRIMARY KEY AUTOINCREMENT,
    node_id INTEGER NOT NULL,
    timestamp TEXT NOT NULL,
    was_successful BOOLEAN NOT NULL,
    latency_ms REAL,
    FOREIGN KEY (node_id) REFERENCES monero_nodes(id) ON DELETE CASCADE
);

-- Create indexes for performance
CREATE INDEX IF NOT EXISTS idx_nodes_full_url ON monero_nodes(full_url);
CREATE INDEX IF NOT EXISTS idx_nodes_network ON monero_nodes(network);
CREATE INDEX IF NOT EXISTS idx_health_checks_node_id ON health_checks(node_id);
CREATE INDEX IF NOT EXISTS idx_health_checks_timestamp ON health_checks(timestamp);
|
@ -0,0 +1,31 @@
|
|||
-- Insert default mainnet bootstrap nodes
|
||||
INSERT OR IGNORE INTO monero_nodes (scheme, host, port, full_url, network, first_seen_at) VALUES
|
||||
('http', 'node.supportxmr.com', 18081, 'http://node.supportxmr.com:18081', 'mainnet', datetime('now')),
('http', 'nodes.hashvault.pro', 18081, 'http://nodes.hashvault.pro:18081', 'mainnet', datetime('now')),
('http', 'xmr-node.cakewallet.com', 18081, 'http://xmr-node.cakewallet.com:18081', 'mainnet', datetime('now')),
('http', 'node.xmr.to', 18081, 'http://node.xmr.to:18081', 'mainnet', datetime('now')),
('https', 'opennode.xmr-tw.org', 18089, 'https://opennode.xmr-tw.org:18089', 'mainnet', datetime('now')),
('https', 'monero.stackwallet.com', 18081, 'https://monero.stackwallet.com:18081', 'mainnet', datetime('now')),
('https', 'node.sethforprivacy.com', 18089, 'https://node.sethforprivacy.com:18089', 'mainnet', datetime('now')),
('https', 'node.monero.net', 18081, 'https://node.monero.net:18081', 'mainnet', datetime('now')),
('https', 'moneronode.org', 18081, 'https://moneronode.org:18081', 'mainnet', datetime('now')),
('http', 'node.majesticbank.at', 18089, 'http://node.majesticbank.at:18089', 'mainnet', datetime('now')),
('http', 'node.majesticbank.is', 18089, 'http://node.majesticbank.is:18089', 'mainnet', datetime('now')),
('https', 'xmr.cryptostorm.is', 18081, 'https://xmr.cryptostorm.is:18081', 'mainnet', datetime('now')),
('https', 'xmr.privex.io', 18081, 'https://xmr.privex.io:18081', 'mainnet', datetime('now')),
('https', 'nodes.hashvault.pro', 18081, 'https://nodes.hashvault.pro:18081', 'mainnet', datetime('now')),
('http', 'hashvaultsvg2rinvxz7kos77hdfm6zrd5yco3tx2yh2linsmusfwyad.onion', 18081, 'http://hashvaultsvg2rinvxz7kos77hdfm6zrd5yco3tx2yh2linsmusfwyad.onion:18081', 'mainnet', datetime('now')),
('https', 'plowsof3t5hogddwabaeiyrno25efmzfxyro2vligremt7sxpsclfaid.onion', 18089, 'https://plowsof3t5hogddwabaeiyrno25efmzfxyro2vligremt7sxpsclfaid.onion:18089', 'mainnet', datetime('now')),
('http', 'moneroexnovtlp4datcwbgjznnulgm7q34wcl6r4gcvccruhkceb2xyd.onion', 18089, 'http://moneroexnovtlp4datcwbgjznnulgm7q34wcl6r4gcvccruhkceb2xyd.onion:18089', 'mainnet', datetime('now')),
('https', 'yqz7oikk5fyxhyy32lyy3bkwcfw4rh2o5i77wuwslqll24g3bgd44iid.onion', 18081, 'https://yqz7oikk5fyxhyy32lyy3bkwcfw4rh2o5i77wuwslqll24g3bgd44iid.onion:18081', 'mainnet', datetime('now'));

-- Insert default stagenet bootstrap nodes
INSERT OR IGNORE INTO monero_nodes (scheme, host, port, full_url, network, first_seen_at) VALUES
('http', 'stagenet.xmr-tw.org', 38081, 'http://stagenet.xmr-tw.org:38081', 'stagenet', datetime('now')),
('https', 'node.monerodevs.org', 38089, 'https://node.monerodevs.org:38089', 'stagenet', datetime('now')),
('https', 'node2.monerodevs.org', 38089, 'https://node2.monerodevs.org:38089', 'stagenet', datetime('now')),
('https', 'node3.monerodevs.org', 38089, 'https://node3.monerodevs.org:38089', 'stagenet', datetime('now')),
('https', 'xmr-lux.boldsuck.org', 38081, 'https://xmr-lux.boldsuck.org:38081', 'stagenet', datetime('now')),
('http', 'plowsofe6cleftfmk2raiw5h2x66atrik3nja4bfd3zrfa2hdlgworad.onion', 38089, 'http://plowsofe6cleftfmk2raiw5h2x66atrik3nja4bfd3zrfa2hdlgworad.onion:38089', 'stagenet', datetime('now')),
('http', 'plowsoffjexmxalw73tkjmf422gq6575fc7vicuu4javzn2ynnte6tyd.onion', 38089, 'http://plowsoffjexmxalw73tkjmf422gq6575fc7vicuu4javzn2ynnte6tyd.onion:38089', 'stagenet', datetime('now')),
('https', 'stagenet.xmr.ditatompel.com', 38081, 'https://stagenet.xmr.ditatompel.com:38081', 'stagenet', datetime('now'));
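The seeded rows above only populate the columns that are known at insertion time; success, failure and latency figures are derived later from the health_checks table. Purely as an illustration (not part of the migration), the bootstrap set for one network can be listed like this, assuming the monero_nodes schema created earlier in this migration:

SELECT full_url, scheme, host, port, first_seen_at
FROM monero_nodes
WHERE network = 'stagenet'
ORDER BY first_seen_at;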
56
monero-rpc-pool/regenerate_sqlx_cache.sh
Executable file
@ -0,0 +1,56 @@
#!/bin/bash

# regenerate_sqlx_cache.sh
#
# Script to regenerate SQLx query cache for monero-rpc-pool
#
# This script:
# 1. Creates a temporary SQLite database in a temp directory
# 2. Runs all database migrations to set up the schema
# 3. Regenerates the SQLx query cache (.sqlx directory)
# 4. Cleans up temporary files automatically
#
# Usage:
#   ./regenerate_sqlx_cache.sh
#
# Requirements:
#   - cargo and sqlx-cli must be installed
#   - Must be run from the monero-rpc-pool directory
#   - migrations/ directory must exist with valid migration files
#
# The generated .sqlx directory should be committed to version control
# to enable offline compilation without requiring DATABASE_URL.

set -e # Exit on any error

echo "🔄 Regenerating SQLx query cache..."

# Create a temporary directory for the database
TEMP_DIR=$(mktemp -d)
TEMP_DB="$TEMP_DIR/temp_sqlx_cache.sqlite"
DATABASE_URL="sqlite:$TEMP_DB"

echo "📁 Using temporary database: $TEMP_DB"

# Function to cleanup on exit
cleanup() {
    echo "🧹 Cleaning up temporary files..."
    rm -rf "$TEMP_DIR"
}
trap cleanup EXIT

# Export DATABASE_URL for sqlx commands
export DATABASE_URL

echo "🗄️ Creating database..."
cargo sqlx database create

echo "🔄 Running migrations..."
cargo sqlx migrate run

echo "⚡ Preparing SQLx query cache..."
cargo sqlx prepare

echo "✅ SQLx query cache regenerated successfully!"
echo "📝 The .sqlx directory has been updated with the latest query metadata."
echo "💡 Make sure to commit the .sqlx directory to version control."
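Once the cache exists, builds no longer need a live database: the sqlx macros fall back to the committed .sqlx metadata when offline mode is requested. A typical follow-up, shown here only as an illustration and not as part of the script:

cd monero-rpc-pool
./regenerate_sqlx_cache.sh
# Compile against the committed query cache, without DATABASE_URL:
SQLX_OFFLINE=true cargo build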
27
monero-rpc-pool/src/config.rs
Normal file
@ -0,0 +1,27 @@
use serde::{Deserialize, Serialize};
use std::path::PathBuf;

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Config {
    pub host: String,
    pub port: u16,
    pub data_dir: PathBuf,
}

impl Config {
    pub fn new_with_port(host: String, port: u16, data_dir: PathBuf) -> Self {
        Self {
            host,
            port,
            data_dir,
        }
    }

    pub fn new_random_port(host: String, data_dir: PathBuf) -> Self {
        Self {
            host,
            port: 0,
            data_dir,
        }
    }
}
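Both constructors are thin wrappers; the only difference is that new_random_port pins port to 0 so the listener can pick a free port when it binds. A minimal usage sketch (the data directory path is made up for illustration):

use std::path::PathBuf;

fn example_configs() -> (Config, Config) {
    // Fixed port, matching the CLI default.
    let fixed = Config::new_with_port("127.0.0.1".to_string(), 18081, PathBuf::from("/tmp/rpc-pool"));
    // Port 0 asks the OS for a free port at bind time.
    let random = Config::new_random_port("127.0.0.1".to_string(), PathBuf::from("/tmp/rpc-pool"));
    (fixed, random)
}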
947
monero-rpc-pool/src/database.rs
Normal file
@ -0,0 +1,947 @@
use std::path::PathBuf;

use anyhow::Result;
use dirs::data_dir;
use serde::{Deserialize, Serialize};
use sqlx::SqlitePool;
use tracing::{debug, info, warn};

#[derive(Debug, Clone, Serialize, Deserialize, sqlx::FromRow)]
pub struct MoneroNode {
    pub id: Option<i64>,
    pub scheme: String, // http or https
    pub host: String,
    pub port: i64,
    pub full_url: String,
    pub network: String, // mainnet, stagenet, or testnet - always known at insertion time
    pub first_seen_at: String, // ISO 8601 timestamp when first discovered
    // Computed fields from health_checks (not stored in monero_nodes table)
    #[sqlx(default)]
    pub success_count: i64,
    #[sqlx(default)]
    pub failure_count: i64,
    #[sqlx(default)]
    pub last_success: Option<String>,
    #[sqlx(default)]
    pub last_failure: Option<String>,
    #[sqlx(default)]
    pub last_checked: Option<String>,
    #[sqlx(default)]
    pub is_reliable: bool,
    #[sqlx(default)]
    pub avg_latency_ms: Option<f64>,
    #[sqlx(default)]
    pub min_latency_ms: Option<f64>,
    #[sqlx(default)]
    pub max_latency_ms: Option<f64>,
    #[sqlx(default)]
    pub last_latency_ms: Option<f64>,
}

#[derive(Debug, Clone, Serialize, Deserialize, sqlx::FromRow)]
pub struct HealthCheck {
    pub id: Option<i64>,
    pub node_id: i64,
    pub timestamp: String, // ISO 8601 timestamp
    pub was_successful: bool,
    pub latency_ms: Option<f64>,
}

impl MoneroNode {
    pub fn new(scheme: String, host: String, port: i64, network: String) -> Self {
        let full_url = format!("{}://{}:{}", scheme, host, port);
        let now = chrono::Utc::now().to_rfc3339();
        Self {
            id: None,
            scheme,
            host,
            port,
            full_url,
            network,
            first_seen_at: now,
            // These are computed from health_checks
            success_count: 0,
            failure_count: 0,
            last_success: None,
            last_failure: None,
            last_checked: None,
            is_reliable: false,
            avg_latency_ms: None,
            min_latency_ms: None,
            max_latency_ms: None,
            last_latency_ms: None,
        }
    }

    pub fn success_rate(&self) -> f64 {
        let total = self.success_count + self.failure_count;
        if total == 0 {
            0.0
        } else {
            self.success_count as f64 / total as f64
        }
    }

    pub fn reliability_score(&self) -> f64 {
        let success_rate = self.success_rate();
        let total_requests = self.success_count + self.failure_count;

        // Weight success rate by total requests (more requests = more reliable data)
        let request_weight = (total_requests as f64).min(200.0) / 200.0;
        let mut score = success_rate * request_weight;

        // Factor in latency - lower latency = higher score
        if let Some(avg_latency) = self.avg_latency_ms {
            // Normalize latency to 0-1 range (assuming 0-2000ms range)
            let latency_factor = 1.0 - (avg_latency.min(2000.0) / 2000.0);
            score = score * 0.8 + latency_factor * 0.2; // 80% success rate, 20% latency
        }

        score
    }
}
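To make the weighting concrete: a node with 150 successes, 50 failures and a 400 ms average latency has a success rate of 0.75, a full request weight (200 checks hit the cap), and a latency factor of 1 - 400/2000 = 0.8, so the score is 0.75 * 0.8 + 0.8 * 0.2 = 0.76. A throwaway check of that arithmetic, with made-up node values (illustrative, not part of the module):

fn example_reliability_score() {
    let node = MoneroNode {
        success_count: 150,
        failure_count: 50,
        avg_latency_ms: Some(400.0),
        ..MoneroNode::new("https".into(), "node.example.org".into(), 18081, "mainnet".into())
    };
    // 0.75 success rate * 1.0 request weight, blended 80/20 with the latency factor:
    // 0.75 * 0.8 + 0.8 * 0.2 = 0.76
    assert!((node.reliability_score() - 0.76).abs() < 1e-9);
}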
#[derive(Clone)]
|
||||
pub struct Database {
|
||||
pub pool: SqlitePool,
|
||||
}
|
||||
|
||||
impl Database {
|
||||
pub async fn new() -> Result<Self> {
|
||||
let app_data_dir = get_app_data_dir()?;
|
||||
Self::new_with_data_dir(app_data_dir).await
|
||||
}
|
||||
|
||||
pub async fn new_with_data_dir(data_dir: PathBuf) -> Result<Self> {
|
||||
if !data_dir.exists() {
|
||||
std::fs::create_dir_all(&data_dir)?;
|
||||
info!("Created application data directory: {}", data_dir.display());
|
||||
}
|
||||
|
||||
let db_path = data_dir.join("nodes.db");
|
||||
info!("Using database at: {}", db_path.display());
|
||||
|
||||
let database_url = format!("sqlite:{}?mode=rwc", db_path.display());
|
||||
let pool = SqlitePool::connect(&database_url).await?;
|
||||
|
||||
let db = Self { pool };
|
||||
db.migrate().await?;
|
||||
|
||||
Ok(db)
|
||||
}
|
||||
|
||||
async fn migrate(&self) -> Result<()> {
|
||||
// Run sqlx migrations
|
||||
sqlx::migrate!("./migrations").run(&self.pool).await?;
|
||||
|
||||
info!("Database migration completed");
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Insert a node if it doesn't exist, return the node_id
|
||||
pub async fn upsert_node(
|
||||
&self,
|
||||
scheme: &str,
|
||||
host: &str,
|
||||
port: i64,
|
||||
network: &str,
|
||||
) -> Result<i64> {
|
||||
let full_url = format!("{}://{}:{}", scheme, host, port);
|
||||
let now = chrono::Utc::now().to_rfc3339();
|
||||
|
||||
let result = sqlx::query!(
|
||||
r#"
|
||||
INSERT INTO monero_nodes (scheme, host, port, full_url, network, first_seen_at, updated_at)
|
||||
VALUES (?, ?, ?, ?, ?, ?, ?)
|
||||
ON CONFLICT(full_url) DO UPDATE SET
|
||||
network = excluded.network,
|
||||
updated_at = excluded.updated_at
|
||||
RETURNING id
|
||||
"#,
|
||||
scheme,
|
||||
host,
|
||||
port,
|
||||
full_url,
|
||||
network,
|
||||
now,
|
||||
now
|
||||
)
|
||||
.fetch_one(&self.pool)
|
||||
.await?;
|
||||
|
||||
Ok(result.id)
|
||||
}
|
||||
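Because the statement lands on ON CONFLICT(full_url), calling upsert_node twice for the same URL returns the same row id and only refreshes network and updated_at; no duplicate row is created. A test-style sketch of that behaviour (the node URL is made up):

async fn example_upsert(db: &Database) -> anyhow::Result<()> {
    let first = db.upsert_node("https", "node.example.org", 18081, "mainnet").await?;
    let second = db.upsert_node("https", "node.example.org", 18081, "mainnet").await?;
    assert_eq!(first, second); // the second call hit the conflict branch, no new row
    Ok(())
}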
|
||||
/// Update a node's network after it has been identified
|
||||
pub async fn update_node_network(&self, url: &str, network: &str) -> Result<()> {
|
||||
let now = chrono::Utc::now().to_rfc3339();
|
||||
|
||||
let result = sqlx::query!(
|
||||
r#"
|
||||
UPDATE monero_nodes
|
||||
SET network = ?, updated_at = ?
|
||||
WHERE full_url = ?
|
||||
"#,
|
||||
network,
|
||||
now,
|
||||
url
|
||||
)
|
||||
.execute(&self.pool)
|
||||
.await?;
|
||||
|
||||
if result.rows_affected() > 0 {
|
||||
debug!("Updated network for node {} to {}", url, network);
|
||||
} else {
|
||||
warn!("Failed to update network for node {}: not found", url);
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Record a health check event
|
||||
pub async fn record_health_check(
|
||||
&self,
|
||||
url: &str,
|
||||
was_successful: bool,
|
||||
latency_ms: Option<f64>,
|
||||
) -> Result<()> {
|
||||
let now = chrono::Utc::now().to_rfc3339();
|
||||
|
||||
// First get the node_id
|
||||
let node_row = sqlx::query!("SELECT id FROM monero_nodes WHERE full_url = ?", url)
|
||||
.fetch_optional(&self.pool)
|
||||
.await?;
|
||||
|
||||
let node_id = match node_row {
|
||||
Some(row) => row.id,
|
||||
None => {
|
||||
warn!("Cannot record health check for unknown node: {}", url);
|
||||
return Ok(());
|
||||
}
|
||||
};
|
||||
|
||||
sqlx::query!(
|
||||
r#"
|
||||
INSERT INTO health_checks (node_id, timestamp, was_successful, latency_ms)
|
||||
VALUES (?, ?, ?, ?)
|
||||
"#,
|
||||
node_id,
|
||||
now,
|
||||
was_successful,
|
||||
latency_ms
|
||||
)
|
||||
.execute(&self.pool)
|
||||
.await?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Get nodes that have been identified (have network set)
|
||||
pub async fn get_identified_nodes(&self, network: &str) -> Result<Vec<MoneroNode>> {
|
||||
let rows = sqlx::query!(
|
||||
r#"
|
||||
SELECT
|
||||
n.id as "id!: i64",
|
||||
n.scheme,
|
||||
n.host,
|
||||
n.port,
|
||||
n.full_url,
|
||||
n.network,
|
||||
n.first_seen_at,
|
||||
CAST(COALESCE(stats.success_count, 0) AS INTEGER) as "success_count!: i64",
|
||||
CAST(COALESCE(stats.failure_count, 0) AS INTEGER) as "failure_count!: i64",
|
||||
stats.last_success as "last_success?: String",
|
||||
stats.last_failure as "last_failure?: String",
|
||||
stats.last_checked as "last_checked?: String",
|
||||
CAST(CASE WHEN reliable_nodes.node_id IS NOT NULL THEN 1 ELSE 0 END AS INTEGER) as "is_reliable!: i64",
|
||||
stats.avg_latency_ms as "avg_latency_ms?: f64",
|
||||
stats.min_latency_ms as "min_latency_ms?: f64",
|
||||
stats.max_latency_ms as "max_latency_ms?: f64",
|
||||
stats.last_latency_ms as "last_latency_ms?: f64"
|
||||
FROM monero_nodes n
|
||||
LEFT JOIN (
|
||||
SELECT
|
||||
node_id,
|
||||
SUM(CASE WHEN was_successful THEN 1 ELSE 0 END) as success_count,
|
||||
SUM(CASE WHEN NOT was_successful THEN 1 ELSE 0 END) as failure_count,
|
||||
MAX(CASE WHEN was_successful THEN timestamp END) as last_success,
|
||||
MAX(CASE WHEN NOT was_successful THEN timestamp END) as last_failure,
|
||||
MAX(timestamp) as last_checked,
|
||||
AVG(CASE WHEN was_successful AND latency_ms IS NOT NULL THEN latency_ms END) as avg_latency_ms,
|
||||
MIN(CASE WHEN was_successful AND latency_ms IS NOT NULL THEN latency_ms END) as min_latency_ms,
|
||||
MAX(CASE WHEN was_successful AND latency_ms IS NOT NULL THEN latency_ms END) as max_latency_ms,
|
||||
(SELECT latency_ms FROM health_checks hc2 WHERE hc2.node_id = health_checks.node_id ORDER BY timestamp DESC LIMIT 1) as last_latency_ms
|
||||
FROM health_checks
|
||||
GROUP BY node_id
|
||||
) stats ON n.id = stats.node_id
|
||||
LEFT JOIN (
|
||||
SELECT DISTINCT node_id FROM (
|
||||
SELECT
|
||||
n2.id as node_id,
|
||||
COALESCE(s2.success_count, 0) as success_count,
|
||||
COALESCE(s2.failure_count, 0) as failure_count,
|
||||
s2.avg_latency_ms,
|
||||
(CAST(COALESCE(s2.success_count, 0) AS REAL) / CAST(COALESCE(s2.success_count, 0) + COALESCE(s2.failure_count, 0) AS REAL)) *
|
||||
(MIN(COALESCE(s2.success_count, 0) + COALESCE(s2.failure_count, 0), 200) / 200.0) * 0.8 +
|
||||
CASE
|
||||
WHEN s2.avg_latency_ms IS NOT NULL THEN (1.0 - (MIN(s2.avg_latency_ms, 2000) / 2000.0)) * 0.2
|
||||
ELSE 0.0
|
||||
END as reliability_score
|
||||
FROM monero_nodes n2
|
||||
LEFT JOIN (
|
||||
SELECT
|
||||
node_id,
|
||||
SUM(CASE WHEN was_successful THEN 1 ELSE 0 END) as success_count,
|
||||
SUM(CASE WHEN NOT was_successful THEN 1 ELSE 0 END) as failure_count,
|
||||
AVG(CASE WHEN was_successful AND latency_ms IS NOT NULL THEN latency_ms END) as avg_latency_ms
|
||||
FROM health_checks
|
||||
GROUP BY node_id
|
||||
) s2 ON n2.id = s2.node_id
|
||||
WHERE n2.network = ? AND (COALESCE(s2.success_count, 0) + COALESCE(s2.failure_count, 0)) > 0
|
||||
ORDER BY reliability_score DESC
|
||||
LIMIT 4
|
||||
)
|
||||
) reliable_nodes ON n.id = reliable_nodes.node_id
|
||||
WHERE n.network = ?
|
||||
ORDER BY stats.avg_latency_ms ASC, stats.success_count DESC
|
||||
"#,
|
||||
network,
|
||||
network
|
||||
)
|
||||
.fetch_all(&self.pool)
|
||||
.await?;
|
||||
|
||||
let nodes: Vec<MoneroNode> = rows
|
||||
.into_iter()
|
||||
.map(|row| MoneroNode {
|
||||
id: Some(row.id),
|
||||
scheme: row.scheme,
|
||||
host: row.host,
|
||||
port: row.port,
|
||||
full_url: row.full_url,
|
||||
network: row.network,
|
||||
first_seen_at: row.first_seen_at,
|
||||
success_count: row.success_count,
|
||||
failure_count: row.failure_count,
|
||||
last_success: row.last_success,
|
||||
last_failure: row.last_failure,
|
||||
last_checked: row.last_checked,
|
||||
is_reliable: row.is_reliable != 0,
|
||||
avg_latency_ms: row.avg_latency_ms,
|
||||
min_latency_ms: row.min_latency_ms,
|
||||
max_latency_ms: row.max_latency_ms,
|
||||
last_latency_ms: row.last_latency_ms,
|
||||
})
|
||||
.collect();
|
||||
|
||||
debug!(
|
||||
"Retrieved {} identified nodes for network {}",
|
||||
nodes.len(),
|
||||
network
|
||||
);
|
||||
Ok(nodes)
|
||||
}
|
||||
|
||||
/// Get reliable nodes (top 4 by reliability score)
|
||||
pub async fn get_reliable_nodes(&self, network: &str) -> Result<Vec<MoneroNode>> {
|
||||
let rows = sqlx::query!(
|
||||
r#"
|
||||
SELECT
|
||||
n.id as "id!: i64",
|
||||
n.scheme,
|
||||
n.host,
|
||||
n.port,
|
||||
n.full_url,
|
||||
n.network,
|
||||
n.first_seen_at,
|
||||
CAST(COALESCE(stats.success_count, 0) AS INTEGER) as "success_count!: i64",
|
||||
CAST(COALESCE(stats.failure_count, 0) AS INTEGER) as "failure_count!: i64",
|
||||
stats.last_success as "last_success?: String",
|
||||
stats.last_failure as "last_failure?: String",
|
||||
stats.last_checked as "last_checked?: String",
|
||||
CAST(1 AS INTEGER) as "is_reliable!: i64",
|
||||
stats.avg_latency_ms as "avg_latency_ms?: f64",
|
||||
stats.min_latency_ms as "min_latency_ms?: f64",
|
||||
stats.max_latency_ms as "max_latency_ms?: f64",
|
||||
stats.last_latency_ms as "last_latency_ms?: f64"
|
||||
FROM monero_nodes n
|
||||
LEFT JOIN (
|
||||
SELECT
|
||||
node_id,
|
||||
SUM(CASE WHEN was_successful THEN 1 ELSE 0 END) as success_count,
|
||||
SUM(CASE WHEN NOT was_successful THEN 1 ELSE 0 END) as failure_count,
|
||||
MAX(CASE WHEN was_successful THEN timestamp END) as last_success,
|
||||
MAX(CASE WHEN NOT was_successful THEN timestamp END) as last_failure,
|
||||
MAX(timestamp) as last_checked,
|
||||
AVG(CASE WHEN was_successful AND latency_ms IS NOT NULL THEN latency_ms END) as avg_latency_ms,
|
||||
MIN(CASE WHEN was_successful AND latency_ms IS NOT NULL THEN latency_ms END) as min_latency_ms,
|
||||
MAX(CASE WHEN was_successful AND latency_ms IS NOT NULL THEN latency_ms END) as max_latency_ms,
|
||||
(SELECT latency_ms FROM health_checks hc2 WHERE hc2.node_id = health_checks.node_id ORDER BY timestamp DESC LIMIT 1) as last_latency_ms
|
||||
FROM health_checks
|
||||
GROUP BY node_id
|
||||
) stats ON n.id = stats.node_id
|
||||
WHERE n.network = ? AND (COALESCE(stats.success_count, 0) + COALESCE(stats.failure_count, 0)) > 0
|
||||
ORDER BY
|
||||
(CAST(COALESCE(stats.success_count, 0) AS REAL) / CAST(COALESCE(stats.success_count, 0) + COALESCE(stats.failure_count, 0) AS REAL)) *
|
||||
(MIN(COALESCE(stats.success_count, 0) + COALESCE(stats.failure_count, 0), 200) / 200.0) * 0.8 +
|
||||
CASE
|
||||
WHEN stats.avg_latency_ms IS NOT NULL THEN (1.0 - (MIN(stats.avg_latency_ms, 2000) / 2000.0)) * 0.2
|
||||
ELSE 0.0
|
||||
END DESC
|
||||
LIMIT 4
|
||||
"#,
|
||||
network
|
||||
)
|
||||
.fetch_all(&self.pool)
|
||||
.await?;
|
||||
|
||||
let nodes = rows
|
||||
.into_iter()
|
||||
.map(|row| MoneroNode {
|
||||
id: Some(row.id),
|
||||
scheme: row.scheme,
|
||||
host: row.host,
|
||||
port: row.port,
|
||||
full_url: row.full_url,
|
||||
network: row.network,
|
||||
first_seen_at: row.first_seen_at,
|
||||
success_count: row.success_count,
|
||||
failure_count: row.failure_count,
|
||||
last_success: row.last_success,
|
||||
last_failure: row.last_failure,
|
||||
last_checked: row.last_checked,
|
||||
is_reliable: true,
|
||||
avg_latency_ms: row.avg_latency_ms,
|
||||
min_latency_ms: row.min_latency_ms,
|
||||
max_latency_ms: row.max_latency_ms,
|
||||
last_latency_ms: row.last_latency_ms,
|
||||
})
|
||||
.collect();
|
||||
|
||||
Ok(nodes)
|
||||
}
|
||||
|
||||
/// Get node statistics for a network
|
||||
pub async fn get_node_stats(&self, network: &str) -> Result<(i64, i64, i64)> {
|
||||
let row = sqlx::query!(
|
||||
r#"
|
||||
SELECT
|
||||
COUNT(*) as total,
|
||||
CAST(SUM(CASE WHEN stats.success_count > 0 THEN 1 ELSE 0 END) AS INTEGER) as "reachable!: i64",
|
||||
CAST((SELECT COUNT(*) FROM (
|
||||
SELECT n2.id
|
||||
FROM monero_nodes n2
|
||||
LEFT JOIN (
|
||||
SELECT
|
||||
node_id,
|
||||
SUM(CASE WHEN was_successful THEN 1 ELSE 0 END) as success_count,
|
||||
SUM(CASE WHEN NOT was_successful THEN 1 ELSE 0 END) as failure_count,
|
||||
AVG(CASE WHEN was_successful AND latency_ms IS NOT NULL THEN latency_ms END) as avg_latency_ms
|
||||
FROM health_checks
|
||||
GROUP BY node_id
|
||||
) s2 ON n2.id = s2.node_id
|
||||
WHERE n2.network = ? AND (COALESCE(s2.success_count, 0) + COALESCE(s2.failure_count, 0)) > 0
|
||||
ORDER BY
|
||||
(CAST(COALESCE(s2.success_count, 0) AS REAL) / CAST(COALESCE(s2.success_count, 0) + COALESCE(s2.failure_count, 0) AS REAL)) *
|
||||
(MIN(COALESCE(s2.success_count, 0) + COALESCE(s2.failure_count, 0), 200) / 200.0) * 0.8 +
|
||||
CASE
|
||||
WHEN s2.avg_latency_ms IS NOT NULL THEN (1.0 - (MIN(s2.avg_latency_ms, 2000) / 2000.0)) * 0.2
|
||||
ELSE 0.0
|
||||
END DESC
|
||||
LIMIT 4
|
||||
)) AS INTEGER) as "reliable!: i64"
|
||||
FROM monero_nodes n
|
||||
LEFT JOIN (
|
||||
SELECT
|
||||
node_id,
|
||||
SUM(CASE WHEN was_successful THEN 1 ELSE 0 END) as success_count,
|
||||
SUM(CASE WHEN NOT was_successful THEN 1 ELSE 0 END) as failure_count
|
||||
FROM health_checks
|
||||
GROUP BY node_id
|
||||
) stats ON n.id = stats.node_id
|
||||
WHERE n.network = ?
|
||||
"#,
|
||||
network,
|
||||
network
|
||||
)
|
||||
.fetch_one(&self.pool)
|
||||
.await?;
|
||||
|
||||
let total = row.total;
|
||||
let reachable = row.reachable;
|
||||
let reliable = row.reliable;
|
||||
|
||||
Ok((total, reachable, reliable))
|
||||
}
|
||||
|
||||
/// Get health check statistics for a network
|
||||
pub async fn get_health_check_stats(&self, network: &str) -> Result<(u64, u64)> {
|
||||
let row = sqlx::query!(
|
||||
r#"
|
||||
SELECT
|
||||
CAST(SUM(CASE WHEN hc.was_successful THEN 1 ELSE 0 END) AS INTEGER) as "successful!: i64",
|
||||
CAST(SUM(CASE WHEN NOT hc.was_successful THEN 1 ELSE 0 END) AS INTEGER) as "unsuccessful!: i64"
|
||||
FROM health_checks hc
|
||||
JOIN monero_nodes n ON hc.node_id = n.id
|
||||
WHERE n.network = ?
|
||||
"#,
|
||||
network
|
||||
)
|
||||
.fetch_one(&self.pool)
|
||||
.await?;
|
||||
|
||||
let successful = row.successful as u64;
|
||||
let unsuccessful = row.unsuccessful as u64;
|
||||
|
||||
Ok((successful, unsuccessful))
|
||||
}
|
||||
|
||||
/// Get top nodes based on recent success rate and latency
|
||||
pub async fn get_top_nodes_by_recent_success(
|
||||
&self,
|
||||
network: &str,
|
||||
_recent_checks_limit: i64,
|
||||
limit: i64,
|
||||
) -> Result<Vec<MoneroNode>> {
|
||||
let rows = sqlx::query!(
|
||||
r#"
|
||||
SELECT
|
||||
n.id as "id!: i64",
|
||||
n.scheme,
|
||||
n.host,
|
||||
n.port,
|
||||
n.full_url,
|
||||
n.network,
|
||||
n.first_seen_at,
|
||||
CAST(COALESCE(stats.success_count, 0) AS INTEGER) as "success_count!: i64",
|
||||
CAST(COALESCE(stats.failure_count, 0) AS INTEGER) as "failure_count!: i64",
|
||||
stats.last_success as "last_success?: String",
|
||||
stats.last_failure as "last_failure?: String",
|
||||
stats.last_checked as "last_checked?: String",
|
||||
CAST(CASE WHEN reliable_nodes.node_id IS NOT NULL THEN 1 ELSE 0 END AS INTEGER) as "is_reliable!: i64",
|
||||
stats.avg_latency_ms as "avg_latency_ms?: f64",
|
||||
stats.min_latency_ms as "min_latency_ms?: f64",
|
||||
stats.max_latency_ms as "max_latency_ms?: f64",
|
||||
stats.last_latency_ms as "last_latency_ms?: f64"
|
||||
FROM monero_nodes n
|
||||
LEFT JOIN (
|
||||
SELECT
|
||||
node_id,
|
||||
SUM(CASE WHEN was_successful THEN 1 ELSE 0 END) as success_count,
|
||||
SUM(CASE WHEN NOT was_successful THEN 1 ELSE 0 END) as failure_count,
|
||||
MAX(CASE WHEN was_successful THEN timestamp END) as last_success,
|
||||
MAX(CASE WHEN NOT was_successful THEN timestamp END) as last_failure,
|
||||
MAX(timestamp) as last_checked,
|
||||
AVG(CASE WHEN was_successful AND latency_ms IS NOT NULL THEN latency_ms END) as avg_latency_ms,
|
||||
MIN(CASE WHEN was_successful AND latency_ms IS NOT NULL THEN latency_ms END) as min_latency_ms,
|
||||
MAX(CASE WHEN was_successful AND latency_ms IS NOT NULL THEN latency_ms END) as max_latency_ms,
|
||||
(SELECT latency_ms FROM health_checks hc2 WHERE hc2.node_id = health_checks.node_id ORDER BY timestamp DESC LIMIT 1) as last_latency_ms
|
||||
FROM health_checks
|
||||
GROUP BY node_id
|
||||
) stats ON n.id = stats.node_id
|
||||
LEFT JOIN (
|
||||
SELECT DISTINCT node_id FROM (
|
||||
SELECT
|
||||
n2.id as node_id,
|
||||
COALESCE(s2.success_count, 0) as success_count,
|
||||
COALESCE(s2.failure_count, 0) as failure_count,
|
||||
s2.avg_latency_ms,
|
||||
(CAST(COALESCE(s2.success_count, 0) AS REAL) / CAST(COALESCE(s2.success_count, 0) + COALESCE(s2.failure_count, 0) AS REAL)) *
|
||||
(MIN(COALESCE(s2.success_count, 0) + COALESCE(s2.failure_count, 0), 200) / 200.0) * 0.8 +
|
||||
CASE
|
||||
WHEN s2.avg_latency_ms IS NOT NULL THEN (1.0 - (MIN(s2.avg_latency_ms, 2000) / 2000.0)) * 0.2
|
||||
ELSE 0.0
|
||||
END as reliability_score
|
||||
FROM monero_nodes n2
|
||||
LEFT JOIN (
|
||||
SELECT
|
||||
node_id,
|
||||
SUM(CASE WHEN was_successful THEN 1 ELSE 0 END) as success_count,
|
||||
SUM(CASE WHEN NOT was_successful THEN 1 ELSE 0 END) as failure_count,
|
||||
AVG(CASE WHEN was_successful AND latency_ms IS NOT NULL THEN latency_ms END) as avg_latency_ms
|
||||
FROM health_checks
|
||||
GROUP BY node_id
|
||||
) s2 ON n2.id = s2.node_id
|
||||
WHERE n2.network = ? AND (COALESCE(s2.success_count, 0) + COALESCE(s2.failure_count, 0)) > 0
|
||||
ORDER BY reliability_score DESC
|
||||
LIMIT 4
|
||||
)
|
||||
) reliable_nodes ON n.id = reliable_nodes.node_id
|
||||
WHERE n.network = ? AND (COALESCE(stats.success_count, 0) + COALESCE(stats.failure_count, 0)) > 0
|
||||
ORDER BY
|
||||
(CAST(COALESCE(stats.success_count, 0) AS REAL) / CAST(COALESCE(stats.success_count, 0) + COALESCE(stats.failure_count, 0) AS REAL)) DESC,
|
||||
stats.avg_latency_ms ASC
|
||||
LIMIT ?
|
||||
"#,
|
||||
network,
|
||||
network,
|
||||
limit
|
||||
)
|
||||
.fetch_all(&self.pool)
|
||||
.await?;
|
||||
|
||||
let nodes = rows
|
||||
.into_iter()
|
||||
.map(|row| MoneroNode {
|
||||
id: Some(row.id),
|
||||
scheme: row.scheme,
|
||||
host: row.host,
|
||||
port: row.port,
|
||||
full_url: row.full_url,
|
||||
network: row.network,
|
||||
first_seen_at: row.first_seen_at,
|
||||
success_count: row.success_count,
|
||||
failure_count: row.failure_count,
|
||||
last_success: row.last_success,
|
||||
last_failure: row.last_failure,
|
||||
last_checked: row.last_checked,
|
||||
is_reliable: row.is_reliable != 0,
|
||||
avg_latency_ms: row.avg_latency_ms,
|
||||
min_latency_ms: row.min_latency_ms,
|
||||
max_latency_ms: row.max_latency_ms,
|
||||
last_latency_ms: row.last_latency_ms,
|
||||
})
|
||||
.collect();
|
||||
|
||||
Ok(nodes)
|
||||
}
|
||||
|
||||
/// Get identified nodes that have at least one successful health check
|
||||
pub async fn get_identified_nodes_with_success(
|
||||
&self,
|
||||
network: &str,
|
||||
) -> Result<Vec<MoneroNode>> {
|
||||
let rows = sqlx::query!(
|
||||
r#"
|
||||
SELECT
|
||||
n.id as "id!: i64",
|
||||
n.scheme,
|
||||
n.host,
|
||||
n.port,
|
||||
n.full_url,
|
||||
n.network,
|
||||
n.first_seen_at,
|
||||
CAST(COALESCE(stats.success_count, 0) AS INTEGER) as "success_count!: i64",
|
||||
CAST(COALESCE(stats.failure_count, 0) AS INTEGER) as "failure_count!: i64",
|
||||
stats.last_success as "last_success?: String",
|
||||
stats.last_failure as "last_failure?: String",
|
||||
stats.last_checked as "last_checked?: String",
|
||||
CAST(CASE WHEN reliable_nodes.node_id IS NOT NULL THEN 1 ELSE 0 END AS INTEGER) as "is_reliable!: i64",
|
||||
stats.avg_latency_ms as "avg_latency_ms?: f64",
|
||||
stats.min_latency_ms as "min_latency_ms?: f64",
|
||||
stats.max_latency_ms as "max_latency_ms?: f64",
|
||||
stats.last_latency_ms as "last_latency_ms?: f64"
|
||||
FROM monero_nodes n
|
||||
LEFT JOIN (
|
||||
SELECT
|
||||
node_id,
|
||||
SUM(CASE WHEN was_successful THEN 1 ELSE 0 END) as success_count,
|
||||
SUM(CASE WHEN NOT was_successful THEN 1 ELSE 0 END) as failure_count,
|
||||
MAX(CASE WHEN was_successful THEN timestamp END) as last_success,
|
||||
MAX(CASE WHEN NOT was_successful THEN timestamp END) as last_failure,
|
||||
MAX(timestamp) as last_checked,
|
||||
AVG(CASE WHEN was_successful AND latency_ms IS NOT NULL THEN latency_ms END) as avg_latency_ms,
|
||||
MIN(CASE WHEN was_successful AND latency_ms IS NOT NULL THEN latency_ms END) as min_latency_ms,
|
||||
MAX(CASE WHEN was_successful AND latency_ms IS NOT NULL THEN latency_ms END) as max_latency_ms,
|
||||
(SELECT latency_ms FROM health_checks hc2 WHERE hc2.node_id = health_checks.node_id ORDER BY timestamp DESC LIMIT 1) as last_latency_ms
|
||||
FROM health_checks
|
||||
GROUP BY node_id
|
||||
) stats ON n.id = stats.node_id
|
||||
LEFT JOIN (
|
||||
SELECT DISTINCT node_id FROM (
|
||||
SELECT
|
||||
n2.id as node_id,
|
||||
COALESCE(s2.success_count, 0) as success_count,
|
||||
COALESCE(s2.failure_count, 0) as failure_count,
|
||||
s2.avg_latency_ms,
|
||||
(CAST(COALESCE(s2.success_count, 0) AS REAL) / CAST(COALESCE(s2.success_count, 0) + COALESCE(s2.failure_count, 0) AS REAL)) *
|
||||
(MIN(COALESCE(s2.success_count, 0) + COALESCE(s2.failure_count, 0), 200) / 200.0) * 0.8 +
|
||||
CASE
|
||||
WHEN s2.avg_latency_ms IS NOT NULL THEN (1.0 - (MIN(s2.avg_latency_ms, 2000) / 2000.0)) * 0.2
|
||||
ELSE 0.0
|
||||
END as reliability_score
|
||||
FROM monero_nodes n2
|
||||
LEFT JOIN (
|
||||
SELECT
|
||||
node_id,
|
||||
SUM(CASE WHEN was_successful THEN 1 ELSE 0 END) as success_count,
|
||||
SUM(CASE WHEN NOT was_successful THEN 1 ELSE 0 END) as failure_count,
|
||||
AVG(CASE WHEN was_successful AND latency_ms IS NOT NULL THEN latency_ms END) as avg_latency_ms
|
||||
FROM health_checks
|
||||
GROUP BY node_id
|
||||
) s2 ON n2.id = s2.node_id
|
||||
WHERE n2.network = ? AND (COALESCE(s2.success_count, 0) + COALESCE(s2.failure_count, 0)) > 0
|
||||
ORDER BY reliability_score DESC
|
||||
LIMIT 4
|
||||
)
|
||||
) reliable_nodes ON n.id = reliable_nodes.node_id
|
||||
WHERE n.network = ? AND stats.success_count > 0
|
||||
ORDER BY stats.avg_latency_ms ASC, stats.success_count DESC
|
||||
"#,
|
||||
network,
|
||||
network
|
||||
)
|
||||
.fetch_all(&self.pool)
|
||||
.await?;
|
||||
|
||||
let nodes: Vec<MoneroNode> = rows
|
||||
.into_iter()
|
||||
.map(|row| MoneroNode {
|
||||
id: Some(row.id),
|
||||
scheme: row.scheme,
|
||||
host: row.host,
|
||||
port: row.port,
|
||||
full_url: row.full_url,
|
||||
network: row.network,
|
||||
first_seen_at: row.first_seen_at,
|
||||
success_count: row.success_count,
|
||||
failure_count: row.failure_count,
|
||||
last_success: row.last_success,
|
||||
last_failure: row.last_failure,
|
||||
last_checked: row.last_checked,
|
||||
is_reliable: row.is_reliable != 0,
|
||||
avg_latency_ms: row.avg_latency_ms,
|
||||
min_latency_ms: row.min_latency_ms,
|
||||
max_latency_ms: row.max_latency_ms,
|
||||
last_latency_ms: row.last_latency_ms,
|
||||
})
|
||||
.collect();
|
||||
|
||||
debug!(
|
||||
"Retrieved {} identified nodes with success for network {}",
|
||||
nodes.len(),
|
||||
network
|
||||
);
|
||||
Ok(nodes)
|
||||
}
|
||||
|
||||
/// Get random nodes for the specified network, excluding specific IDs
|
||||
pub async fn get_random_nodes(
|
||||
&self,
|
||||
network: &str,
|
||||
limit: i64,
|
||||
exclude_ids: &[i64],
|
||||
) -> Result<Vec<MoneroNode>> {
|
||||
if exclude_ids.is_empty() {
|
||||
let rows = sqlx::query!(
|
||||
r#"
|
||||
SELECT
|
||||
n.id as "id!: i64",
|
||||
n.scheme,
|
||||
n.host,
|
||||
n.port,
|
||||
n.full_url,
|
||||
n.network,
|
||||
n.first_seen_at,
|
||||
CAST(COALESCE(stats.success_count, 0) AS INTEGER) as "success_count!: i64",
|
||||
CAST(COALESCE(stats.failure_count, 0) AS INTEGER) as "failure_count!: i64",
|
||||
stats.last_success as "last_success?: String",
|
||||
stats.last_failure as "last_failure?: String",
|
||||
stats.last_checked as "last_checked?: String",
|
||||
CAST(CASE WHEN reliable_nodes.node_id IS NOT NULL THEN 1 ELSE 0 END AS INTEGER) as "is_reliable!: i64",
|
||||
stats.avg_latency_ms as "avg_latency_ms?: f64",
|
||||
stats.min_latency_ms as "min_latency_ms?: f64",
|
||||
stats.max_latency_ms as "max_latency_ms?: f64",
|
||||
stats.last_latency_ms as "last_latency_ms?: f64"
|
||||
FROM monero_nodes n
|
||||
LEFT JOIN (
|
||||
SELECT
|
||||
node_id,
|
||||
SUM(CASE WHEN was_successful THEN 1 ELSE 0 END) as success_count,
|
||||
SUM(CASE WHEN NOT was_successful THEN 1 ELSE 0 END) as failure_count,
|
||||
MAX(CASE WHEN was_successful THEN timestamp END) as last_success,
|
||||
MAX(CASE WHEN NOT was_successful THEN timestamp END) as last_failure,
|
||||
MAX(timestamp) as last_checked,
|
||||
AVG(CASE WHEN was_successful AND latency_ms IS NOT NULL THEN latency_ms END) as avg_latency_ms,
|
||||
MIN(CASE WHEN was_successful AND latency_ms IS NOT NULL THEN latency_ms END) as min_latency_ms,
|
||||
MAX(CASE WHEN was_successful AND latency_ms IS NOT NULL THEN latency_ms END) as max_latency_ms,
|
||||
(SELECT latency_ms FROM health_checks hc2 WHERE hc2.node_id = health_checks.node_id ORDER BY timestamp DESC LIMIT 1) as last_latency_ms
|
||||
FROM health_checks
|
||||
GROUP BY node_id
|
||||
) stats ON n.id = stats.node_id
|
||||
LEFT JOIN (
|
||||
SELECT DISTINCT node_id FROM (
|
||||
SELECT
|
||||
n2.id as node_id,
|
||||
COALESCE(s2.success_count, 0) as success_count,
|
||||
COALESCE(s2.failure_count, 0) as failure_count,
|
||||
s2.avg_latency_ms,
|
||||
(CAST(COALESCE(s2.success_count, 0) AS REAL) / CAST(COALESCE(s2.success_count, 0) + COALESCE(s2.failure_count, 0) AS REAL)) *
|
||||
(MIN(COALESCE(s2.success_count, 0) + COALESCE(s2.failure_count, 0), 200) / 200.0) * 0.8 +
|
||||
CASE
|
||||
WHEN s2.avg_latency_ms IS NOT NULL THEN (1.0 - (MIN(s2.avg_latency_ms, 2000) / 2000.0)) * 0.2
|
||||
ELSE 0.0
|
||||
END as reliability_score
|
||||
FROM monero_nodes n2
|
||||
LEFT JOIN (
|
||||
SELECT
|
||||
node_id,
|
||||
SUM(CASE WHEN was_successful THEN 1 ELSE 0 END) as success_count,
|
||||
SUM(CASE WHEN NOT was_successful THEN 1 ELSE 0 END) as failure_count,
|
||||
AVG(CASE WHEN was_successful AND latency_ms IS NOT NULL THEN latency_ms END) as avg_latency_ms
|
||||
FROM health_checks
|
||||
GROUP BY node_id
|
||||
) s2 ON n2.id = s2.node_id
|
||||
WHERE n2.network = ? AND (COALESCE(s2.success_count, 0) + COALESCE(s2.failure_count, 0)) > 0
|
||||
ORDER BY reliability_score DESC
|
||||
LIMIT 4
|
||||
)
|
||||
) reliable_nodes ON n.id = reliable_nodes.node_id
|
||||
WHERE n.network = ?
|
||||
ORDER BY RANDOM()
|
||||
LIMIT ?
|
||||
"#,
|
||||
network,
|
||||
network,
|
||||
limit
|
||||
)
|
||||
.fetch_all(&self.pool)
|
||||
.await?;
|
||||
|
||||
return Ok(rows
|
||||
.into_iter()
|
||||
.map(|row| MoneroNode {
|
||||
id: Some(row.id),
|
||||
scheme: row.scheme,
|
||||
host: row.host,
|
||||
port: row.port,
|
||||
full_url: row.full_url,
|
||||
network: row.network,
|
||||
first_seen_at: row.first_seen_at,
|
||||
success_count: row.success_count,
|
||||
failure_count: row.failure_count,
|
||||
last_success: row.last_success,
|
||||
last_failure: row.last_failure,
|
||||
last_checked: row.last_checked,
|
||||
is_reliable: row.is_reliable != 0,
|
||||
avg_latency_ms: row.avg_latency_ms,
|
||||
min_latency_ms: row.min_latency_ms,
|
||||
max_latency_ms: row.max_latency_ms,
|
||||
last_latency_ms: row.last_latency_ms,
|
||||
})
|
||||
.collect());
|
||||
}
|
||||
|
||||
// If exclude_ids is not empty, we need to handle it differently
|
||||
// For now, get all nodes and filter in Rust (can be optimized with dynamic SQL)
|
||||
let fetch_limit = limit + exclude_ids.len() as i64 + 10; // Get extra to account for exclusions
|
||||
let all_rows = sqlx::query!(
|
||||
r#"
|
||||
SELECT
|
||||
n.id as "id!: i64",
|
||||
n.scheme,
|
||||
n.host,
|
||||
n.port,
|
||||
n.full_url,
|
||||
n.network,
|
||||
n.first_seen_at,
|
||||
CAST(COALESCE(stats.success_count, 0) AS INTEGER) as "success_count!: i64",
|
||||
CAST(COALESCE(stats.failure_count, 0) AS INTEGER) as "failure_count!: i64",
|
||||
stats.last_success as "last_success?: String",
|
||||
stats.last_failure as "last_failure?: String",
|
||||
stats.last_checked as "last_checked?: String",
|
||||
CAST(CASE WHEN reliable_nodes.node_id IS NOT NULL THEN 1 ELSE 0 END AS INTEGER) as "is_reliable!: i64",
|
||||
stats.avg_latency_ms as "avg_latency_ms?: f64",
|
||||
stats.min_latency_ms as "min_latency_ms?: f64",
|
||||
stats.max_latency_ms as "max_latency_ms?: f64",
|
||||
stats.last_latency_ms as "last_latency_ms?: f64"
|
||||
FROM monero_nodes n
|
||||
LEFT JOIN (
|
||||
SELECT
|
||||
node_id,
|
||||
SUM(CASE WHEN was_successful THEN 1 ELSE 0 END) as success_count,
|
||||
SUM(CASE WHEN NOT was_successful THEN 1 ELSE 0 END) as failure_count,
|
||||
MAX(CASE WHEN was_successful THEN timestamp END) as last_success,
|
||||
MAX(CASE WHEN NOT was_successful THEN timestamp END) as last_failure,
|
||||
MAX(timestamp) as last_checked,
|
||||
AVG(CASE WHEN was_successful AND latency_ms IS NOT NULL THEN latency_ms END) as avg_latency_ms,
|
||||
MIN(CASE WHEN was_successful AND latency_ms IS NOT NULL THEN latency_ms END) as min_latency_ms,
|
||||
MAX(CASE WHEN was_successful AND latency_ms IS NOT NULL THEN latency_ms END) as max_latency_ms,
|
||||
(SELECT latency_ms FROM health_checks hc2 WHERE hc2.node_id = health_checks.node_id ORDER BY timestamp DESC LIMIT 1) as last_latency_ms
|
||||
FROM health_checks
|
||||
GROUP BY node_id
|
||||
) stats ON n.id = stats.node_id
|
||||
LEFT JOIN (
|
||||
SELECT DISTINCT node_id FROM (
|
||||
SELECT
|
||||
n2.id as node_id,
|
||||
COALESCE(s2.success_count, 0) as success_count,
|
||||
COALESCE(s2.failure_count, 0) as failure_count,
|
||||
s2.avg_latency_ms,
|
||||
(CAST(COALESCE(s2.success_count, 0) AS REAL) / CAST(COALESCE(s2.success_count, 0) + COALESCE(s2.failure_count, 0) AS REAL)) *
|
||||
(MIN(COALESCE(s2.success_count, 0) + COALESCE(s2.failure_count, 0), 200) / 200.0) * 0.8 +
|
||||
CASE
|
||||
WHEN s2.avg_latency_ms IS NOT NULL THEN (1.0 - (MIN(s2.avg_latency_ms, 2000) / 2000.0)) * 0.2
|
||||
ELSE 0.0
|
||||
END as reliability_score
|
||||
FROM monero_nodes n2
|
||||
LEFT JOIN (
|
||||
SELECT
|
||||
node_id,
|
||||
SUM(CASE WHEN was_successful THEN 1 ELSE 0 END) as success_count,
|
||||
SUM(CASE WHEN NOT was_successful THEN 1 ELSE 0 END) as failure_count,
|
||||
AVG(CASE WHEN was_successful AND latency_ms IS NOT NULL THEN latency_ms END) as avg_latency_ms
|
||||
FROM health_checks
|
||||
GROUP BY node_id
|
||||
) s2 ON n2.id = s2.node_id
|
||||
WHERE n2.network = ? AND (COALESCE(s2.success_count, 0) + COALESCE(s2.failure_count, 0)) > 0
|
||||
ORDER BY reliability_score DESC
|
||||
LIMIT 4
|
||||
)
|
||||
) reliable_nodes ON n.id = reliable_nodes.node_id
|
||||
WHERE n.network = ?
|
||||
ORDER BY RANDOM()
|
||||
LIMIT ?
|
||||
"#,
|
||||
network,
|
||||
network,
|
||||
fetch_limit
|
||||
)
|
||||
.fetch_all(&self.pool)
|
||||
.await?;
|
||||
|
||||
// Convert exclude_ids to a HashSet for O(1) lookup
|
||||
let exclude_set: std::collections::HashSet<i64> = exclude_ids.iter().cloned().collect();
|
||||
|
||||
let nodes: Vec<MoneroNode> = all_rows
|
||||
.into_iter()
|
||||
.filter(|row| !exclude_set.contains(&row.id))
|
||||
.take(limit as usize)
|
||||
.map(|row| MoneroNode {
|
||||
id: Some(row.id),
|
||||
scheme: row.scheme,
|
||||
host: row.host,
|
||||
port: row.port,
|
||||
full_url: row.full_url,
|
||||
network: row.network,
|
||||
first_seen_at: row.first_seen_at,
|
||||
success_count: row.success_count,
|
||||
failure_count: row.failure_count,
|
||||
last_success: row.last_success,
|
||||
last_failure: row.last_failure,
|
||||
last_checked: row.last_checked,
|
||||
is_reliable: row.is_reliable != 0,
|
||||
avg_latency_ms: row.avg_latency_ms,
|
||||
min_latency_ms: row.min_latency_ms,
|
||||
max_latency_ms: row.max_latency_ms,
|
||||
last_latency_ms: row.last_latency_ms,
|
||||
})
|
||||
.collect();
|
||||
|
||||
Ok(nodes)
|
||||
}
|
||||
}
|
||||
|
||||
pub fn get_app_data_dir() -> Result<PathBuf> {
|
||||
let base_dir =
|
||||
data_dir().ok_or_else(|| anyhow::anyhow!("Could not determine system data directory"))?;
|
||||
|
||||
let app_dir = base_dir.join("monero-rpc-pool");
|
||||
|
||||
if !app_dir.exists() {
|
||||
std::fs::create_dir_all(&app_dir)?;
|
||||
info!("Created application data directory: {}", app_dir.display());
|
||||
}
|
||||
|
||||
Ok(app_dir)
|
||||
}
|
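Taken together the module is a small async facade over SQLite: open (or create) the database, upsert nodes, record health checks, and read back aggregates. A hedged sketch of a caller logging the counters for one network (the directory path is illustrative):

async fn example_stats() -> anyhow::Result<()> {
    let db = Database::new_with_data_dir(std::path::PathBuf::from("/tmp/rpc-pool")).await?;
    let (total, reachable, reliable) = db.get_node_stats("mainnet").await?;
    println!("mainnet nodes: {total} total, {reachable} reachable, {reliable} reliable");
    Ok(())
}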
383
monero-rpc-pool/src/discovery.rs
Normal file
@ -0,0 +1,383 @@
use std::collections::HashSet;
|
||||
use std::time::{Duration, Instant};
|
||||
|
||||
use anyhow::Result;
|
||||
use monero::Network;
|
||||
use rand::seq::SliceRandom;
|
||||
use reqwest::Client;
|
||||
use serde::Deserialize;
|
||||
use serde_json::Value;
|
||||
use tracing::{error, info, warn};
|
||||
use url;
|
||||
|
||||
use crate::database::Database;
|
||||
|
||||
#[derive(Debug, Deserialize)]
|
||||
struct MoneroFailResponse {
|
||||
monero: MoneroNodes,
|
||||
}
|
||||
|
||||
#[derive(Debug, Deserialize)]
|
||||
struct MoneroNodes {
|
||||
clear: Vec<String>,
|
||||
#[serde(default)]
|
||||
web_compatible: Vec<String>,
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
pub struct HealthCheckOutcome {
|
||||
pub was_successful: bool,
|
||||
pub latency: Duration,
|
||||
pub discovered_network: Option<Network>,
|
||||
}
|
||||
|
||||
#[derive(Clone)]
|
||||
pub struct NodeDiscovery {
|
||||
client: Client,
|
||||
db: Database,
|
||||
}
|
||||
|
||||
fn network_to_string(network: &Network) -> String {
|
||||
match network {
|
||||
Network::Mainnet => "mainnet".to_string(),
|
||||
Network::Stagenet => "stagenet".to_string(),
|
||||
Network::Testnet => "testnet".to_string(),
|
||||
}
|
||||
}
|
||||
|
||||
impl NodeDiscovery {
|
||||
pub fn new(db: Database) -> Result<Self> {
|
||||
let client = Client::builder()
|
||||
.timeout(Duration::from_secs(10))
|
||||
.user_agent("monero-rpc-pool/1.0")
|
||||
.build()
|
||||
.map_err(|e| anyhow::anyhow!("Failed to build HTTP client: {}", e))?;
|
||||
|
||||
Ok(Self { client, db })
|
||||
}
|
||||
|
||||
/// Fetch nodes from monero.fail API
|
||||
pub async fn fetch_mainnet_nodes_from_api(&self) -> Result<Vec<String>> {
|
||||
let url = "https://monero.fail/nodes.json?chain=monero";
|
||||
|
||||
let response = self
|
||||
.client
|
||||
.get(url)
|
||||
.timeout(Duration::from_secs(30))
|
||||
.send()
|
||||
.await?;
|
||||
|
||||
if !response.status().is_success() {
|
||||
return Err(anyhow::anyhow!("HTTP error: {}", response.status()));
|
||||
}
|
||||
|
||||
let monero_fail_response: MoneroFailResponse = response.json().await?;
|
||||
|
||||
// Combine clear and web_compatible nodes
|
||||
let mut nodes = monero_fail_response.monero.web_compatible;
|
||||
nodes.extend(monero_fail_response.monero.clear);
|
||||
|
||||
// Remove duplicates using HashSet for O(n) complexity
|
||||
let mut seen = HashSet::new();
|
||||
let mut unique_nodes = Vec::new();
|
||||
for node in nodes {
|
||||
if seen.insert(node.clone()) {
|
||||
unique_nodes.push(node);
|
||||
}
|
||||
}
|
||||
|
||||
// Shuffle nodes in random order
|
||||
let mut rng = rand::thread_rng();
|
||||
unique_nodes.shuffle(&mut rng);
|
||||
|
||||
info!(
|
||||
"Fetched {} mainnet nodes from monero.fail API",
|
||||
unique_nodes.len()
|
||||
);
|
||||
Ok(unique_nodes)
|
||||
}
|
||||
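For reference, deserialization only depends on the monero.clear and monero.web_compatible arrays; any extra keys the endpoint returns are ignored by serde. A sketch of a payload that would parse into MoneroFailResponse (values are fabricated, not captured from the real API):

fn example_parse_monero_fail_response() {
    let body = r#"{"monero":{"clear":["http://node.example.org:18081"],"web_compatible":["https://web.example.org:18089"]}}"#;
    let parsed: MoneroFailResponse = serde_json::from_str(body).expect("payload matches the structs above");
    assert_eq!(parsed.monero.clear.len() + parsed.monero.web_compatible.len(), 2);
}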
|
||||
/// Fetch nodes from monero.fail API and discover from other sources
|
||||
pub async fn discover_nodes_from_sources(&self, target_network: Network) -> Result<()> {
|
||||
// Only fetch from external sources for mainnet to avoid polluting test networks
|
||||
if target_network == Network::Mainnet {
|
||||
match self.fetch_mainnet_nodes_from_api().await {
|
||||
Ok(nodes) => {
|
||||
self.discover_and_insert_nodes(target_network, nodes)
|
||||
.await?;
|
||||
}
|
||||
Err(e) => {
|
||||
warn!("Failed to fetch nodes from monero.fail API: {}", e);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Enhanced health check that detects network and validates node identity
|
||||
pub async fn check_node_health(&self, url: &str) -> Result<HealthCheckOutcome> {
|
||||
let start_time = Instant::now();
|
||||
|
||||
let rpc_request = serde_json::json!({
|
||||
"jsonrpc": "2.0",
|
||||
"id": "0",
|
||||
"method": "get_info"
|
||||
});
|
||||
|
||||
let full_url = format!("{}/json_rpc", url);
|
||||
let response = self.client.post(&full_url).json(&rpc_request).send().await;
|
||||
|
||||
let latency = start_time.elapsed();
|
||||
|
||||
match response {
|
||||
Ok(resp) => {
|
||||
if resp.status().is_success() {
|
||||
match resp.json::<Value>().await {
|
||||
Ok(json) => {
|
||||
if let Some(result) = json.get("result") {
|
||||
// Extract network information from get_info response
|
||||
let discovered_network = self.extract_network_from_info(result);
|
||||
|
||||
Ok(HealthCheckOutcome {
|
||||
was_successful: true,
|
||||
latency,
|
||||
discovered_network,
|
||||
})
|
||||
} else {
|
||||
Ok(HealthCheckOutcome {
|
||||
was_successful: false,
|
||||
latency,
|
||||
discovered_network: None,
|
||||
})
|
||||
}
|
||||
}
|
||||
Err(_e) => Ok(HealthCheckOutcome {
|
||||
was_successful: false,
|
||||
latency,
|
||||
discovered_network: None,
|
||||
}),
|
||||
}
|
||||
} else {
|
||||
Ok(HealthCheckOutcome {
|
||||
was_successful: false,
|
||||
latency,
|
||||
discovered_network: None,
|
||||
})
|
||||
}
|
||||
}
|
||||
Err(_e) => Ok(HealthCheckOutcome {
|
||||
was_successful: false,
|
||||
latency,
|
||||
discovered_network: None,
|
||||
}),
|
||||
}
|
||||
}
|
||||
|
||||
/// Extract network type from get_info response
|
||||
fn extract_network_from_info(&self, info_result: &Value) -> Option<Network> {
|
||||
// Check nettype field (0 = mainnet, 1 = testnet, 2 = stagenet)
|
||||
if let Some(nettype) = info_result.get("nettype").and_then(|v| v.as_u64()) {
|
||||
return match nettype {
|
||||
0 => Some(Network::Mainnet),
|
||||
1 => Some(Network::Testnet),
|
||||
2 => Some(Network::Stagenet),
|
||||
_ => None,
|
||||
};
|
||||
}
|
||||
|
||||
// Fallback: check if testnet or stagenet is mentioned in fields
|
||||
if let Some(testnet) = info_result.get("testnet").and_then(|v| v.as_bool()) {
|
||||
return if testnet {
|
||||
Some(Network::Testnet)
|
||||
} else {
|
||||
Some(Network::Mainnet)
|
||||
};
|
||||
}
|
||||
|
||||
// Additional heuristics could be added here
|
||||
None
|
||||
}
|
||||
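So the parser maps a numeric nettype (0, 1, 2) directly to a network, falls back to the boolean testnet flag, and otherwise reports None. A sketch of that mapping with fabricated inputs (these are not captured daemon responses; real nodes may encode the field differently, which is exactly why the fallback and the None case exist):

fn example_network_detection(discovery: &NodeDiscovery) {
    let mainnet = serde_json::json!({ "nettype": 0 });
    let stagenet = serde_json::json!({ "nettype": 2 });
    let unknown = serde_json::json!({ "status": "OK" });

    assert!(matches!(discovery.extract_network_from_info(&mainnet), Some(Network::Mainnet)));
    assert!(matches!(discovery.extract_network_from_info(&stagenet), Some(Network::Stagenet)));
    assert!(discovery.extract_network_from_info(&unknown).is_none());
}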
|
||||
/// Updated health check workflow with identification and validation logic
|
||||
pub async fn health_check_all_nodes(&self, target_network: Network) -> Result<()> {
|
||||
info!(
|
||||
"Starting health check for all nodes targeting network: {}",
|
||||
network_to_string(&target_network)
|
||||
);
|
||||
|
||||
// Get all nodes from database with proper field mapping
|
||||
let all_nodes = sqlx::query!(
|
||||
r#"
|
||||
SELECT
|
||||
id as "id!: i64",
|
||||
scheme,
|
||||
host,
|
||||
port,
|
||||
full_url,
|
||||
network as "network!: String",
|
||||
first_seen_at
|
||||
FROM monero_nodes
|
||||
ORDER BY id
|
||||
"#
|
||||
)
|
||||
.fetch_all(&self.db.pool)
|
||||
.await?;
|
||||
|
||||
let mut checked_count = 0;
|
||||
let mut healthy_count = 0;
|
||||
let mut corrected_count = 0;
|
||||
|
||||
for node in all_nodes {
|
||||
match self.check_node_health(&node.full_url).await {
|
||||
Ok(outcome) => {
|
||||
// Always record the health check
|
||||
self.db
|
||||
.record_health_check(
|
||||
&node.full_url,
|
||||
outcome.was_successful,
|
||||
if outcome.was_successful {
|
||||
Some(outcome.latency.as_millis() as f64)
|
||||
} else {
|
||||
None
|
||||
},
|
||||
)
|
||||
.await?;
|
||||
|
||||
if outcome.was_successful {
|
||||
healthy_count += 1;
|
||||
|
||||
// Validate network consistency
|
||||
if let Some(discovered_network) = outcome.discovered_network {
|
||||
let discovered_network_str = network_to_string(&discovered_network);
|
||||
if node.network != discovered_network_str {
|
||||
warn!("Network mismatch detected for node {}: stored={}, discovered={}. Correcting...",
|
||||
node.full_url, node.network, discovered_network_str);
|
||||
self.db
|
||||
.update_node_network(&node.full_url, &discovered_network_str)
|
||||
.await?;
|
||||
corrected_count += 1;
|
||||
}
|
||||
}
|
||||
}
|
||||
checked_count += 1;
|
||||
}
|
||||
Err(_e) => {
|
||||
self.db
|
||||
.record_health_check(&node.full_url, false, None)
|
||||
.await?;
|
||||
}
|
||||
}
|
||||
|
||||
// Small delay to avoid hammering nodes
|
||||
tokio::time::sleep(Duration::from_secs(2)).await;
|
||||
}
|
||||
|
||||
info!(
|
||||
"Health check completed: {}/{} nodes healthy, {} corrected",
|
||||
healthy_count, checked_count, corrected_count
|
||||
);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Periodic discovery task with improved error handling
|
||||
pub async fn periodic_discovery_task(&self, target_network: Network) -> Result<()> {
|
||||
let mut interval = tokio::time::interval(Duration::from_secs(3600)); // Every hour
|
||||
|
||||
loop {
|
||||
interval.tick().await;
|
||||
|
||||
info!(
|
||||
"Running periodic node discovery for network: {}",
|
||||
network_to_string(&target_network)
|
||||
);
|
||||
|
||||
// Discover new nodes from sources
|
||||
if let Err(e) = self.discover_nodes_from_sources(target_network).await {
|
||||
error!("Failed to discover nodes: {}", e);
|
||||
}
|
||||
|
||||
// Health check all nodes (will identify networks automatically)
|
||||
if let Err(e) = self.health_check_all_nodes(target_network).await {
|
||||
error!("Failed to perform health check: {}", e);
|
||||
}
|
||||
|
||||
// Log stats for all networks
|
||||
for network in &[Network::Mainnet, Network::Stagenet, Network::Testnet] {
|
||||
let network_str = network_to_string(network);
|
||||
if let Ok((total, reachable, reliable)) = self.db.get_node_stats(&network_str).await
|
||||
{
|
||||
if total > 0 {
|
||||
info!(
|
||||
"Node stats for {}: {} total, {} reachable, {} reliable",
|
||||
network_str, total, reachable, reliable
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Insert configured nodes for a specific network
|
||||
pub async fn discover_and_insert_nodes(
|
||||
&self,
|
||||
target_network: Network,
|
||||
nodes: Vec<String>,
|
||||
) -> Result<()> {
|
||||
let mut success_count = 0;
|
||||
let mut error_count = 0;
|
||||
let target_network_str = network_to_string(&target_network);
|
||||
|
||||
for node_url in nodes.iter() {
|
||||
if let Ok(url) = url::Url::parse(node_url) {
|
||||
let scheme = url.scheme();
|
||||
|
||||
// Validate scheme - must be http or https
|
||||
if !matches!(scheme, "http" | "https") {
|
||||
continue;
|
||||
}
|
||||
|
||||
// Validate host - must be non-empty
|
||||
let Some(host) = url.host_str() else {
|
||||
continue;
|
||||
};
|
||||
if host.is_empty() {
|
||||
continue;
|
||||
}
|
||||
|
||||
// Validate port - must be present
|
||||
let Some(port) = url.port() else {
|
||||
continue;
|
||||
};
|
||||
let port = port as i64;
|
||||
|
||||
match self
|
||||
.db
|
||||
.upsert_node(scheme, host, port, &target_network_str)
|
||||
.await
|
||||
{
|
||||
Ok(_) => {
|
||||
success_count += 1;
|
||||
}
|
||||
Err(e) => {
|
||||
error_count += 1;
|
||||
error!(
|
||||
"Failed to insert configured node {}://{}:{}: {}",
|
||||
scheme, host, port, e
|
||||
);
|
||||
}
|
||||
}
|
||||
} else {
|
||||
error_count += 1;
|
||||
error!("Failed to parse node URL: {}", node_url);
|
||||
}
|
||||
}
|
||||
|
||||
info!(
|
||||
"Configured node insertion complete: {} successful, {} errors",
|
||||
success_count, error_count
|
||||
);
|
||||
Ok(())
|
||||
}
|
||||
}
|
218
monero-rpc-pool/src/lib.rs
Normal file
@ -0,0 +1,218 @@
use std::sync::Arc;
|
||||
|
||||
use anyhow::Result;
|
||||
use axum::{
|
||||
routing::{any, get},
|
||||
Router,
|
||||
};
|
||||
use monero::Network;
|
||||
use tokio::sync::RwLock;
|
||||
use tokio::task::JoinHandle;
|
||||
use tower_http::cors::CorsLayer;
|
||||
use tracing::{error, info};
|
||||
|
||||
fn network_to_string(network: &Network) -> String {
|
||||
match network {
|
||||
Network::Mainnet => "mainnet".to_string(),
|
||||
Network::Stagenet => "stagenet".to_string(),
|
||||
Network::Testnet => "testnet".to_string(),
|
||||
}
|
||||
}
|
||||
|
||||
pub mod config;
|
||||
pub mod database;
|
||||
pub mod discovery;
|
||||
pub mod pool;
|
||||
pub mod simple_handlers;
|
||||
|
||||
use config::Config;
|
||||
use database::Database;
|
||||
use discovery::NodeDiscovery;
|
||||
use pool::{NodePool, PoolStatus};
|
||||
use simple_handlers::{simple_proxy_handler, simple_stats_handler};
|
||||
|
||||
#[derive(Clone)]
|
||||
pub struct AppState {
|
||||
pub node_pool: Arc<RwLock<NodePool>>,
|
||||
}
|
||||
|
||||
/// Manages background tasks for the RPC pool
|
||||
pub struct TaskManager {
|
||||
pub status_update_handle: JoinHandle<()>,
|
||||
pub discovery_handle: JoinHandle<()>,
|
||||
}
|
||||
|
||||
impl Drop for TaskManager {
|
||||
fn drop(&mut self) {
|
||||
self.status_update_handle.abort();
|
||||
self.discovery_handle.abort();
|
||||
}
|
||||
}
|
||||
|
||||
/// Information about a running RPC pool server
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct ServerInfo {
|
||||
pub port: u16,
|
||||
pub host: String,
|
||||
}
|
||||
|
||||
async fn create_app_with_receiver(
|
||||
config: Config,
|
||||
network: Network,
|
||||
) -> Result<(
|
||||
Router,
|
||||
tokio::sync::broadcast::Receiver<PoolStatus>,
|
||||
TaskManager,
|
||||
)> {
|
||||
// Initialize database
|
||||
let db = Database::new_with_data_dir(config.data_dir.clone()).await?;
|
||||
|
||||
// Initialize node pool with network
|
||||
let network_str = network_to_string(&network);
|
||||
let (node_pool, status_receiver) = NodePool::new(db.clone(), network_str.clone());
|
||||
let node_pool = Arc::new(RwLock::new(node_pool));
|
||||
|
||||
// Initialize discovery service
|
||||
let discovery = NodeDiscovery::new(db.clone())?;
|
||||
|
||||
// Start background tasks
|
||||
let node_pool_for_health_check = node_pool.clone();
|
||||
let status_update_handle = tokio::spawn(async move {
|
||||
loop {
|
||||
// Publish status update after health check
|
||||
let pool_guard = node_pool_for_health_check.read().await;
|
||||
if let Err(e) = pool_guard.publish_status_update().await {
|
||||
error!("Failed to publish status update after health check: {}", e);
|
||||
}
|
||||
|
||||
tokio::time::sleep(std::time::Duration::from_secs(10)).await;
|
||||
}
|
||||
});
|
||||
|
||||
// Start periodic discovery task
|
||||
let discovery_clone = discovery.clone();
|
||||
let network_clone = network;
|
||||
let discovery_handle = tokio::spawn(async move {
|
||||
if let Err(e) = discovery_clone.periodic_discovery_task(network_clone).await {
|
||||
error!(
|
||||
"Periodic discovery task failed for network {}: {}",
|
||||
network_to_string(&network_clone),
|
||||
e
|
||||
);
|
||||
}
|
||||
});
|
||||
|
||||
let task_manager = TaskManager {
|
||||
status_update_handle,
|
||||
discovery_handle,
|
||||
};
|
||||
|
||||
let app_state = AppState { node_pool };
|
||||
|
||||
// Build the app
|
||||
let app = Router::new()
|
||||
.route("/stats", get(simple_stats_handler))
|
||||
.route("/*path", any(simple_proxy_handler))
|
||||
.layer(CorsLayer::permissive())
|
||||
.with_state(app_state);
|
||||
|
||||
Ok((app, status_receiver, task_manager))
|
||||
}
|
||||
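The router above exposes exactly two surfaces: GET /stats for pool statistics and a catch-all route that forwards every other path to the selected upstream node. Against a locally running instance that looks roughly like this (port and payload are illustrative):

curl http://127.0.0.1:18081/stats
curl -X POST http://127.0.0.1:18081/json_rpc \
  -H 'Content-Type: application/json' \
  -d '{"jsonrpc":"2.0","id":"0","method":"get_info"}'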
|
||||
pub async fn create_app(config: Config, network: Network) -> Result<Router> {
|
||||
    let (app, _, _task_manager) = create_app_with_receiver(config, network).await?;
    // Note: task_manager is dropped here, so tasks will be aborted when this function returns
    // This is intentional for the simple create_app use case
    Ok(app)
}

/// Create an app with a custom data directory for the database
pub async fn create_app_with_data_dir(
    config: Config,
    network: Network,
    data_dir: std::path::PathBuf,
) -> Result<Router> {
    let config_with_data_dir = Config::new_with_port(config.host, config.port, data_dir);
    create_app(config_with_data_dir, network).await
}

pub async fn run_server(config: Config, network: Network) -> Result<()> {
    let app = create_app(config.clone(), network).await?;

    let bind_address = format!("{}:{}", config.host, config.port);
    info!("Starting server on {}", bind_address);

    let listener = tokio::net::TcpListener::bind(&bind_address).await?;
    info!("Server listening on {}", bind_address);

    axum::serve(listener, app).await?;
    Ok(())
}

/// Run a server with a custom data directory
pub async fn run_server_with_data_dir(
    config: Config,
    network: Network,
    data_dir: std::path::PathBuf,
) -> Result<()> {
    let config_with_data_dir = Config::new_with_port(config.host, config.port, data_dir);
    run_server(config_with_data_dir, network).await
}

/// Start a server with a random port for library usage
/// Returns the server info with the actual port used, a receiver for pool status updates, and task manager
pub async fn start_server_with_random_port(
    config: Config,
    network: Network,
) -> Result<(
    ServerInfo,
    tokio::sync::broadcast::Receiver<PoolStatus>,
    TaskManager,
)> {
    // Clone the host before moving config
    let host = config.host.clone();

    // If port is 0, the system will assign a random available port
    let config_with_random_port = Config::new_random_port(config.host, config.data_dir);

    let (app, status_receiver, task_manager) =
        create_app_with_receiver(config_with_random_port, network).await?;

    // Bind to port 0 to get a random available port
    let listener = tokio::net::TcpListener::bind(format!("{}:0", host)).await?;
    let actual_addr = listener.local_addr()?;

    let server_info = ServerInfo {
        port: actual_addr.port(),
        host: host.clone(),
    };

    info!(
        "Started server on {}:{} (random port)",
        server_info.host, server_info.port
    );

    // Start the server in a background task
    tokio::spawn(async move {
        if let Err(e) = axum::serve(listener, app).await {
            error!("Server error: {}", e);
        }
    });

    Ok((server_info, status_receiver, task_manager))
}

/// Start a server with a random port and custom data directory for library usage
/// Returns the server info with the actual port used, a receiver for pool status updates, and task manager
pub async fn start_server_with_random_port_and_data_dir(
    config: Config,
    network: Network,
    data_dir: std::path::PathBuf,
) -> Result<(
    ServerInfo,
    tokio::sync::broadcast::Receiver<PoolStatus>,
    TaskManager,
)> {
    let config_with_data_dir = Config::new_random_port(config.host, data_dir);
    start_server_with_random_port(config_with_data_dir, network).await
}
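For library consumers of these helpers, a minimal embedding sketch under stated assumptions: the re-export paths, host string and data directory below are illustrative, and the returned TaskManager must stay bound for as long as the background tasks should run.

use monero::Network;
use monero_rpc_pool::{config::Config, start_server_with_random_port};

async fn embed_pool() -> anyhow::Result<()> {
    // Random port plus a throwaway data directory (illustrative values).
    let config = Config::new_random_port(
        "127.0.0.1".to_string(),
        std::env::temp_dir().join("monero-rpc-pool-example"),
    );

    // Keep task_manager bound; dropping it aborts the background tasks.
    let (info, mut status_rx, _task_manager) =
        start_server_with_random_port(config, Network::Stagenet).await?;

    // Wallets can now be pointed at the local proxy.
    let daemon_url = format!("http://{}:{}", info.host, info.port);
    println!("pool proxy listening at {}", daemon_url);

    // Consume pool status updates published by the health checker.
    while let Ok(status) = status_rx.recv().await {
        println!("healthy nodes: {}", status.healthy_node_count);
    }
    Ok(())
}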
177
monero-rpc-pool/src/main.rs
Normal file
@ -0,0 +1,177 @@
|
|||
use clap::Parser;
|
||||
use tracing::{info, warn};
|
||||
use tracing_subscriber::{self, EnvFilter};
|
||||
|
||||
use monero_rpc_pool::database::Database;
|
||||
use monero_rpc_pool::discovery::NodeDiscovery;
|
||||
use monero_rpc_pool::{config::Config, run_server};
|
||||
|
||||
use monero::Network;
|
||||
|
||||
fn parse_network(s: &str) -> Result<Network, String> {
|
||||
match s.to_lowercase().as_str() {
|
||||
"mainnet" => Ok(Network::Mainnet),
|
||||
"stagenet" => Ok(Network::Stagenet),
|
||||
"testnet" => Ok(Network::Testnet),
|
||||
_ => Err(format!(
|
||||
"Invalid network: {}. Must be mainnet, stagenet, or testnet",
|
||||
s
|
||||
)),
|
||||
}
|
||||
}
|
||||
|
||||
fn network_to_string(network: &Network) -> String {
|
||||
match network {
|
||||
Network::Mainnet => "mainnet".to_string(),
|
||||
Network::Stagenet => "stagenet".to_string(),
|
||||
Network::Testnet => "testnet".to_string(),
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Parser)]
|
||||
#[command(name = "monero-rpc-pool")]
|
||||
#[command(about = "A load-balancing HTTP proxy for Monero RPC nodes")]
|
||||
#[command(version)]
|
||||
struct Args {
|
||||
#[arg(long, default_value = "127.0.0.1")]
|
||||
#[arg(help = "Host address to bind the server to")]
|
||||
host: String,
|
||||
|
||||
#[arg(short, long, default_value = "18081")]
|
||||
#[arg(help = "Port to bind the server to")]
|
||||
port: u16,
|
||||
|
||||
#[arg(long, value_delimiter = ',')]
|
||||
#[arg(help = "Comma-separated list of Monero node URLs (overrides network-based discovery)")]
|
||||
nodes: Option<Vec<String>>,
|
||||
|
||||
#[arg(short, long, default_value = "mainnet")]
|
||||
#[arg(help = "Network to use for automatic node discovery")]
|
||||
#[arg(value_parser = parse_network)]
|
||||
network: Network,
|
||||
|
||||
#[arg(short, long)]
|
||||
#[arg(help = "Enable verbose logging")]
|
||||
verbose: bool,
|
||||
}
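// Illustrative invocations of the CLI defined above (node URLs are hypothetical):
//   monero-rpc-pool --network stagenet --port 18081
//   monero-rpc-pool --nodes http://node1.example.org:18081,http://node2.example.org:18081 --verbose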
|
||||
|
||||
// Custom filter function that overrides log levels for our crate
|
||||
fn create_level_override_filter(base_filter: &str) -> EnvFilter {
|
||||
// Parse the base filter and modify it to treat all monero_rpc_pool logs as trace
|
||||
let mut filter = EnvFilter::new(base_filter);
|
||||
|
||||
// Add a directive that treats all levels from our crate as trace
|
||||
filter = filter.add_directive("monero_rpc_pool=trace".parse().unwrap());
|
||||
|
||||
filter
|
||||
}
|
||||
|
||||
#[tokio::main]
|
||||
async fn main() -> Result<(), Box<dyn std::error::Error>> {
|
||||
let args = Args::parse();
|
||||
|
||||
// Create a filter that treats all logs from our crate as traces
|
||||
let base_filter = if args.verbose {
|
||||
// In verbose mode, show logs from other crates at WARN level
|
||||
"warn"
|
||||
} else {
|
||||
// In normal mode, show logs from other crates at ERROR level
|
||||
"error"
|
||||
};
|
||||
|
||||
let filter = create_level_override_filter(base_filter);
|
||||
|
||||
tracing_subscriber::fmt()
|
||||
.with_env_filter(filter)
|
||||
.with_target(false)
|
||||
.with_file(true)
|
||||
.with_line_number(true)
|
||||
.init();
|
||||
|
||||
// Store node count for later logging before potentially moving args.nodes
|
||||
let manual_node_count = args.nodes.as_ref().map(|nodes| nodes.len());
|
||||
|
||||
// Determine nodes to use and set up discovery
|
||||
let _nodes = if let Some(manual_nodes) = args.nodes {
|
||||
info!(
|
||||
"Using manually specified nodes for network: {}",
|
||||
network_to_string(&args.network)
|
||||
);
|
||||
|
||||
// Insert manual nodes into database with network information
|
||||
let db = Database::new().await?;
|
||||
let discovery = NodeDiscovery::new(db.clone())?;
|
||||
let mut parsed_nodes = Vec::new();
|
||||
|
||||
for node_url in &manual_nodes {
|
||||
// Parse the URL to extract components
|
||||
if let Ok(url) = url::Url::parse(node_url) {
|
||||
let scheme = url.scheme().to_string();
|
||||
let _protocol = if scheme == "https" { "ssl" } else { "tcp" };
|
||||
let host = url.host_str().unwrap_or("").to_string();
|
||||
let port = url
|
||||
.port()
|
||||
.unwrap_or(if scheme == "https" { 443 } else { 80 })
|
||||
as i64;
|
||||
|
||||
let full_url = format!("{}://{}:{}", scheme, host, port);
|
||||
|
||||
// Insert into database
|
||||
if let Err(e) = db
|
||||
.upsert_node(&scheme, &host, port, &network_to_string(&args.network))
|
||||
.await
|
||||
{
|
||||
warn!("Failed to insert manual node {}: {}", node_url, e);
|
||||
} else {
|
||||
parsed_nodes.push(full_url);
|
||||
}
|
||||
} else {
|
||||
warn!("Failed to parse manual node URL: {}", node_url);
|
||||
}
|
||||
}
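// Example of the parsing above (hypothetical input): "https://node.example.org" yields
// scheme "https", host "node.example.org" and default port 443, and is kept as
// "https://node.example.org:443"; "http://10.0.0.5:18081" keeps its explicit port.
// URLs that fail to parse are skipped with a warning.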
|
||||
|
||||
// Use manual nodes for discovery
|
||||
discovery
|
||||
.discover_and_insert_nodes(args.network, manual_nodes)
|
||||
.await?;
|
||||
parsed_nodes
|
||||
} else {
|
||||
info!(
|
||||
"Setting up automatic node discovery for {} network",
|
||||
network_to_string(&args.network)
|
||||
);
|
||||
let db = Database::new().await?;
|
||||
let discovery = NodeDiscovery::new(db.clone())?;
|
||||
|
||||
// Start discovery process
|
||||
discovery.discover_nodes_from_sources(args.network).await?;
|
||||
Vec::new() // Return empty vec for consistency
|
||||
};
|
||||
|
||||
let config = Config::new_with_port(
|
||||
args.host,
|
||||
args.port,
|
||||
std::env::temp_dir().join("monero-rpc-pool"),
|
||||
);
|
||||
|
||||
let node_count_msg = if args.verbose {
|
||||
match manual_node_count {
|
||||
Some(count) => format!("{} manual nodes configured", count),
|
||||
None => "using automatic discovery".to_string(),
|
||||
}
|
||||
} else {
|
||||
"configured".to_string()
|
||||
};
|
||||
|
||||
info!(
|
||||
"Starting Monero RPC Pool\nConfiguration:\n Host: {}\n Port: {}\n Network: {}\n Nodes: {}",
|
||||
config.host, config.port, network_to_string(&args.network), node_count_msg
|
||||
);
|
||||
|
||||
if let Err(e) = run_server(config, args.network).await {
|
||||
eprintln!("Server error: {}", e);
|
||||
std::process::exit(1);
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
270
monero-rpc-pool/src/pool.rs
Normal file
@ -0,0 +1,270 @@
use anyhow::{Context, Result};
use rand::prelude::*;
use tokio::sync::broadcast;
use tracing::debug;
use typeshare::typeshare;

use crate::database::Database;

#[derive(Debug, Clone, serde::Serialize)]
#[typeshare]
pub struct PoolStatus {
    pub total_node_count: u32,
    pub healthy_node_count: u32,
    #[typeshare(serialized_as = "number")]
    pub successful_health_checks: u64,
    #[typeshare(serialized_as = "number")]
    pub unsuccessful_health_checks: u64,
    pub top_reliable_nodes: Vec<ReliableNodeInfo>,
}

#[derive(Debug, Clone, serde::Serialize)]
#[typeshare]
pub struct ReliableNodeInfo {
    pub url: String,
    pub success_rate: f64,
    pub avg_latency_ms: Option<f64>,
}

pub struct NodePool {
    db: Database,
    network: String,
    status_sender: broadcast::Sender<PoolStatus>,
}

impl NodePool {
    pub fn new(db: Database, network: String) -> (Self, broadcast::Receiver<PoolStatus>) {
        let (status_sender, status_receiver) = broadcast::channel(100);
        let pool = Self {
            db,
            network,
            status_sender,
        };
        (pool, status_receiver)
    }

    /// Get next node using Power of Two Choices algorithm
    /// Only considers identified nodes (nodes with network set)
    pub async fn get_next_node(&self) -> Result<Option<String>> {
        let candidate_nodes = self.db.get_identified_nodes(&self.network).await?;

        if candidate_nodes.is_empty() {
            debug!("No identified nodes available for network {}", self.network);
            return Ok(None);
        }

        if candidate_nodes.len() == 1 {
            return Ok(Some(candidate_nodes[0].full_url.clone()));
        }

        // Power of Two Choices: pick 2 random nodes, select the better one
        let mut rng = thread_rng();
        let node1 = candidate_nodes.choose(&mut rng).unwrap();
        let node2 = candidate_nodes.choose(&mut rng).unwrap();

        let selected =
            if self.calculate_goodness_score(node1) >= self.calculate_goodness_score(node2) {
                node1
            } else {
                node2
            };

        debug!(
            "Selected node using P2C for network {}: {}",
            self.network, selected.full_url
        );
        Ok(Some(selected.full_url.clone()))
    }

    /// Calculate goodness score based on usage-based recency
    /// Score is a function of success rate and latency from last N health checks
    fn calculate_goodness_score(&self, node: &crate::database::MoneroNode) -> f64 {
        let total_checks = node.success_count + node.failure_count;
        if total_checks == 0 {
            return 0.0;
        }

        let success_rate = node.success_count as f64 / total_checks as f64;

        // Weight by recency (more recent interactions = higher weight)
        let recency_weight = (total_checks as f64).min(200.0) / 200.0;
        let mut score = success_rate * recency_weight;

        // Factor in latency - lower latency = higher score
        if let Some(avg_latency) = node.avg_latency_ms {
            let latency_factor = 1.0 - (avg_latency.min(2000.0) / 2000.0);
            score = score * 0.8 + latency_factor * 0.2; // 80% success rate, 20% latency
        }

        score
    }

    pub async fn record_success(&self, url: &str, latency_ms: f64) -> Result<()> {
        self.db
            .record_health_check(url, true, Some(latency_ms))
            .await?;
        Ok(())
    }

    pub async fn record_failure(&self, url: &str) -> Result<()> {
        self.db.record_health_check(url, false, None).await?;
        Ok(())
    }

    pub async fn publish_status_update(&self) -> Result<()> {
        let status = self.get_current_status().await?;
        let _ = self.status_sender.send(status); // Ignore if no receivers
        Ok(())
    }

    pub async fn get_current_status(&self) -> Result<PoolStatus> {
        let (total, reachable, _reliable) = self.db.get_node_stats(&self.network).await?;
        let reliable_nodes = self.db.get_reliable_nodes(&self.network).await?;
        let (successful_checks, unsuccessful_checks) =
            self.db.get_health_check_stats(&self.network).await?;

        let top_reliable_nodes = reliable_nodes
            .into_iter()
            .take(5)
            .map(|node| ReliableNodeInfo {
                url: node.full_url.clone(),
                success_rate: node.success_rate(),
                avg_latency_ms: node.avg_latency_ms,
            })
            .collect();

        Ok(PoolStatus {
            total_node_count: total as u32,
            healthy_node_count: reachable as u32,
            successful_health_checks: successful_checks,
            unsuccessful_health_checks: unsuccessful_checks,
            top_reliable_nodes,
        })
    }

    /// Get top reliable nodes with fill-up logic to ensure pool size
    /// First tries to get top nodes based on recent success, then fills up with random nodes
    pub async fn get_top_reliable_nodes(
        &self,
        limit: usize,
    ) -> Result<Vec<crate::database::MoneroNode>> {
        debug!(
            "Getting top reliable nodes for network {} (target: {})",
            self.network, limit
        );

        // Step 1: Try primary fetch - get top nodes based on recent success (last 200 health checks)
        let mut top_nodes = self
            .db
            .get_top_nodes_by_recent_success(&self.network, 200, limit as i64)
            .await
            .context("Failed to get top nodes by recent success")?;

        debug!(
            "Primary fetch returned {} nodes for network {} (target: {})",
            top_nodes.len(),
            self.network,
            limit
        );

        // Step 2: If primary fetch didn't return enough nodes, fall back to any identified nodes with successful health checks
        if top_nodes.len() < limit {
            debug!("Primary fetch returned insufficient nodes, falling back to any identified nodes with successful health checks");
            top_nodes = self
                .db
                .get_identified_nodes_with_success(&self.network)
                .await?;

            debug!(
                "Fallback fetch returned {} nodes with successful health checks for network {}",
                top_nodes.len(),
                self.network
            );
        }

        // Step 3: Check if we still don't have enough nodes
        if top_nodes.len() < limit {
            let needed = limit - top_nodes.len();
            debug!(
                "Pool needs {} more nodes to reach target of {} for network {}",
                needed, limit, self.network
            );

            // Step 4: Collect exclusion IDs from nodes already selected
            let exclude_ids: Vec<i64> = top_nodes.iter().filter_map(|node| node.id).collect();

            // Step 5: Secondary fetch - get random nodes to fill up
            let random_fillers = self
                .db
                .get_random_nodes(&self.network, needed as i64, &exclude_ids)
                .await?;

            debug!(
                "Secondary fetch returned {} random nodes for network {}",
                random_fillers.len(),
                self.network
            );

            // Step 6: Combine lists
            top_nodes.extend(random_fillers);
        }

        debug!(
            "Final pool size: {} nodes for network {} (target: {})",
            top_nodes.len(),
            self.network,
            limit
        );

        Ok(top_nodes)
    }

    pub async fn get_pool_stats(&self) -> Result<PoolStats> {
        let (total, reachable, reliable) = self.db.get_node_stats(&self.network).await?;
        let reliable_nodes = self.db.get_reliable_nodes(&self.network).await?;

        let avg_reliable_latency = if reliable_nodes.is_empty() {
            None
        } else {
            let total_latency: f64 = reliable_nodes
                .iter()
                .filter_map(|node| node.avg_latency_ms)
                .sum();
            let count = reliable_nodes
                .iter()
                .filter(|node| node.avg_latency_ms.is_some())
                .count();

            if count > 0 {
                Some(total_latency / count as f64)
            } else {
                None
            }
        };

        Ok(PoolStats {
            total_nodes: total,
            reachable_nodes: reachable,
            reliable_nodes: reliable,
            avg_reliable_latency_ms: avg_reliable_latency,
        })
    }
}

#[derive(Debug)]
pub struct PoolStats {
    pub total_nodes: i64,
    pub reachable_nodes: i64,
    pub reliable_nodes: i64,
    pub avg_reliable_latency_ms: Option<f64>, // TODO: Why is this an Option, we hate Options
}

impl PoolStats {
    pub fn health_percentage(&self) -> f64 {
        if self.total_nodes == 0 {
            0.0
        } else {
            (self.reachable_nodes as f64 / self.total_nodes as f64) * 100.0
        }
    }
}
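To make the selection logic above concrete, a small worked example that mirrors calculate_goodness_score and the Power of Two Choices comparison; all numbers are illustrative.

// Mirrors calculate_goodness_score above, for illustration only.
fn example_goodness(success: u32, failure: u32, avg_latency_ms: Option<f64>) -> f64 {
    let total = (success + failure) as f64;
    if total == 0.0 {
        return 0.0;
    }
    let success_rate = f64::from(success) / total;
    let recency_weight = total.min(200.0) / 200.0;
    let mut score = success_rate * recency_weight;
    if let Some(latency) = avg_latency_ms {
        let latency_factor = 1.0 - (latency.min(2000.0) / 2000.0);
        score = score * 0.8 + latency_factor * 0.2;
    }
    score
}

// Node A: 180 successes / 20 failures, 150 ms average latency.
//   success_rate = 0.9, recency_weight = 200/200 = 1.0, base = 0.9
//   latency_factor = 1 - 150/2000 = 0.925  ->  score = 0.9 * 0.8 + 0.925 * 0.2 = 0.905
// Node B: 10 successes / 0 failures, 900 ms average latency.
//   success_rate = 1.0, recency_weight = 10/200 = 0.05, base = 0.05
//   latency_factor = 1 - 900/2000 = 0.55   ->  score = 0.05 * 0.8 + 0.55 * 0.2 = 0.15
// Power of Two Choices samples two candidates and keeps the higher score, so a pairing
// of A and B selects A despite B's perfect (but barely exercised) success rate.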
579
monero-rpc-pool/src/simple_handlers.rs
Normal file
@ -0,0 +1,579 @@
|
|||
use axum::{
|
||||
body::Body,
|
||||
extract::State,
|
||||
http::{HeaderMap, Method, StatusCode},
|
||||
response::Response,
|
||||
};
|
||||
use serde_json::json;
|
||||
use std::{error::Error, time::Instant};
|
||||
use tracing::{debug, error, info_span, Instrument};
|
||||
use uuid::Uuid;
|
||||
|
||||
use crate::AppState;
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
enum HandlerError {
|
||||
NoNodes,
|
||||
PoolError(String),
|
||||
RequestError(String),
|
||||
AllRequestsFailed(Vec<(String, String)>), // Vec of (node_url, error_message)
|
||||
}
|
||||
|
||||
impl std::fmt::Display for HandlerError {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
match self {
|
||||
HandlerError::NoNodes => write!(f, "No nodes available"),
|
||||
HandlerError::PoolError(msg) => write!(f, "Pool error: {}", msg),
|
||||
HandlerError::RequestError(msg) => write!(f, "Request error: {}", msg),
|
||||
HandlerError::AllRequestsFailed(errors) => {
|
||||
write!(f, "All requests failed: [")?;
|
||||
for (i, (node, error)) in errors.iter().enumerate() {
|
||||
if i > 0 {
|
||||
write!(f, ", ")?;
|
||||
}
|
||||
write!(f, "{}: {}", node, error)?;
|
||||
}
|
||||
write!(f, "]")
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn is_jsonrpc_error(body: &[u8]) -> bool {
|
||||
// Try to parse as JSON
|
||||
if let Ok(json) = serde_json::from_slice::<serde_json::Value>(body) {
|
||||
// Check if there's an "error" field
|
||||
return json.get("error").is_some();
|
||||
}
|
||||
|
||||
// If we can't parse JSON, treat it as an error
|
||||
true
|
||||
}
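// Example: {"jsonrpc":"2.0","id":"0","error":{"code":-32601,"message":"Method not found"}}
// counts as an error, while a body with a "result" field and no "error" field passes.
// Bodies that are not valid JSON are conservatively treated as errors too.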
|
||||
|
||||
fn extract_jsonrpc_method(body: &[u8]) -> Option<String> {
|
||||
if let Ok(json) = serde_json::from_slice::<serde_json::Value>(body) {
|
||||
if let Some(method) = json.get("method").and_then(|m| m.as_str()) {
|
||||
return Some(method.to_string());
|
||||
}
|
||||
}
|
||||
None
|
||||
}
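// Example: a body of {"jsonrpc":"2.0","id":"0","method":"get_info"} yields Some("get_info");
// bodies without a string "method" field yield None.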
|
||||
|
||||
async fn raw_http_request(
|
||||
node_url: &str,
|
||||
path: &str,
|
||||
method: &str,
|
||||
headers: &HeaderMap,
|
||||
body: Option<&[u8]>,
|
||||
) -> Result<Response, HandlerError> {
|
||||
let client = reqwest::Client::builder()
|
||||
.timeout(std::time::Duration::from_secs(30))
|
||||
.build()
|
||||
.map_err(|e| HandlerError::RequestError(e.to_string()))?;
|
||||
|
||||
let url = format!("{}{}", node_url, path);
|
||||
|
||||
// Use generic request method to support any HTTP verb
|
||||
let http_method = method
|
||||
.parse::<reqwest::Method>()
|
||||
.map_err(|e| HandlerError::RequestError(format!("Invalid method '{}': {}", method, e)))?;
|
||||
|
||||
let mut request_builder = client.request(http_method, &url);
|
||||
|
||||
// Forward body if present
|
||||
if let Some(body_bytes) = body {
|
||||
request_builder = request_builder.body(body_bytes.to_vec());
|
||||
}
|
||||
|
||||
// Forward essential headers
|
||||
for (name, value) in headers.iter() {
|
||||
let header_name = name.as_str();
|
||||
let header_name_lc = header_name.to_ascii_lowercase();
|
||||
|
||||
// Skip hop-by-hop headers and any body-related headers when we are **not** forwarding a body.
|
||||
let is_hop_by_hop = matches!(
|
||||
header_name_lc.as_str(),
|
||||
"host"
|
||||
| "connection"
|
||||
| "transfer-encoding"
|
||||
| "upgrade"
|
||||
| "proxy-authenticate"
|
||||
| "proxy-authorization"
|
||||
| "te"
|
||||
| "trailers"
|
||||
);
|
||||
|
||||
// If we are not forwarding a body (e.g. GET request) then forwarding `content-length` or
|
||||
// `content-type` with an absent body makes many Monero nodes hang waiting for bytes and
|
||||
// eventually close the connection. This manifests as the time-outs we have observed.
|
||||
let is_body_header_without_body =
|
||||
body.is_none() && matches!(header_name_lc.as_str(), "content-length" | "content-type");
|
||||
|
||||
if !is_hop_by_hop && !is_body_header_without_body {
|
||||
if let Ok(header_value) = std::str::from_utf8(value.as_bytes()) {
|
||||
request_builder = request_builder.header(header_name, header_value);
|
||||
}
|
||||
}
|
||||
}
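// Illustrative effect of the filtering above: for a GET request carrying the headers
// "host", "connection", "accept" and "content-type", only "accept" is forwarded —
// "host" and "connection" are hop-by-hop, and "content-type" is dropped because
// there is no body for it to describe.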
|
||||
|
||||
let response = request_builder
|
||||
.send()
|
||||
.await
|
||||
.map_err(|e| HandlerError::RequestError(e.to_string()))?;
|
||||
|
||||
// Convert to axum Response preserving everything
|
||||
let status = response.status();
|
||||
let response_headers = response.headers().clone();
|
||||
|
||||
let body_bytes = response.bytes().await.map_err(|e| {
|
||||
let mut error_msg = format!("Failed to read response body: {}", e);
|
||||
if let Some(source) = e.source() {
|
||||
error_msg.push_str(&format!(" (source: {})", source));
|
||||
}
|
||||
|
||||
HandlerError::RequestError(error_msg)
|
||||
})?;
|
||||
|
||||
let mut axum_response = Response::new(Body::from(body_bytes));
|
||||
*axum_response.status_mut() =
|
||||
StatusCode::from_u16(status.as_u16()).unwrap_or(StatusCode::INTERNAL_SERVER_ERROR);
|
||||
|
||||
// Copy response headers exactly
|
||||
for (name, value) in response_headers.iter() {
|
||||
if let (Ok(header_name), Ok(header_value)) = (
|
||||
axum::http::HeaderName::try_from(name.as_str()),
|
||||
axum::http::HeaderValue::try_from(value.as_bytes()),
|
||||
) {
|
||||
axum_response
|
||||
.headers_mut()
|
||||
.insert(header_name, header_value);
|
||||
}
|
||||
}
|
||||
|
||||
Ok(axum_response)
|
||||
}
|
||||
|
||||
async fn record_success(state: &AppState, node_url: &str, latency_ms: f64) {
|
||||
let node_pool_guard = state.node_pool.read().await;
|
||||
if let Err(e) = node_pool_guard.record_success(node_url, latency_ms).await {
|
||||
error!("Failed to record success for {}: {}", node_url, e);
|
||||
}
|
||||
}
|
||||
|
||||
async fn record_failure(state: &AppState, node_url: &str) {
|
||||
let node_pool_guard = state.node_pool.read().await;
|
||||
if let Err(e) = node_pool_guard.record_failure(node_url).await {
|
||||
error!("Failed to record failure for {}: {}", node_url, e);
|
||||
}
|
||||
}
|
||||
|
||||
async fn single_raw_request(
|
||||
state: &AppState,
|
||||
node_url: String,
|
||||
path: &str,
|
||||
method: &str,
|
||||
headers: &HeaderMap,
|
||||
body: Option<&[u8]>,
|
||||
) -> Result<(Response, String, f64), HandlerError> {
|
||||
let start_time = Instant::now();
|
||||
|
||||
match raw_http_request(&node_url, path, method, headers, body).await {
|
||||
Ok(response) => {
|
||||
let elapsed = start_time.elapsed();
|
||||
let latency_ms = elapsed.as_millis() as f64;
|
||||
|
||||
// Check HTTP status code - only 200 is success!
|
||||
if response.status().is_success() {
|
||||
// For JSON-RPC endpoints, also check for JSON-RPC errors
|
||||
if path == "/json_rpc" {
|
||||
let (parts, body_stream) = response.into_parts();
|
||||
let body_bytes = axum::body::to_bytes(body_stream, usize::MAX)
|
||||
.await
|
||||
.map_err(|e| HandlerError::RequestError(e.to_string()))?;
|
||||
|
||||
if is_jsonrpc_error(&body_bytes) {
|
||||
record_failure(state, &node_url).await;
|
||||
return Err(HandlerError::RequestError("JSON-RPC error".to_string()));
|
||||
}
|
||||
|
||||
// Reconstruct response with the body we consumed
|
||||
let response = Response::from_parts(parts, Body::from(body_bytes));
|
||||
record_success(state, &node_url, latency_ms).await;
|
||||
Ok((response, node_url, latency_ms))
|
||||
} else {
|
||||
// For non-JSON-RPC endpoints, HTTP success is enough
|
||||
record_success(state, &node_url, latency_ms).await;
|
||||
Ok((response, node_url, latency_ms))
|
||||
}
|
||||
} else {
|
||||
// Non-200 status codes are failures
|
||||
record_failure(state, &node_url).await;
|
||||
Err(HandlerError::RequestError(format!(
|
||||
"HTTP {}",
|
||||
response.status()
|
||||
)))
|
||||
}
|
||||
}
|
||||
Err(e) => {
|
||||
record_failure(state, &node_url).await;
|
||||
Err(e)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
async fn race_requests(
|
||||
state: &AppState,
|
||||
path: &str,
|
||||
method: &str,
|
||||
headers: &HeaderMap,
|
||||
body: Option<&[u8]>,
|
||||
) -> Result<Response, HandlerError> {
|
||||
// Extract JSON-RPC method for better logging
|
||||
let jsonrpc_method = if path == "/json_rpc" {
|
||||
if let Some(body_data) = body {
|
||||
extract_jsonrpc_method(body_data)
|
||||
} else {
|
||||
None
|
||||
}
|
||||
} else {
|
||||
None
|
||||
};
|
||||
const POOL_SIZE: usize = 20;
|
||||
let mut tried_nodes = std::collections::HashSet::new();
|
||||
let mut pool_index = 0;
|
||||
let mut collected_errors: Vec<(String, String)> = Vec::new();
|
||||
|
||||
// Get the exclusive pool of 20 nodes once at the beginning
|
||||
let available_pool = {
|
||||
let node_pool_guard = state.node_pool.read().await;
|
||||
let reliable_nodes = node_pool_guard
|
||||
.get_top_reliable_nodes(POOL_SIZE)
|
||||
.await
|
||||
.map_err(|e| HandlerError::PoolError(e.to_string()))?;
|
||||
|
||||
let pool: Vec<String> = reliable_nodes
|
||||
.into_iter()
|
||||
.map(|node| node.full_url)
|
||||
.collect();
|
||||
|
||||
pool
|
||||
};
|
||||
|
||||
if available_pool.is_empty() {
|
||||
return Err(HandlerError::NoNodes);
|
||||
}
|
||||
|
||||
// Power of Two Choices within the exclusive pool
|
||||
while pool_index < available_pool.len() && tried_nodes.len() < POOL_SIZE {
|
||||
let mut node1_option = None;
|
||||
let mut node2_option = None;
|
||||
|
||||
// Select first untried node from pool
|
||||
for (i, node) in available_pool.iter().enumerate().skip(pool_index) {
|
||||
if !tried_nodes.contains(node) {
|
||||
node1_option = Some(node.clone());
|
||||
pool_index = i + 1;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
// Select second untried node from pool (different from first)
|
||||
for node in available_pool.iter().skip(pool_index) {
|
||||
if !tried_nodes.contains(node) && Some(node) != node1_option.as_ref() {
|
||||
node2_option = Some(node.clone());
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
// If we can't get any new nodes from the pool, we've exhausted our options
|
||||
if node1_option.is_none() && node2_option.is_none() {
|
||||
break;
|
||||
}
|
||||
|
||||
// Store node URLs for error tracking before consuming them
|
||||
let current_nodes: Vec<String> = [&node1_option, &node2_option]
|
||||
.iter()
|
||||
.filter_map(|opt| opt.as_ref())
|
||||
.cloned()
|
||||
.collect();
|
||||
|
||||
let mut requests = Vec::new();
|
||||
|
||||
if let Some(node1) = node1_option {
|
||||
tried_nodes.insert(node1.clone());
|
||||
requests.push(single_raw_request(
|
||||
state,
|
||||
node1.clone(),
|
||||
path,
|
||||
method,
|
||||
headers,
|
||||
body,
|
||||
));
|
||||
}
|
||||
|
||||
if let Some(node2) = node2_option {
|
||||
tried_nodes.insert(node2.clone());
|
||||
requests.push(single_raw_request(
|
||||
state,
|
||||
node2.clone(),
|
||||
path,
|
||||
method,
|
||||
headers,
|
||||
body,
|
||||
));
|
||||
}
|
||||
|
||||
if requests.is_empty() {
|
||||
break;
|
||||
}
|
||||
|
||||
match &jsonrpc_method {
|
||||
Some(rpc_method) => debug!(
|
||||
"Racing {} requests to {} (JSON-RPC: {}): {} nodes (tried {} so far)",
|
||||
method,
|
||||
path,
|
||||
rpc_method,
|
||||
requests.len(),
|
||||
tried_nodes.len()
|
||||
),
|
||||
None => debug!(
|
||||
"Racing {} requests to {}: {} nodes (tried {} so far)",
|
||||
method,
|
||||
path,
|
||||
requests.len(),
|
||||
tried_nodes.len()
|
||||
),
|
||||
}
|
||||
|
||||
// Handle the requests based on how many we have
|
||||
let result = match requests.len() {
|
||||
1 => {
|
||||
// Only one request
|
||||
requests.into_iter().next().unwrap().await
|
||||
}
|
||||
2 => {
|
||||
// Two requests - race them
|
||||
let mut iter = requests.into_iter();
|
||||
let req1 = iter.next().unwrap();
|
||||
let req2 = iter.next().unwrap();
|
||||
|
||||
tokio::select! {
|
||||
result1 = req1 => result1,
|
||||
result2 = req2 => result2,
|
||||
}
|
||||
}
|
||||
_ => unreachable!("We only add 1 or 2 requests"),
|
||||
};
|
||||
|
||||
match result {
|
||||
Ok((response, winning_node, latency_ms)) => {
|
||||
match &jsonrpc_method {
|
||||
Some(rpc_method) => {
|
||||
debug!(
|
||||
"{} response from {} ({}ms) - SUCCESS after trying {} nodes! JSON-RPC: {}",
|
||||
method, winning_node, latency_ms, tried_nodes.len(), rpc_method
|
||||
)
|
||||
}
|
||||
None => debug!(
|
||||
"{} response from {} ({}ms) - SUCCESS after trying {} nodes!",
|
||||
method,
|
||||
winning_node,
|
||||
latency_ms,
|
||||
tried_nodes.len()
|
||||
),
|
||||
}
|
||||
record_success(state, &winning_node, latency_ms).await;
|
||||
return Ok(response);
|
||||
}
|
||||
Err(e) => {
|
||||
// Since we don't know which specific node failed in the race,
|
||||
// record the error for all nodes in this batch
|
||||
for node_url in &current_nodes {
|
||||
collected_errors.push((node_url.clone(), e.to_string()));
|
||||
}
|
||||
debug!(
|
||||
"Request failed: {} - retrying with different nodes from pool...",
|
||||
e
|
||||
);
|
||||
continue;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Log detailed error information
|
||||
let detailed_errors: Vec<String> = collected_errors
|
||||
.iter()
|
||||
.map(|(node, error)| format!("{}: {}", node, error))
|
||||
.collect();
|
||||
|
||||
match &jsonrpc_method {
|
||||
Some(rpc_method) => error!(
|
||||
"All {} requests failed after trying {} nodes (JSON-RPC: {}). Detailed errors:\n{}",
|
||||
method,
|
||||
tried_nodes.len(),
|
||||
rpc_method,
|
||||
detailed_errors.join("\n")
|
||||
),
|
||||
None => error!(
|
||||
"All {} requests failed after trying {} nodes. Detailed errors:\n{}",
|
||||
method,
|
||||
tried_nodes.len(),
|
||||
detailed_errors.join("\n")
|
||||
),
|
||||
}
|
||||
|
||||
Err(HandlerError::AllRequestsFailed(collected_errors))
|
||||
}
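// Example walk-through of the racing loop above (hypothetical pool of 20 nodes):
// round 1 races nodes #1 and #2 with tokio::select! and takes whichever future finishes
// first; if that result is an error it is attributed to both nodes of the batch and
// round 2 races #3 and #4, and so on, until a response succeeds or the pool is
// exhausted, at which point AllRequestsFailed carries every collected (node, error) pair.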
|
||||
|
||||
/// Forward a request to the node pool, returning either a successful response or a simple
|
||||
/// `500` with text "All nodes failed". Keeps the error handling logic in one place so the
|
||||
/// public handlers stay readable.
|
||||
async fn proxy_request(
|
||||
state: &AppState,
|
||||
path: &str,
|
||||
method: &str,
|
||||
headers: &HeaderMap,
|
||||
body: Option<&[u8]>,
|
||||
) -> Response {
|
||||
match race_requests(state, path, method, headers, body).await {
|
||||
Ok(res) => res,
|
||||
Err(handler_error) => {
|
||||
let error_response = match &handler_error {
|
||||
HandlerError::AllRequestsFailed(node_errors) => {
|
||||
json!({
|
||||
"error": "All nodes failed",
|
||||
"details": {
|
||||
"type": "AllRequestsFailed",
|
||||
"message": "All proxy requests to available nodes failed",
|
||||
"node_errors": node_errors.iter().map(|(node, error)| {
|
||||
json!({
|
||||
"node": node,
|
||||
"error": error
|
||||
})
|
||||
}).collect::<Vec<_>>(),
|
||||
"total_nodes_tried": node_errors.len()
|
||||
}
|
||||
})
|
||||
}
|
||||
HandlerError::NoNodes => {
|
||||
json!({
|
||||
"error": "No nodes available",
|
||||
"details": {
|
||||
"type": "NoNodes",
|
||||
"message": "No healthy nodes available in the pool"
|
||||
}
|
||||
})
|
||||
}
|
||||
HandlerError::PoolError(msg) => {
|
||||
json!({
|
||||
"error": "Pool error",
|
||||
"details": {
|
||||
"type": "PoolError",
|
||||
"message": msg
|
||||
}
|
||||
})
|
||||
}
|
||||
HandlerError::RequestError(msg) => {
|
||||
json!({
|
||||
"error": "Request error",
|
||||
"details": {
|
||||
"type": "RequestError",
|
||||
"message": msg
|
||||
}
|
||||
})
|
||||
}
|
||||
};
|
||||
|
||||
Response::builder()
|
||||
.status(StatusCode::INTERNAL_SERVER_ERROR)
|
||||
.header("content-type", "application/json")
|
||||
.body(Body::from(error_response.to_string()))
|
||||
.unwrap_or_else(|_| Response::new(Body::empty()))
|
||||
}
|
||||
}
|
||||
}
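// Example 500 body produced above when every node fails (node URL and error are illustrative):
// {"error":"All nodes failed","details":{"type":"AllRequestsFailed",
//  "message":"All proxy requests to available nodes failed",
//  "node_errors":[{"node":"http://node1.example.org:18081","error":"HTTP 503"}],
//  "total_nodes_tried":1}}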
|
||||
|
||||
#[axum::debug_handler]
|
||||
pub async fn simple_proxy_handler(
|
||||
State(state): State<AppState>,
|
||||
method: Method,
|
||||
uri: axum::http::Uri,
|
||||
headers: HeaderMap,
|
||||
body: axum::body::Bytes,
|
||||
) -> Response {
|
||||
let body_size = body.len();
|
||||
let request_id = Uuid::new_v4();
|
||||
let path = uri.path().to_string();
|
||||
let method_str = method.to_string();
|
||||
let path_clone = path.clone();
|
||||
|
||||
// Extract JSON-RPC method for tracing span
|
||||
let body_option = (!body.is_empty()).then_some(&body[..]);
|
||||
let jsonrpc_method = if path == "/json_rpc" {
|
||||
if let Some(body_data) = body_option {
|
||||
extract_jsonrpc_method(body_data)
|
||||
} else {
|
||||
None
|
||||
}
|
||||
} else {
|
||||
None
|
||||
};
|
||||
let jsonrpc_method_for_span = jsonrpc_method.as_deref().unwrap_or("N/A").to_string();
|
||||
|
||||
async move {
|
||||
match &jsonrpc_method {
|
||||
Some(rpc_method) => debug!(
|
||||
"Proxying {} {} ({} bytes) - JSON-RPC method: {}",
|
||||
method, path, body_size, rpc_method
|
||||
),
|
||||
None => debug!("Proxying {} {} ({} bytes)", method, path, body_size),
|
||||
}
|
||||
|
||||
proxy_request(&state, &path, method.as_str(), &headers, body_option).await
|
||||
}
|
||||
.instrument(info_span!("proxy_request",
|
||||
request_id = %request_id,
|
||||
method = %method_str,
|
||||
path = %path_clone,
|
||||
body_size = body_size,
|
||||
jsonrpc_method = %jsonrpc_method_for_span
|
||||
))
|
||||
.await
|
||||
}
|
||||
|
||||
#[axum::debug_handler]
|
||||
pub async fn simple_stats_handler(State(state): State<AppState>) -> Response {
|
||||
async move {
|
||||
let node_pool_guard = state.node_pool.read().await;
|
||||
|
||||
match node_pool_guard.get_current_status().await {
|
||||
Ok(status) => {
|
||||
let stats_json = serde_json::json!({
|
||||
"status": "healthy",
|
||||
"total_node_count": status.total_node_count,
|
||||
"healthy_node_count": status.healthy_node_count,
|
||||
"successful_health_checks": status.successful_health_checks,
|
||||
"unsuccessful_health_checks": status.unsuccessful_health_checks,
|
||||
"top_reliable_nodes": status.top_reliable_nodes
|
||||
});
|
||||
|
||||
Response::builder()
|
||||
.status(StatusCode::OK)
|
||||
.header("content-type", "application/json")
|
||||
.body(Body::from(stats_json.to_string()))
|
||||
.unwrap_or_else(|_| Response::new(Body::empty()))
|
||||
}
|
||||
Err(e) => {
|
||||
error!("Failed to get pool status: {}", e);
|
||||
let error_json = r#"{"status":"error","message":"Failed to get pool status"}"#;
|
||||
Response::builder()
|
||||
.status(StatusCode::INTERNAL_SERVER_ERROR)
|
||||
.header("content-type", "application/json")
|
||||
.body(Body::from(error_json))
|
||||
.unwrap_or_else(|_| Response::new(Body::empty()))
|
||||
}
|
||||
}
|
||||
}
|
||||
.instrument(info_span!("stats_request"))
|
||||
.await
|
||||
}
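// Example payload returned by the stats handler above (illustrative values; the field set
// mirrors the json! built from PoolStatus):
// {"status":"healthy","total_node_count":120,"healthy_node_count":35,
//  "successful_health_checks":5000,"unsuccessful_health_checks":250,
//  "top_reliable_nodes":[{"url":"http://node1.example.org:18081","success_rate":0.98,"avg_latency_ms":140.0}]}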
|
|
@ -1054,7 +1054,7 @@ impl FfiWallet {
|
|||
monero::Address::from_str(&address.to_string()).expect("wallet's own address to be valid")
|
||||
}
|
||||
|
||||
fn set_daemon_address(&mut self, address: &str) -> anyhow::Result<()> {
|
||||
pub fn set_daemon_address(&mut self, address: &str) -> anyhow::Result<()> {
|
||||
tracing::debug!(%address, "Setting daemon address");
|
||||
|
||||
let_cxx_string!(address = address);
|
||||
|
|
|
@ -4,9 +4,9 @@
|
|||
"version": "0.7.0",
|
||||
"type": "module",
|
||||
"scripts": {
|
||||
"check-bindings": "typeshare --lang=typescript --output-file __temp_bindings.ts ../swap/src && dprint fmt __temp_bindings.ts && diff -wbB __temp_bindings.ts ./src/models/tauriModel.ts && rm __temp_bindings.ts",
|
||||
"gen-bindings-verbose": "RUST_LOG=debug RUST_BACKTRACE=1 typeshare --lang=typescript --output-file ./src/models/tauriModel.ts ../swap/src && dprint fmt ./src/models/tauriModel.ts",
|
||||
"gen-bindings": "typeshare --lang=typescript --output-file ./src/models/tauriModel.ts ../swap/src && dprint fmt ./src/models/tauriModel.ts",
|
||||
"check-bindings": "typeshare --lang=typescript --output-file __temp_bindings.ts ../swap/src ../monero-rpc-pool/src && dprint fmt __temp_bindings.ts && diff -wbB __temp_bindings.ts ./src/models/tauriModel.ts && rm __temp_bindings.ts",
|
||||
"gen-bindings-verbose": "RUST_LOG=debug RUST_BACKTRACE=1 typeshare --lang=typescript --output-file ./src/models/tauriModel.ts ../swap/src ../monero-rpc-pool/src && dprint fmt ./src/models/tauriModel.ts",
|
||||
"gen-bindings": "typeshare --lang=typescript --output-file ./src/models/tauriModel.ts ../swap/src ../monero-rpc-pool/src && dprint fmt ./src/models/tauriModel.ts",
|
||||
"test": "vitest",
|
||||
"test:ui": "vitest --ui",
|
||||
"dev": "vite",
|
||||
|
|
|
@ -8,6 +8,7 @@ import {
|
|||
approvalEventReceived,
|
||||
backgroundProgressEventReceived,
|
||||
} from "store/features/rpcSlice";
|
||||
import { poolStatusReceived } from "store/features/poolSlice";
|
||||
import { swapProgressEventReceived } from "store/features/swapSlice";
|
||||
import logger from "utils/logger";
|
||||
import {
|
||||
|
@ -127,6 +128,10 @@ export async function setupBackgroundTasks(): Promise<void> {
|
|||
store.dispatch(backgroundProgressEventReceived(eventData));
|
||||
break;
|
||||
|
||||
case "PoolStatusUpdate":
|
||||
store.dispatch(poolStatusReceived(eventData));
|
||||
break;
|
||||
|
||||
default:
|
||||
exhaustiveGuard(channelName);
|
||||
}
|
||||
|
|
|
@ -24,8 +24,8 @@ export default function UnfinishedSwapsAlert() {
|
|||
>
|
||||
You have{" "}
|
||||
{resumableSwapsCount > 1
|
||||
? `${resumableSwapsCount} unfinished swaps`
|
||||
: "one unfinished swap"}
|
||||
? `${resumableSwapsCount} pending swaps`
|
||||
: "one pending swap"}
|
||||
</Alert>
|
||||
);
|
||||
}
|
||||
|
|
|
@ -63,7 +63,10 @@ function getActiveStep(state: SwapState | null): PathStep | null {
|
|||
// Bitcoin has been locked, waiting for the counterparty to lock their XMR
|
||||
case "BtcLockTxInMempool":
|
||||
// We only display the first step as completed if the Bitcoin lock has been confirmed
|
||||
if (latestState.content.btc_lock_confirmations > 0) {
|
||||
if (
|
||||
latestState.content.btc_lock_confirmations !== undefined &&
|
||||
latestState.content.btc_lock_confirmations > 0
|
||||
) {
|
||||
return [PathType.HAPPY_PATH, 1, isReleased];
|
||||
}
|
||||
return [PathType.HAPPY_PATH, 0, isReleased];
|
||||
|
|
|
@ -1,4 +1,5 @@
|
|||
import { TauriSwapProgressEventContent } from "models/tauriModelExt";
|
||||
import { formatConfirmations } from "utils/formatUtils";
|
||||
import BitcoinTransactionInfoBox from "../../BitcoinTransactionInfoBox";
|
||||
import SwapStatusAlert from "renderer/components/alert/SwapStatusAlert/SwapStatusAlert";
|
||||
import { useActiveSwapInfo } from "store/hooks";
|
||||
|
@ -15,10 +16,11 @@ export default function BitcoinLockTxInMempoolPage({
|
|||
|
||||
return (
|
||||
<Box>
|
||||
{btc_lock_confirmations < BITCOIN_CONFIRMATIONS_WARNING_THRESHOLD && (
|
||||
{(btc_lock_confirmations === undefined ||
|
||||
btc_lock_confirmations < BITCOIN_CONFIRMATIONS_WARNING_THRESHOLD) && (
|
||||
<DialogContentText>
|
||||
Your Bitcoin has been locked.{" "}
|
||||
{btc_lock_confirmations > 0
|
||||
{btc_lock_confirmations !== undefined && btc_lock_confirmations > 0
|
||||
? "We are waiting for the other party to lock their Monero."
|
||||
: "We are waiting for the blockchain to confirm the transaction. Once confirmed, the other party will lock their Monero."}
|
||||
</DialogContentText>
|
||||
|
@ -30,9 +32,10 @@ export default function BitcoinLockTxInMempoolPage({
|
|||
gap: "1rem",
|
||||
}}
|
||||
>
|
||||
{btc_lock_confirmations >= BITCOIN_CONFIRMATIONS_WARNING_THRESHOLD && (
|
||||
<SwapStatusAlert swap={swapInfo} isRunning={true} />
|
||||
)}
|
||||
{btc_lock_confirmations !== undefined &&
|
||||
btc_lock_confirmations >= BITCOIN_CONFIRMATIONS_WARNING_THRESHOLD && (
|
||||
<SwapStatusAlert swap={swapInfo} isRunning={true} />
|
||||
)}
|
||||
<BitcoinTransactionInfoBox
|
||||
title="Bitcoin Lock Transaction"
|
||||
txId={btc_lock_txid}
|
||||
|
@ -43,7 +46,7 @@ export default function BitcoinLockTxInMempoolPage({
|
|||
After they lock their funds and the Monero transaction receives
|
||||
one confirmation, the swap will proceed to the next step.
|
||||
<br />
|
||||
Confirmations: {btc_lock_confirmations}
|
||||
Confirmations: {formatConfirmations(btc_lock_confirmations)}
|
||||
</>
|
||||
}
|
||||
/>
|
||||
|
|
|
@ -1,12 +1,13 @@
|
|||
import { Box, DialogContentText } from "@mui/material";
|
||||
import { TauriSwapProgressEventContent } from "models/tauriModelExt";
|
||||
import { formatConfirmations } from "utils/formatUtils";
|
||||
import MoneroTransactionInfoBox from "../../MoneroTransactionInfoBox";
|
||||
|
||||
export default function XmrLockTxInMempoolPage({
|
||||
xmr_lock_tx_confirmations,
|
||||
xmr_lock_txid,
|
||||
}: TauriSwapProgressEventContent<"XmrLockTxInMempool">) {
|
||||
const additionalContent = `Confirmations: ${xmr_lock_tx_confirmations}/10`;
|
||||
const additionalContent = `Confirmations: ${formatConfirmations(xmr_lock_tx_confirmations, 10)}`;
|
||||
|
||||
return (
|
||||
<Box>
|
||||
|
|
|
@ -0,0 +1,211 @@
|
|||
import {
|
||||
Box,
|
||||
Typography,
|
||||
Table,
|
||||
TableBody,
|
||||
TableCell,
|
||||
TableContainer,
|
||||
TableHead,
|
||||
TableRow,
|
||||
Chip,
|
||||
LinearProgress,
|
||||
useTheme,
|
||||
} from "@mui/material";
|
||||
import InfoBox from "renderer/components/modal/swap/InfoBox";
|
||||
import { ReliableNodeInfo } from "models/tauriModel";
|
||||
import NetworkWifiIcon from "@mui/icons-material/NetworkWifi";
|
||||
import { useAppSelector } from "store/hooks";
|
||||
|
||||
export default function MoneroPoolHealthBox() {
|
||||
const { poolStatus, isLoading } = useAppSelector((state) => ({
|
||||
poolStatus: state.pool.status,
|
||||
isLoading: state.pool.isLoading,
|
||||
}));
|
||||
const theme = useTheme();
|
||||
|
||||
const formatLatency = (latencyMs?: number) => {
|
||||
if (latencyMs === undefined || latencyMs === null) return "N/A";
|
||||
return `${Math.round(latencyMs)}ms`;
|
||||
};
|
||||
|
||||
const formatSuccessRate = (rate: number) => {
|
||||
return `${(rate * 100).toFixed(1)}%`;
|
||||
};
|
||||
|
||||
const getHealthColor = (healthyCount: number, reliableCount: number) => {
|
||||
if (reliableCount === 0) return theme.palette.error.main;
|
||||
if (reliableCount < 3) return theme.palette.warning.main;
|
||||
return theme.palette.success.main;
|
||||
};
|
||||
|
||||
const renderHealthSummary = () => {
|
||||
if (!poolStatus) return null;
|
||||
|
||||
const totalChecks =
|
||||
poolStatus.successful_health_checks +
|
||||
poolStatus.unsuccessful_health_checks;
|
||||
const overallSuccessRate =
|
||||
totalChecks > 0
|
||||
? (poolStatus.successful_health_checks / totalChecks) * 100
|
||||
: 0;
|
||||
|
||||
return (
|
||||
<Box sx={{ display: "flex", gap: 2, flexWrap: "wrap" }}>
|
||||
<Chip
|
||||
label={`${poolStatus.total_node_count} Total Known`}
|
||||
color="info"
|
||||
variant="outlined"
|
||||
size="small"
|
||||
/>
|
||||
<Chip
|
||||
label={`${poolStatus.healthy_node_count} Healthy`}
|
||||
color={poolStatus.healthy_node_count > 0 ? "success" : "error"}
|
||||
variant="outlined"
|
||||
size="small"
|
||||
/>
|
||||
<Chip
|
||||
label={`${(100 - overallSuccessRate).toFixed(1)}% Retry Rate`}
|
||||
color={
|
||||
overallSuccessRate > 80
|
||||
? "success"
|
||||
: overallSuccessRate > 60
|
||||
? "warning"
|
||||
: "error"
|
||||
}
|
||||
variant="outlined"
|
||||
size="small"
|
||||
/>
|
||||
</Box>
|
||||
);
|
||||
};
|
||||
|
||||
const renderTopNodes = () => {
|
||||
if (!poolStatus || poolStatus.top_reliable_nodes.length === 0) {
|
||||
return (
|
||||
<>
|
||||
<Box sx={{ display: "flex", alignItems: "center", gap: 1 }}>
|
||||
<Typography variant="h6" sx={{ fontSize: "1rem" }}>
|
||||
🚧
|
||||
</Typography>
|
||||
<Typography variant="body2" color="text.secondary">
|
||||
Bootstrapping remote Monero node registry... But you can already
|
||||
start swapping!
|
||||
</Typography>
|
||||
</Box>
|
||||
</>
|
||||
);
|
||||
}
|
||||
|
||||
return (
|
||||
<TableContainer>
|
||||
<Table size="small">
|
||||
<TableHead>
|
||||
<TableRow>
|
||||
<TableCell>Node URL</TableCell>
|
||||
<TableCell align="right">Success Rate</TableCell>
|
||||
<TableCell align="right">Avg Latency</TableCell>
|
||||
</TableRow>
|
||||
</TableHead>
|
||||
<TableBody>
|
||||
{poolStatus.top_reliable_nodes.map(
|
||||
(node: ReliableNodeInfo, index: number) => (
|
||||
<TableRow key={index}>
|
||||
<TableCell>
|
||||
<Typography
|
||||
variant="caption"
|
||||
sx={{ wordBreak: "break-all" }}
|
||||
>
|
||||
{node.url}
|
||||
</Typography>
|
||||
</TableCell>
|
||||
<TableCell align="right">
|
||||
<Typography variant="caption">
|
||||
{formatSuccessRate(node.success_rate)}
|
||||
</Typography>
|
||||
</TableCell>
|
||||
<TableCell align="right">
|
||||
<Typography variant="caption">
|
||||
{formatLatency(node.avg_latency_ms)}
|
||||
</Typography>
|
||||
</TableCell>
|
||||
</TableRow>
|
||||
),
|
||||
)}
|
||||
</TableBody>
|
||||
</Table>
|
||||
</TableContainer>
|
||||
);
|
||||
};
|
||||
|
||||
// Show bootstrapping message when no data is available
|
||||
if (!poolStatus && !isLoading) {
|
||||
return (
|
||||
<InfoBox
|
||||
title={
|
||||
<Box sx={{ display: "flex", alignItems: "center", gap: 1 }}>
|
||||
<NetworkWifiIcon />
|
||||
Monero Pool Health
|
||||
</Box>
|
||||
}
|
||||
mainContent={
|
||||
<Box sx={{ display: "flex", alignItems: "center", gap: 1 }}>
|
||||
<Typography variant="h2" sx={{ fontSize: "1.5rem" }}>
|
||||
🚧
|
||||
</Typography>
|
||||
<Typography variant="subtitle2">
|
||||
Bootstrapping pool health monitoring. You can already start using
|
||||
the app!
|
||||
</Typography>
|
||||
</Box>
|
||||
}
|
||||
additionalContent={null}
|
||||
icon={null}
|
||||
loading={false}
|
||||
/>
|
||||
);
|
||||
}
|
||||
|
||||
return (
|
||||
<InfoBox
|
||||
title={
|
||||
<Box sx={{ display: "flex", alignItems: "center", gap: 1 }}>
|
||||
<NetworkWifiIcon />
|
||||
Monero Pool Health
|
||||
</Box>
|
||||
}
|
||||
mainContent={
|
||||
<Typography variant="subtitle2">
|
||||
Real-time health monitoring of the Monero node pool. Shows node
|
||||
availability, success rates, and performance metrics.
|
||||
</Typography>
|
||||
}
|
||||
additionalContent={
|
||||
<Box sx={{ display: "flex", flexDirection: "column", gap: 2 }}>
|
||||
{poolStatus && renderHealthSummary()}
|
||||
|
||||
{poolStatus && (
|
||||
<Box>
|
||||
<Typography variant="body2" sx={{ mb: 1, fontWeight: "medium" }}>
|
||||
Health Check Statistics
|
||||
</Typography>
|
||||
<Box sx={{ display: "flex", gap: 2, flexWrap: "wrap" }}>
|
||||
<Typography variant="caption" color="text.secondary">
|
||||
Successful:{" "}
|
||||
{poolStatus.successful_health_checks.toLocaleString()}
|
||||
</Typography>
|
||||
<Typography variant="caption" color="text.secondary">
|
||||
Failed:{" "}
|
||||
{poolStatus.unsuccessful_health_checks.toLocaleString()}
|
||||
</Typography>
|
||||
</Box>
|
||||
</Box>
|
||||
)}
|
||||
|
||||
<Box>{renderTopNodes()}</Box>
|
||||
</Box>
|
||||
}
|
||||
icon={null}
|
||||
loading={isLoading}
|
||||
/>
|
||||
);
|
||||
}
|
|
@ -20,6 +20,11 @@ import {
|
|||
useTheme,
|
||||
Switch,
|
||||
SelectChangeEvent,
|
||||
TextField,
|
||||
ToggleButton,
|
||||
ToggleButtonGroup,
|
||||
Chip,
|
||||
LinearProgress,
|
||||
} from "@mui/material";
|
||||
import {
|
||||
addNode,
|
||||
|
@ -35,11 +40,13 @@ import {
|
|||
setFiatCurrency,
|
||||
setTheme,
|
||||
setTorEnabled,
|
||||
setUseMoneroRpcPool,
|
||||
} from "store/features/settingsSlice";
|
||||
import { useAppDispatch, useNodes, useSettings } from "store/hooks";
|
||||
import ValidatedTextField from "renderer/components/other/ValidatedTextField";
|
||||
import PromiseInvokeButton from "renderer/components/PromiseInvokeButton";
|
||||
import HelpIcon from "@mui/icons-material/HelpOutline";
|
||||
import { ReactNode, useState } from "react";
|
||||
import { ReactNode, useState, useEffect } from "react";
|
||||
import { Theme } from "renderer/components/theme";
|
||||
import {
|
||||
Add,
|
||||
|
@ -47,12 +54,18 @@ import {
|
|||
Delete,
|
||||
Edit,
|
||||
HourglassEmpty,
|
||||
Refresh,
|
||||
} from "@mui/icons-material";
|
||||
|
||||
import { getNetwork } from "store/config";
|
||||
import { currencySymbol } from "utils/formatUtils";
|
||||
import InfoBox from "renderer/components/modal/swap/InfoBox";
|
||||
import { isValidMultiAddressWithPeerId } from "utils/parseUtils";
|
||||
|
||||
import { useAppSelector } from "store/hooks";
|
||||
import { getNodeStatus } from "renderer/rpc";
|
||||
import { setStatus } from "store/features/nodesSlice";
|
||||
|
||||
const PLACEHOLDER_ELECTRUM_RPC_URL = "ssl://blockstream.info:700";
|
||||
const PLACEHOLDER_MONERO_NODE_URL = "http://xmr-node.cakewallet.com:18081";
|
||||
|
||||
|
@ -83,6 +96,7 @@ export default function SettingsBox() {
|
|||
<TableBody>
|
||||
<TorSettings />
|
||||
<ElectrumRpcUrlSetting />
|
||||
<MoneroRpcPoolSetting />
|
||||
<MoneroNodeUrlSetting />
|
||||
<FetchFiatPricesSetting />
|
||||
<ThemeSetting />
|
||||
|
@ -268,15 +282,21 @@ function ElectrumRpcUrlSetting() {
|
|||
function SettingLabel({
|
||||
label,
|
||||
tooltip,
|
||||
disabled = false,
|
||||
}: {
|
||||
label: ReactNode;
|
||||
tooltip: string | null;
|
||||
disabled?: boolean;
|
||||
}) {
|
||||
const opacity = disabled ? 0.5 : 1;
|
||||
|
||||
return (
|
||||
<Box style={{ display: "flex", alignItems: "center", gap: "0.5rem" }}>
|
||||
<Box
|
||||
style={{ display: "flex", alignItems: "center", gap: "0.5rem", opacity }}
|
||||
>
|
||||
<Box>{label}</Box>
|
||||
<Tooltip title={tooltip}>
|
||||
<IconButton size="small">
|
||||
<IconButton size="small" disabled={disabled}>
|
||||
<HelpIcon />
|
||||
</IconButton>
|
||||
</Tooltip>
|
||||
|
@ -285,38 +305,147 @@ function SettingLabel({
|
|||
}
|
||||
|
||||
/**
|
||||
* A setting that allows you to select the Monero Node URL to use.
|
||||
* A setting that allows you to toggle between using the Monero RPC Pool and custom nodes.
|
||||
*/
|
||||
function MoneroRpcPoolSetting() {
|
||||
const useMoneroRpcPool = useSettings((s) => s.useMoneroRpcPool);
|
||||
const dispatch = useAppDispatch();
|
||||
|
||||
const handleChange = (
|
||||
event: React.MouseEvent<HTMLElement>,
|
||||
newValue: string,
|
||||
) => {
|
||||
if (newValue !== null) {
|
||||
dispatch(setUseMoneroRpcPool(newValue === "pool"));
|
||||
}
|
||||
};
|
||||
|
||||
return (
|
||||
<TableRow>
|
||||
<TableCell>
|
||||
<SettingLabel
|
||||
label="Monero Node Selection"
|
||||
tooltip="Choose between using a load-balanced pool of Monero nodes for better reliability, or configure custom Monero nodes."
|
||||
/>
|
||||
</TableCell>
|
||||
<TableCell>
|
||||
<ToggleButtonGroup
|
||||
color="primary"
|
||||
value={useMoneroRpcPool ? "pool" : "custom"}
|
||||
exclusive
|
||||
onChange={handleChange}
|
||||
aria-label="Monero node selection"
|
||||
size="small"
|
||||
>
|
||||
<ToggleButton value="pool">Pool (Recommended)</ToggleButton>
|
||||
<ToggleButton value="custom">Manual</ToggleButton>
|
||||
</ToggleButtonGroup>
|
||||
</TableCell>
|
||||
</TableRow>
|
||||
);
|
||||
}
|
||||
|
||||
/**
|
||||
* A setting that allows you to configure a single Monero Node URL.
|
||||
* Gets disabled when RPC pool is enabled.
|
||||
*/
|
||||
function MoneroNodeUrlSetting() {
|
||||
const network = getNetwork();
|
||||
const [tableVisible, setTableVisible] = useState(false);
|
||||
const useMoneroRpcPool = useSettings((s) => s.useMoneroRpcPool);
|
||||
const moneroNodeUrl = useSettings((s) => s.nodes[network][Blockchain.Monero][0] || "");
|
||||
const nodeStatuses = useNodes((s) => s.nodes);
|
||||
const dispatch = useAppDispatch();
|
||||
const [isRefreshing, setIsRefreshing] = useState(false);
|
||||
|
||||
const isValid = (url: string) => isValidUrl(url, ["http"]);
|
||||
const currentNodes = useSettings((s) => s.nodes[network][Blockchain.Monero]);
|
||||
|
||||
const handleNodeUrlChange = (newUrl: string) => {
|
||||
// Remove existing nodes and add the new one
|
||||
currentNodes.forEach(node => {
|
||||
dispatch(removeNode({ network, type: Blockchain.Monero, node }));
|
||||
});
|
||||
|
||||
if (newUrl.trim()) {
|
||||
dispatch(addNode({ network, type: Blockchain.Monero, node: newUrl.trim() }));
|
||||
}
|
||||
};
|
||||
|
||||
const handleRefreshStatus = async () => {
|
||||
// Don't refresh if pool is enabled or no node URL is configured
|
||||
if (!moneroNodeUrl || useMoneroRpcPool) return;
|
||||
|
||||
setIsRefreshing(true);
|
||||
try {
|
||||
const status = await getNodeStatus(moneroNodeUrl, Blockchain.Monero, network);
|
||||
|
||||
// Update the status in the store
|
||||
dispatch(setStatus({ node: moneroNodeUrl, status, blockchain: Blockchain.Monero }));
|
||||
} catch (error) {
|
||||
console.error("Failed to refresh node status:", error);
|
||||
} finally {
|
||||
setIsRefreshing(false);
|
||||
}
|
||||
};
|
||||
|
||||
const isValid = (url: string) => url === "" || isValidUrl(url, ["http"]);
|
||||
const nodeStatus = moneroNodeUrl ? nodeStatuses[Blockchain.Monero][moneroNodeUrl] : null;
|
||||
|
||||
return (
|
||||
<TableRow>
|
||||
<TableCell>
|
||||
<SettingLabel
|
||||
label="Custom Monero Node URL"
|
||||
tooltip="This is the URL of the Monero node that the GUI will connect to. Ensure the node is listening for RPC connections over HTTP. If you leave this field empty, the GUI will choose from a list of known nodes at random."
|
||||
tooltip={
|
||||
useMoneroRpcPool
|
||||
? "This setting is disabled because Monero RPC pool is enabled. Disable the RPC pool to configure a custom node."
|
||||
: "This is the URL of the Monero node that the GUI will connect to. It is used to sync Monero transactions. If you leave this field empty, the GUI will choose from a list of known servers at random."
|
||||
}
|
||||
disabled={useMoneroRpcPool}
|
||||
/>
|
||||
</TableCell>
|
||||
<TableCell>
|
||||
<IconButton onClick={() => setTableVisible(!tableVisible)} size="large">
|
||||
<Edit />
|
||||
</IconButton>
|
||||
{tableVisible ? (
|
||||
<NodeTableModal
|
||||
open={tableVisible}
|
||||
onClose={() => setTableVisible(false)}
|
||||
network={network}
|
||||
blockchain={Blockchain.Monero}
|
||||
isValid={isValid}
|
||||
<Box sx={{ display: "flex", alignItems: "center", gap: 1 }}>
|
||||
<ValidatedTextField
|
||||
value={moneroNodeUrl}
|
||||
onValidatedChange={handleNodeUrlChange}
|
||||
placeholder={PLACEHOLDER_MONERO_NODE_URL}
|
||||
disabled={useMoneroRpcPool}
|
||||
fullWidth
|
||||
isValid={isValid}
|
||||
variant="outlined"
|
||||
noErrorWhenEmpty
|
||||
/>
|
||||
) : (
|
||||
<></>
|
||||
)}
|
||||
<>
|
||||
<Tooltip title={
|
||||
useMoneroRpcPool
|
||||
? "Node status checking is disabled when using the pool"
|
||||
: !moneroNodeUrl
|
||||
? "Enter a node URL to check status"
|
||||
: "Node status"
|
||||
}>
|
||||
<Box sx={{ display: "flex", alignItems: "center" }}>
|
||||
<Circle
|
||||
color={useMoneroRpcPool || !moneroNodeUrl ? "gray" : (nodeStatus ? "green" : "red")}
|
||||
/>
|
||||
</Box>
|
||||
</Tooltip>
|
||||
<Tooltip title={
|
||||
useMoneroRpcPool
|
||||
? "Node status refresh is disabled when using the pool"
|
||||
: !moneroNodeUrl
|
||||
? "Enter a node URL to refresh status"
|
||||
: "Refresh node status"
|
||||
}>
|
||||
<IconButton
|
||||
onClick={handleRefreshStatus}
|
||||
disabled={isRefreshing || useMoneroRpcPool || !moneroNodeUrl}
|
||||
size="small"
|
||||
>
|
||||
{isRefreshing ? <HourglassEmpty /> : <Refresh />}
|
||||
</IconButton>
|
||||
</Tooltip>
|
||||
</>
|
||||
</Box>
|
||||
</TableCell>
|
||||
</TableRow>
|
||||
);
|
||||
|
@ -380,7 +509,7 @@ function NodeTableModal({
|
|||
When the daemon is started, it will attempt to connect to the first
|
||||
available {blockchain} node in this list. If you leave this field
|
||||
empty or all nodes are unavailable, it will choose from a list of
|
||||
known nodes at random. Requires a restart to take effect.
|
||||
known nodes at random.
|
||||
</Typography>
|
||||
<NodeTable
|
||||
network={network}
|
||||
|
@ -413,38 +542,6 @@ function Circle({ color, radius = 6 }: { color: string; radius?: number }) {
|
|||
);
|
||||
}
|
||||
|
||||
/**
|
||||
* Displays a status indicator for a node
|
||||
*/
|
||||
function NodeStatus({ status }: { status: boolean | undefined }) {
|
||||
const theme = useTheme();
|
||||
|
||||
switch (status) {
|
||||
case true:
|
||||
return (
|
||||
<Tooltip
|
||||
title={"This node is available and responding to RPC requests"}
|
||||
>
|
||||
<Circle color={theme.palette.success.dark} />
|
||||
</Tooltip>
|
||||
);
|
||||
case false:
|
||||
return (
|
||||
<Tooltip
|
||||
title={"This node is not available or not responding to RPC requests"}
|
||||
>
|
||||
<Circle color={theme.palette.error.dark} />
|
||||
</Tooltip>
|
||||
);
|
||||
default:
|
||||
return (
|
||||
<Tooltip title={"The status of this node is currently unknown"}>
|
||||
<HourglassEmpty />
|
||||
</Tooltip>
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* A table that displays the available nodes for a given network and blockchain.
|
||||
* It allows you to add, remove, and move nodes up the list.
|
||||
|
@ -515,7 +612,9 @@ function NodeTable({
|
|||
</TableCell>
|
||||
{/* Node status icon */}
|
||||
<TableCell align="center">
|
||||
<NodeStatus status={nodeStatuses[blockchain][node]} />
|
||||
<Circle
|
||||
color={nodeStatuses[blockchain][node] ? "green" : "red"}
|
||||
/>
|
||||
</TableCell>
|
||||
{/* Remove and move buttons */}
|
||||
<TableCell>
|
||||
|
@ -582,7 +681,7 @@ export function TorSettings() {
|
|||
<TableCell>
|
||||
<SettingLabel
|
||||
label="Use Tor"
|
||||
tooltip="Tor (The Onion Router) is a decentralized network allowing for anonymous browsing. If enabled, the app will use its internal Tor client to hide your IP address from the maker. Requires a restart to take effect."
|
||||
tooltip="Route network traffic through Tor to hide your IP address from the maker."
|
||||
/>
|
||||
</TableCell>
|
||||
|
||||
|
|
|
@ -5,6 +5,7 @@ import DaemonControlBox from "./DaemonControlBox";
import SettingsBox from "./SettingsBox";
import ExportDataBox from "./ExportDataBox";
import DiscoveryBox from "./DiscoveryBox";
import MoneroPoolHealthBox from "./MoneroPoolHealthBox";
import { useLocation } from "react-router-dom";
import { useEffect } from "react";
@ -29,6 +30,7 @@ export default function SettingsPage() {
>
<SettingsBox />
<DiscoveryBox />
<MoneroPoolHealthBox />
<ExportDataBox />
<DaemonControlBox />
<DonateInfoBox />
@ -223,37 +223,20 @@ export async function initializeContext() {
const bitcoinNodes =
store.getState().settings.nodes[network][Blockchain.Bitcoin];

// For Monero nodes, check availability and use the first working one
const moneroNodes =
store.getState().settings.nodes[network][Blockchain.Monero];
let moneroNode = null;
// For Monero nodes, get the configured node URL and pool setting
const useMoneroRpcPool = store.getState().settings.useMoneroRpcPool;
const moneroNodes = store.getState().settings.nodes[network][Blockchain.Monero];

if (moneroNodes.length > 0) {
try {
moneroNode = await Promise.any(
moneroNodes.map(async (node) => {
const isAvailable = await getNodeStatus(
node,
Blockchain.Monero,
network,
);
if (isAvailable) {
return node;
}
throw new Error(`Monero node ${node} is not available`);
}),
);
} catch {
// If no Monero node is available, use null
moneroNode = null;
}
}
// Always pass the first configured monero node URL directly without checking availability
// The backend will handle whether to use the pool or the custom node
const moneroNode = moneroNodes.length > 0 ? moneroNodes[0] : null;

// Initialize Tauri settings
const tauriSettings: TauriSettings = {
electrum_rpc_urls: bitcoinNodes,
monero_node_url: moneroNode,
use_tor: useTor,
use_monero_rpc_pool: useMoneroRpcPool,
};

logger.info("Initializing context with settings", tauriSettings);
@ -325,13 +308,15 @@ export async function updateAllNodeStatuses() {
const network = getNetwork();
const settings = store.getState().settings;

// Only check Monero nodes, skip Bitcoin nodes since we pass all electrum servers
// to the backend without checking them (ElectrumBalancer handles failover)
await Promise.all(
settings.nodes[network][Blockchain.Monero].map((node) =>
updateNodeStatus(node, Blockchain.Monero, network),
),
);
// Only check Monero nodes if we're using custom nodes (not RPC pool)
// Skip Bitcoin nodes since we pass all electrum servers to the backend without checking them (ElectrumBalancer handles failover)
if (!settings.useMoneroRpcPool) {
await Promise.all(
settings.nodes[network][Blockchain.Monero].map((node) =>
updateNodeStatus(node, Blockchain.Monero, network),
),
);
}
}

export async function getMoneroAddresses(): Promise<GetMoneroAddressesResponse> {
@ -361,3 +346,9 @@ export async function saveLogFiles(
): Promise<void> {
await invokeUnsafe<void>("save_txt_files", { zipFileName, content });
}

export async function saveFilesInDialog(files: Record<string, string>) {
await invokeUnsafe<void>("save_txt_files", {
files,
});
}
@ -7,6 +7,7 @@ import torSlice from "./features/torSlice";
import settingsSlice from "./features/settingsSlice";
import nodesSlice from "./features/nodesSlice";
import conversationsSlice from "./features/conversationsSlice";
import poolSlice from "./features/poolSlice";

export const reducers = {
swap: swapReducer,
@ -18,4 +19,5 @@ export const reducers = {
settings: settingsSlice,
nodes: nodesSlice,
conversations: conversationsSlice,
pool: poolSlice,
};
31
src-gui/src/store/features/poolSlice.ts
Normal file
@ -0,0 +1,31 @@
import { createSlice, PayloadAction } from "@reduxjs/toolkit";
import { PoolStatus } from "models/tauriModel";

interface PoolSlice {
status: PoolStatus | null;
isLoading: boolean;
}

const initialState: PoolSlice = {
status: null,
isLoading: true,
};

export const poolSlice = createSlice({
name: "pool",
initialState,
reducers: {
poolStatusReceived(slice, action: PayloadAction<PoolStatus>) {
slice.status = action.payload;
slice.isLoading = false;
},
poolStatusReset(slice) {
slice.status = null;
slice.isLoading = true;
},
},
});

export const { poolStatusReceived, poolStatusReset } = poolSlice.actions;

export default poolSlice.reducer;
@ -17,6 +17,8 @@ export interface SettingsState {
fiatCurrency: FiatCurrency;
/// Whether to enable Tor for p2p connections
enableTor: boolean;
/// Whether to use the Monero RPC pool for load balancing (true) or custom nodes (false)
useMoneroRpcPool: boolean;
userHasSeenIntroduction: boolean;
/// List of rendezvous points
rendezvousPoints: string[];
@ -119,6 +121,7 @@ const initialState: SettingsState = {
fetchFiatPrices: false,
fiatCurrency: FiatCurrency.Usd,
enableTor: true,
useMoneroRpcPool: true, // Default to using RPC pool
userHasSeenIntroduction: false,
rendezvousPoints: DEFAULT_RENDEZVOUS_POINTS,
};
@ -206,6 +209,9 @@ const alertsSlice = createSlice({
setTorEnabled(slice, action: PayloadAction<boolean>) {
slice.enableTor = action.payload;
},
setUseMoneroRpcPool(slice, action: PayloadAction<boolean>) {
slice.useMoneroRpcPool = action.payload;
},
},
});
@ -218,6 +224,7 @@ export const {
setFetchFiatPrices,
setFiatCurrency,
setTorEnabled,
setUseMoneroRpcPool,
setUserHasSeenIntroduction,
addRendezvousPoint,
removeRendezvousPoint,
@ -83,3 +83,24 @@ export function currencySymbol(currency: FiatCurrency): string | null {
return null;
}
}

/**
* Formats confirmation count, displaying "?" when the transaction state is unknown
* @param confirmations - The number of confirmations, or undefined if unknown
* @param maxConfirmations - Optional maximum confirmations to show as "X/Y" format
* @returns Formatted string showing confirmations or "?" if unknown
*/
export function formatConfirmations(
confirmations: number | undefined | null,
maxConfirmations?: number,
): string {
if (confirmations === undefined || confirmations === null) {
return maxConfirmations !== undefined ? `?/${maxConfirmations}` : "?";
}

if (maxConfirmations !== undefined) {
return `${confirmations}/${maxConfirmations}`;
}

return confirmations.toString();
}
@ -16,6 +16,7 @@ tauri-build = { version = "^2.0.0", features = [ "config-json5" ] }

[dependencies]
anyhow = "1"
monero-rpc-pool = { path = "../monero-rpc-pool" }
rustls = { version = "0.23.26", default-features = false, features = ["ring"] }
serde = { version = "1", features = [ "derive" ] }
serde_json = "1"
@ -141,7 +141,8 @@ fn setup(app: &mut tauri::App) -> Result<(), Box<dyn std::error::Error>> {

// We need to set a value for the Tauri state right at the start
// If we don't do this, Tauri commands will panic at runtime if no value is present
app_handle.manage::<RwLock<State>>(RwLock::new(State::new()));
let state = RwLock::new(State::new());
app_handle.manage::<RwLock<State>>(state);

Ok(())
}
@ -192,7 +193,7 @@ pub fn run() {
get_data_dir,
resolve_approval_request,
redact,
save_txt_files
save_txt_files,
])
.setup(setup)
.build(tauri::generate_context!())
@ -366,6 +367,48 @@ async fn initialize_context(
.context("Context is already being initialized")
.to_string_result()?;

// Determine which Monero node to use:
// - If using RPC pool, start and use the local RPC pool
// - Otherwise, use the provided node URL directly (even if empty)
let monero_node_url = if settings.use_monero_rpc_pool {
// Start RPC pool and use it
let data_dir = data::data_dir_from(None, testnet).to_string_result()?;
match monero_rpc_pool::start_server_with_random_port(
monero_rpc_pool::config::Config::new_random_port(
"127.0.0.1".to_string(),
data_dir.join("monero-rpc-pool"),
),
match testnet {
true => swap::monero::Network::Stagenet,
false => swap::monero::Network::Mainnet,
},
)
.await
{
Ok((server_info, mut status_receiver, _task_manager)) => {
let rpc_url = format!("http://{}:{}", server_info.host, server_info.port);
tracing::info!("Monero RPC Pool started on {}", rpc_url);

// Start listening for pool status updates and forward them to frontend
let pool_tauri_handle = TauriHandle::new(app_handle.clone());
tauri::async_runtime::spawn(async move {
while let Ok(status) = status_receiver.recv().await {
pool_tauri_handle.emit_pool_status_update(status);
}
});

rpc_url.parse().ok()
}
Err(e) => {
tracing::error!("Failed to start Monero RPC Pool: {}", e);
None
}
}
} else {
// Use the provided node URL directly without checking availability
settings.monero_node_url.clone()
};

// Get app handle and create a Tauri handle
let tauri_handle = TauriHandle::new(app_handle.clone());
@ -378,7 +421,7 @@ async fn initialize_context(
bitcoin_target_block: None,
})
.with_monero(Monero {
monero_node_address: settings.monero_node_url.clone(),
monero_node_address: monero_node_url,
})
.with_json(false)
.with_debug(true)
@ -44,6 +44,7 @@ libp2p-community-tor = { git = "https://github.com/umgefahren/libp2p-tor", branc
moka = { version = "0.12", features = ["sync", "future"] }
monero = { version = "0.12", features = ["serde_support"] }
monero-rpc = { path = "../monero-rpc" }
monero-rpc-pool = { path = "../monero-rpc-pool" }
monero-sys = { path = "../monero-sys" }
once_cell = "1.19"
pem = "3.0"
@ -242,6 +242,12 @@ pub struct Monero {
pub finality_confirmations: Option<u64>,
#[serde(with = "crate::monero::network")]
pub network: monero::Network,
#[serde(default = "default_monero_node_pool")]
pub monero_node_pool: bool,
}

fn default_monero_node_pool() -> bool {
false
}

#[derive(Clone, Debug, Deserialize, PartialEq, Eq, Serialize)]
@ -461,6 +467,7 @@ pub fn query_user_for_initial_config(testnet: bool) -> Result<Config> {
daemon_url: monero_daemon_url,
finality_confirmations: None,
network: monero_network,
monero_node_pool: false,
},
tor: TorConf {
register_hidden_service,
@ -511,6 +518,7 @@ mod tests {
daemon_url: defaults.monero_daemon_address,
finality_confirmations: None,
network: monero::Network::Stagenet,
monero_node_pool: false,
},
tor: Default::default(),
maker: Maker {
@ -556,6 +564,7 @@ mod tests {
daemon_url: defaults.monero_daemon_address,
finality_confirmations: None,
network: monero::Network::Mainnet,
monero_node_pool: false,
},
tor: Default::default(),
maker: Maker {
@ -611,6 +620,7 @@ mod tests {
daemon_url: defaults.monero_daemon_address,
finality_confirmations: None,
network: monero::Network::Mainnet,
monero_node_pool: false,
},
tor: Default::default(),
maker: Maker {
@ -44,6 +44,28 @@ use uuid::Uuid;

const DEFAULT_WALLET_NAME: &str = "asb-wallet";

trait IntoDaemon {
fn into_daemon(self) -> Result<Daemon>;
}

impl IntoDaemon for url::Url {
fn into_daemon(self) -> Result<Daemon> {
let address = self.to_string();
let ssl = self.scheme() == "https";

Ok(Daemon { address, ssl })
}
}

impl IntoDaemon for monero_rpc_pool::ServerInfo {
fn into_daemon(self) -> Result<Daemon> {
let address = format!("http://{}:{}", self.host, self.port);
let ssl = false; // Pool server always uses HTTP locally

Ok(Daemon { address, ssl })
}
}

#[tokio::main]
pub async fn main() -> Result<()> {
rustls::crypto::ring::default_provider()
@ -457,9 +479,39 @@ async fn init_monero_wallet(
) -> Result<Arc<monero::Wallets>> {
tracing::debug!("Initializing Monero wallets");

let daemon = Daemon {
address: config.monero.daemon_url.to_string(),
ssl: config.monero.daemon_url.as_str().contains("https"),
let daemon = if config.monero.monero_node_pool {
// Start the monero-rpc-pool and use it
tracing::info!("Starting Monero RPC Pool for ASB");

let (server_info, _status_receiver, _task_manager) =
monero_rpc_pool::start_server_with_random_port(
monero_rpc_pool::config::Config::new_random_port(
"127.0.0.1".to_string(),
config.data.dir.join("monero-rpc-pool"),
),
env_config.monero_network,
)
.await
.context("Failed to start Monero RPC Pool for ASB")?;

let pool_url = format!("http://{}:{}", server_info.host, server_info.port);
tracing::info!("Monero RPC Pool started for ASB on {}", pool_url);

server_info
.into_daemon()
.context("Failed to convert ServerInfo to Daemon")?
} else {
tracing::info!(
"Using direct Monero daemon connection: {}",
config.monero.daemon_url
);

config
.monero
.daemon_url
.clone()
.into_daemon()
.context("Failed to convert daemon URL to Daemon")?
};

let manager = monero::Wallets::new(
@ -1195,10 +1195,27 @@ pub async fn monero_recovery(
#[tracing::instrument(fields(method = "get_current_swap"), skip(context))]
pub async fn get_current_swap(context: Arc<Context>) -> Result<serde_json::Value> {
Ok(json!({
"swap_id": context.swap_lock.get_current_swap_id().await
"swap_id": context.swap_lock.get_current_swap_id().await,
}))
}

pub async fn resolve_approval_request(
resolve_approval: ResolveApprovalArgs,
ctx: Arc<Context>,
) -> Result<ResolveApprovalResponse> {
let request_id = Uuid::parse_str(&resolve_approval.request_id).context("Invalid request ID")?;

if let Some(handle) = ctx.tauri_handle.clone() {
handle
.resolve_approval(request_id, resolve_approval.accept)
.await?;
} else {
bail!("Cannot resolve approval without a Tauri handle");
}

Ok(ResolveApprovalResponse { success: true })
}

fn qr_code(value: &impl ToString) -> Result<String> {
let code = QrCode::new(value.to_string())?;
let qr_code = code
@ -1353,6 +1370,9 @@ struct UnknownMoneroNetwork(String);

impl CheckMoneroNodeArgs {
pub async fn request(self) -> Result<CheckMoneroNodeResponse> {
let url = self.url.clone();
let network_str = self.network.clone();

let network = match self.network.to_lowercase().as_str() {
// When the GUI says testnet, it means monero stagenet
"mainnet" => Network::Mainnet,
@ -1373,11 +1393,20 @@ impl CheckMoneroNodeArgs {
return Ok(CheckMoneroNodeResponse { available: false });
};

let Ok(available) = monero_daemon.is_available(&CLIENT).await else {
return Ok(CheckMoneroNodeResponse { available: false });
};
match monero_daemon.is_available(&CLIENT).await {
Ok(available) => Ok(CheckMoneroNodeResponse { available }),
Err(e) => {
tracing::error!(
url = %url,
network = %network_str,
error = ?e,
error_chain = %format!("{:#}", e),
"Failed to check monero node availability"
);

Ok(CheckMoneroNodeResponse { available })
Ok(CheckMoneroNodeResponse { available: false })
}
}
}
}
@ -1410,14 +1439,14 @@ impl CheckElectrumNodeArgs {
}

#[typeshare]
#[derive(Deserialize, Serialize)]
#[derive(Debug, Eq, PartialEq, Serialize, Deserialize)]
pub struct ResolveApprovalArgs {
pub request_id: String,
pub accept: bool,
}

#[typeshare]
#[derive(Deserialize, Serialize)]
#[derive(Serialize, Deserialize, Debug)]
pub struct ResolveApprovalResponse {
pub success: bool,
}
@ -1426,14 +1455,6 @@ impl Request for ResolveApprovalArgs {
type Response = ResolveApprovalResponse;

async fn request(self, ctx: Arc<Context>) -> Result<Self::Response> {
let request_id = Uuid::parse_str(&self.request_id).context("Invalid request ID")?;

if let Some(handle) = ctx.tauri_handle.clone() {
handle.resolve_approval(request_id, self.accept).await?;
} else {
bail!("Cannot resolve approval without a Tauri handle");
}

Ok(ResolveApprovalResponse { success: true })
resolve_approval_request(self, ctx).await
}
}
@ -3,6 +3,7 @@ use crate::bitcoin;
use crate::{bitcoin::ExpiredTimelocks, monero, network::quote::BidQuote};
use anyhow::{anyhow, Context, Result};
use bitcoin::Txid;
use monero_rpc_pool::pool::PoolStatus;
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
use std::future::Future;
@ -27,6 +28,7 @@ pub enum TauriEvent {
TimelockChange(TauriTimelockChangeEvent),
Approval(ApprovalRequest),
BackgroundProgress(TauriBackgroundProgressWrapper),
PoolStatusUpdate(PoolStatus),
}

const TAURI_UNIFIED_EVENT_NAME: &str = "tauri-unified-event";
@ -297,6 +299,10 @@ pub trait TauriEmitter {
));
}

fn emit_pool_status_update(&self, status: PoolStatus) {
self.emit_unified_event(TauriEvent::PoolStatusUpdate(status));
}

/// Create a new background progress handle for tracking a specific type of progress
fn new_background_process<T: Clone>(
&self,
@ -609,14 +615,14 @@ pub enum TauriSwapProgressEvent {
BtcLockTxInMempool {
#[typeshare(serialized_as = "string")]
btc_lock_txid: bitcoin::Txid,
#[typeshare(serialized_as = "number")]
btc_lock_confirmations: u64,
#[typeshare(serialized_as = "Option<number>")]
btc_lock_confirmations: Option<u64>,
},
XmrLockTxInMempool {
#[typeshare(serialized_as = "string")]
xmr_lock_txid: monero::TxHash,
#[typeshare(serialized_as = "number")]
xmr_lock_tx_confirmations: u64,
#[typeshare(serialized_as = "Option<number>")]
xmr_lock_tx_confirmations: Option<u64>,
},
XmrLocked,
EncryptedSignatureSent,
@ -708,6 +714,8 @@ pub struct TauriSettings {
pub electrum_rpc_urls: Vec<String>,
/// Whether to initialize and use a tor client.
pub use_tor: bool,
/// Whether to use the Monero RPC pool instead of custom nodes.
pub use_monero_rpc_pool: bool,
}

#[typeshare]
@ -67,7 +67,13 @@ pub fn init(
"libp2p_dcutr",
"monero_cpp",
];
let OUR_CRATES: Vec<&str> = vec!["swap", "asb", "monero_sys", "unstoppableswap-gui-rs"];
let OUR_CRATES: Vec<&str> = vec![
"swap",
"asb",
"monero_sys",
"unstoppableswap-gui-rs",
"monero_rpc_pool",
];

// General log file for non-verbose logs
let file_appender: RollingFileAppender = tracing_appender::rolling::never(&dir, "swap-all.log");
@ -89,6 +95,8 @@ pub fn init(
.with_ansi(false)
.with_timer(UtcTime::rfc_3339())
.with_target(false)
.with_file(true)
.with_line_number(true)
.json()
.with_filter(env_filter(level_filter, OUR_CRATES.clone())?);
@ -100,6 +108,8 @@ pub fn init(
.with_ansi(false)
.with_timer(UtcTime::rfc_3339())
.with_target(false)
.with_file(true)
.with_line_number(true)
.json()
.with_filter(env_filter_with_libp2p_info(
LevelFilter::TRACE,
@ -116,7 +126,9 @@ pub fn init(
.with_writer(std::io::stderr)
.with_ansi(is_terminal)
.with_timer(UtcTime::rfc_3339())
.with_target(true);
.with_target(true)
.with_file(true)
.with_line_number(true);

// Layer for writing to the Tauri guest. This will be displayed in the GUI.
// Crates: All crates with libp2p at INFO+ level
@ -126,6 +138,8 @@ pub fn init(
.with_ansi(false)
.with_timer(UtcTime::rfc_3339())
.with_target(true)
.with_file(true)
.with_line_number(true)
.json()
.with_filter(env_filter_with_libp2p_info(
level_filter,
@ -146,23 +146,9 @@ async fn next_state(
BobState::SwapSetupCompleted(state2)
}
BobState::SwapSetupCompleted(state2) => {
// Record the current monero wallet block height so we don't have to scan from
// block 0 once we create the redeem wallet.
// This has to be done **before** the Bitcoin is locked in order to ensure that
// if Bob goes offline the recorded wallet-height is correct.
// If we only record this later, it can happen that Bob publishes the Bitcoin
// transaction, goes offline, while offline Alice publishes Monero.
// If the Monero transaction gets confirmed before Bob comes online again then
// Bob would record a wallet-height that is past the lock transaction height,
// which can lead to the wallet not detect the transaction.
let monero_wallet_restore_blockheight = monero_wallet
.blockchain_height()
.await
.context("Failed to fetch current Monero blockheight")?;

// Alice and Bob have exchanged all necessary signatures
let xmr_receive_amount = state2.xmr;

// Alice and Bob have exchanged info
// Sign the Bitcoin lock transaction
let (state3, tx_lock) = state2.lock_btc().await?;
let signed_tx = bitcoin_wallet
@ -184,8 +170,9 @@ async fn next_state(
swap_id,
});

// We request approval before publishing the Bitcoin lock transaction, as the exchange rate determined at this step might be different from the
// we previously displayed to the user.
// We request approval before publishing the Bitcoin lock transaction,
// as the exchange rate determined at this step might be different
// from the one we previously displayed to the user.
let approval_result = event_emitter
.request_approval(request, PRE_BTC_LOCK_APPROVAL_TIMEOUT_SECS)
.await;
@ -194,6 +181,20 @@ async fn next_state(
Ok(true) => {
tracing::debug!("User approved swap offer");

// Record the current monero wallet block height so we don't have to scan from
// block 0 once we create the redeem wallet.
// This has to be done **before** the Bitcoin is locked in order to ensure that
// if Bob goes offline the recorded wallet-height is correct.
// If we only record this later, it can happen that Bob publishes the Bitcoin
// transaction, goes offline, while offline Alice publishes Monero.
// If the Monero transaction gets confirmed before Bob comes online again then
// Bob would record a wallet-height that is past the lock transaction height,
// which can lead to the wallet not detect the transaction.
let monero_wallet_restore_blockheight = monero_wallet
.blockchain_height()
.await
.context("Failed to fetch current Monero blockheight")?;

// Publish the signed Bitcoin lock transaction
let (..) = bitcoin_wallet.broadcast(signed_tx, "lock").await?;
@ -224,7 +225,7 @@ async fn next_state(
swap_id,
TauriSwapProgressEvent::BtcLockTxInMempool {
btc_lock_txid: state3.tx_lock_id(),
btc_lock_confirmations: 0,
btc_lock_confirmations: None,
},
);
@ -289,7 +290,7 @@ async fn next_state(
swap_id,
TauriSwapProgressEvent::BtcLockTxInMempool {
btc_lock_txid: state3.tx_lock_id(),
btc_lock_confirmations: u64::from(confirmed.confirmations()),
btc_lock_confirmations: Some(u64::from(confirmed.confirmations())),
},
);
}
@ -334,7 +335,7 @@ async fn next_state(
swap_id,
TauriSwapProgressEvent::XmrLockTxInMempool {
xmr_lock_txid: lock_transfer_proof.tx_hash(),
xmr_lock_tx_confirmations: 0,
xmr_lock_tx_confirmations: None,
},
);
@ -369,7 +370,7 @@ async fn next_state(
swap_id,
TauriSwapProgressEvent::XmrLockTxInMempool {
xmr_lock_txid: lock_transfer_proof.clone().tx_hash(),
xmr_lock_tx_confirmations: confirmations,
xmr_lock_tx_confirmations: Some(confirmations),
},
);
}),
@ -18,6 +18,8 @@ pub fn capture_logs(min_level: LevelFilter) -> MakeCapturingWriter {
tracing_subscriber::fmt()
.with_ansi(false)
.without_time()
.with_file(true)
.with_line_number(true)
.with_writer(make_writer.clone())
.with_env_filter(format!("{}", min_level))
.finish(),