mirror of
https://github.com/comit-network/xmr-btc-swap.git
synced 2025-08-24 05:59:42 -04:00
Merge branch 'master' of https://github.com/UnstoppableSwap/core
This commit is contained in:
commit
8741ac7619
66 changed files with 5596 additions and 681 deletions
2
.github/workflows/draft-new-release.yml
vendored
2
.github/workflows/draft-new-release.yml
vendored
|
@ -57,7 +57,7 @@ jobs:
|
|||
id: make-commit
|
||||
env:
|
||||
DPRINT_VERSION: "0.50.0"
|
||||
RUST_TOOLCHAIN: "1.82"
|
||||
RUST_TOOLCHAIN: "1.85"
|
||||
run: |
|
||||
rustup component add rustfmt --toolchain "$RUST_TOOLCHAIN-x86_64-unknown-linux-gnu"
|
||||
curl -fsSL https://dprint.dev/install.sh | sh -s $DPRINT_VERSION
|
||||
|
|
5
.gitignore
vendored
5
.gitignore
vendored
|
@ -3,3 +3,8 @@ target/
|
|||
.claude/settings.local.json
|
||||
.DS_Store
|
||||
build/
|
||||
release-build.sh
|
||||
cn_macos
|
||||
target-check
|
||||
monero-rpc-pool/temp_db.sqlite
|
||||
monero-rpc-pool/temp.db
|
||||
|
|
9
.vscode/settings.json
vendored
9
.vscode/settings.json
vendored
|
@ -69,6 +69,13 @@
|
|||
"unordered_set": "cpp",
|
||||
"variant": "cpp",
|
||||
"algorithm": "cpp",
|
||||
"*.rs": "rust"
|
||||
"*.rs": "rust",
|
||||
"shared_mutex": "cpp",
|
||||
"source_location": "cpp",
|
||||
"strstream": "cpp",
|
||||
"typeindex": "cpp"
|
||||
},
|
||||
"rust-analyzer.cargo.extraEnv": {
|
||||
"CARGO_TARGET_DIR": "target-check"
|
||||
}
|
||||
}
|
|
@ -7,7 +7,14 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
|
|||
|
||||
## [Unreleased]
|
||||
|
||||
## [2.2.0-beta] - 2025-06-17
|
||||
- ASB + GUI + CLI: We now cache fee estimates for the Bitcoin wallet for up to 2 minutes. This improves the speed of fee estimation and reduces the number of requests to the Electrum servers.
|
||||
|
||||
## [2.3.0-beta.1] - 2025-06-19
|
||||
|
||||
- ASB + CLI + GUI: Introduce a load-balancing proxy for Monero RPC nodes that automatically discovers healthy nodes and routes requests to improve connection reliability.
|
||||
- ASB: Added `monero_node_pool` boolean option to ASB config. When enabled, the ASB uses the internal Monero RPC pool instead of connecting directly to a single daemon URL, providing improved reliability and automatic failover across multiple Monero nodes.
|
||||
|
||||
## [2.2.0-beta.2] - 2025-06-17
|
||||
|
||||
- We now call Monero function directly (via FFI bindings) instead of using `monero-wallet-rpc`.
|
||||
- ASB: Since we don't communicate with `monero-wallet-rpc` anymore, the Monero wallet's will no longer be accessible by connecting to it. If you are using the asb-docker-compose setup, run this command to migrate the wallet files from the volume of the monero-wallet-rpc container to the volume of the asb container:
|
||||
|
|
1150
Cargo.lock
generated
1150
Cargo.lock
generated
File diff suppressed because it is too large
Load diff
|
@ -1,6 +1,6 @@
|
|||
[workspace]
|
||||
resolver = "2"
|
||||
members = ["monero-rpc", "monero-sys", "src-tauri", "swap"]
|
||||
members = ["monero-rpc", "monero-rpc-pool", "monero-sys", "src-tauri", "swap", "electrum-pool"]
|
||||
|
||||
[profile.release]
|
||||
opt-level = 0
|
||||
|
|
15
Dockerfile
15
Dockerfile
|
@ -36,18 +36,23 @@ RUN apt-get update && \
|
|||
apt-get clean && \
|
||||
rm -rf /var/lib/apt/lists/*
|
||||
|
||||
# Install Rust 1.82
|
||||
RUN curl https://sh.rustup.rs -sSf | sh -s -- -y --default-toolchain 1.82.0
|
||||
# Install Rust 1.85
|
||||
RUN curl https://sh.rustup.rs -sSf | sh -s -- -y --default-toolchain 1.85.0
|
||||
ENV PATH="/root/.cargo/bin:${PATH}"
|
||||
|
||||
COPY . .
|
||||
|
||||
# Update submodules recursively
|
||||
# Force update to handle any local changes in submodules
|
||||
RUN git submodule sync --recursive && git submodule update --init --recursive --force
|
||||
# Check that submodules are present (they should be initialized before building)
|
||||
RUN if [ ! -f "monero-sys/monero/CMakeLists.txt" ]; then \
|
||||
echo "ERROR: Submodules not initialized. Run 'git submodule update --init --recursive' before building Docker image."; \
|
||||
exit 1; \
|
||||
fi
|
||||
|
||||
WORKDIR /build/swap
|
||||
|
||||
# Act as if we are in a GitHub Actions environment
|
||||
ENV DOCKER_BUILD=true
|
||||
|
||||
RUN cargo build -vv --bin=asb
|
||||
|
||||
FROM ubuntu:24.04
|
||||
|
|
26
dev_scripts/bump-version.sh
Executable file
26
dev_scripts/bump-version.sh
Executable file
|
@ -0,0 +1,26 @@
|
|||
#!/bin/bash
|
||||
set -eu
|
||||
|
||||
if [ "$#" -ne 1 ]; then
|
||||
echo "Usage: $0 <version>"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
VERSION=$1
|
||||
TODAY=$(date +%Y-%m-%d)
|
||||
echo "Bumping version to $VERSION"
|
||||
|
||||
# Using sed and assuming GNU sed syntax as this is for the github workflow.
|
||||
|
||||
# Update version in tauri.conf.json
|
||||
sed -i 's/"version": "[^"]*"/"version": "'"$VERSION"'"/' src-tauri/tauri.conf.json
|
||||
|
||||
# Update version in Cargo.toml files
|
||||
sed -i -E 's/^version = "[0-9]+\.[0-9]+\.[0-9]+"/version = "'"$VERSION"'"/' swap/Cargo.toml src-tauri/Cargo.toml
|
||||
|
||||
# Update changelog
|
||||
sed -i "s/^## \\[Unreleased\\]/## [$VERSION] - $TODAY/" CHANGELOG.md
|
||||
# Add a new [Unreleased] section at the top
|
||||
sed -i '3i## [Unreleased]\n' CHANGELOG.md
|
||||
|
||||
echo "Updated all files to version $VERSION."
|
17
electrum-pool/Cargo.toml
Normal file
17
electrum-pool/Cargo.toml
Normal file
|
@ -0,0 +1,17 @@
|
|||
[package]
|
||||
name = "electrum-pool"
|
||||
version = "0.1.0"
|
||||
edition = "2021"
|
||||
authors = ["UnstoppableSwap Team <help@unstoppableswap.net>"]
|
||||
|
||||
[dependencies]
|
||||
backoff = { version = "0.4", features = ["tokio"] }
|
||||
bdk_electrum = { version = "0.19", default-features = false, features = ["use-rustls-ring"] }
|
||||
bitcoin = { version = "0.32", features = ["rand", "serde"] }
|
||||
futures = { version = "0.3", default-features = false, features = ["std"] }
|
||||
once_cell = "1.19"
|
||||
tokio = { version = "1", features = ["rt-multi-thread", "time", "macros", "sync"] }
|
||||
tracing = { version = "0.1", features = ["attributes"] }
|
||||
|
||||
[dev-dependencies]
|
||||
serde_json = "1"
|
|
@ -197,7 +197,7 @@ where
|
|||
&self,
|
||||
kind: &str,
|
||||
f: F,
|
||||
) -> Result<T, crate::bitcoin::electrum_balancer::MultiError>
|
||||
) -> Result<T, MultiError>
|
||||
where
|
||||
F: Fn(&C) -> Result<T, Error> + Send + Sync + Clone + 'static,
|
||||
T: Send + 'static,
|
7
justfile
7
justfile
|
@ -100,4 +100,9 @@ docker-prune-network:
|
|||
|
||||
# Install dependencies required for building monero-sys
|
||||
prepare_mac_os_brew_dependencies:
|
||||
cd dev_scripts && chmod +x ./brew_dependencies_install.sh && ./brew_dependencies_install.sh
|
||||
cd dev_scripts && chmod +x ./brew_dependencies_install.sh && ./brew_dependencies_install.sh
|
||||
|
||||
# Takes a crate (e.g monero-rpc-pool) and uses code2prompt to copy to clipboard
|
||||
# E.g code2prompt . --exclude "*.lock" --exclude ".sqlx/*" --exclude "target"
|
||||
code2prompt_single_crate crate:
|
||||
cd {{crate}} && code2prompt . --exclude "*.lock" --exclude ".sqlx/*" --exclude "target"
|
48
monero-rpc-pool/.sqlx/query-3e8f39a6ec4443cec6497672891d12bbf7c1d0aca061827740af88ced863ae23.json
generated
Normal file
48
monero-rpc-pool/.sqlx/query-3e8f39a6ec4443cec6497672891d12bbf7c1d0aca061827740af88ced863ae23.json
generated
Normal file
|
@ -0,0 +1,48 @@
|
|||
{
|
||||
"db_name": "SQLite",
|
||||
"query": "\n SELECT \n id as \"id!: i64\",\n scheme,\n host,\n port,\n full_url,\n network as \"network!: String\",\n first_seen_at\n FROM monero_nodes \n ORDER BY id\n ",
|
||||
"describe": {
|
||||
"columns": [
|
||||
{
|
||||
"name": "id!: i64",
|
||||
"ordinal": 0,
|
||||
"type_info": "Integer"
|
||||
},
|
||||
{
|
||||
"name": "scheme",
|
||||
"ordinal": 1,
|
||||
"type_info": "Text"
|
||||
},
|
||||
{
|
||||
"name": "host",
|
||||
"ordinal": 2,
|
||||
"type_info": "Text"
|
||||
},
|
||||
{
|
||||
"name": "port",
|
||||
"ordinal": 3,
|
||||
"type_info": "Integer"
|
||||
},
|
||||
{
|
||||
"name": "full_url",
|
||||
"ordinal": 4,
|
||||
"type_info": "Text"
|
||||
},
|
||||
{
|
||||
"name": "network!: String",
|
||||
"ordinal": 5,
|
||||
"type_info": "Text"
|
||||
},
|
||||
{
|
||||
"name": "first_seen_at",
|
||||
"ordinal": 6,
|
||||
"type_info": "Text"
|
||||
}
|
||||
],
|
||||
"parameters": {
|
||||
"Right": 0
|
||||
},
|
||||
"nullable": [false, false, false, false, false, false, false]
|
||||
},
|
||||
"hash": "3e8f39a6ec4443cec6497672891d12bbf7c1d0aca061827740af88ced863ae23"
|
||||
}
|
116
monero-rpc-pool/.sqlx/query-549f5ef13ec7bf5d987dcb893753a9c903edcafa3a66bd82965b40a9e7f238b6.json
generated
Normal file
116
monero-rpc-pool/.sqlx/query-549f5ef13ec7bf5d987dcb893753a9c903edcafa3a66bd82965b40a9e7f238b6.json
generated
Normal file
|
@ -0,0 +1,116 @@
|
|||
{
|
||||
"db_name": "SQLite",
|
||||
"query": "\n SELECT \n n.id as \"id!: i64\",\n n.scheme,\n n.host,\n n.port,\n n.full_url,\n n.network,\n n.first_seen_at,\n CAST(COALESCE(stats.success_count, 0) AS INTEGER) as \"success_count!: i64\",\n CAST(COALESCE(stats.failure_count, 0) AS INTEGER) as \"failure_count!: i64\",\n stats.last_success as \"last_success?: String\",\n stats.last_failure as \"last_failure?: String\",\n stats.last_checked as \"last_checked?: String\",\n CAST(CASE WHEN reliable_nodes.node_id IS NOT NULL THEN 1 ELSE 0 END AS INTEGER) as \"is_reliable!: i64\",\n stats.avg_latency_ms as \"avg_latency_ms?: f64\",\n stats.min_latency_ms as \"min_latency_ms?: f64\",\n stats.max_latency_ms as \"max_latency_ms?: f64\",\n stats.last_latency_ms as \"last_latency_ms?: f64\"\n FROM monero_nodes n\n LEFT JOIN (\n SELECT \n node_id,\n SUM(CASE WHEN was_successful THEN 1 ELSE 0 END) as success_count,\n SUM(CASE WHEN NOT was_successful THEN 1 ELSE 0 END) as failure_count,\n MAX(CASE WHEN was_successful THEN timestamp END) as last_success,\n MAX(CASE WHEN NOT was_successful THEN timestamp END) as last_failure,\n MAX(timestamp) as last_checked,\n AVG(CASE WHEN was_successful AND latency_ms IS NOT NULL THEN latency_ms END) as avg_latency_ms,\n MIN(CASE WHEN was_successful AND latency_ms IS NOT NULL THEN latency_ms END) as min_latency_ms,\n MAX(CASE WHEN was_successful AND latency_ms IS NOT NULL THEN latency_ms END) as max_latency_ms,\n (SELECT latency_ms FROM health_checks hc2 WHERE hc2.node_id = health_checks.node_id ORDER BY timestamp DESC LIMIT 1) as last_latency_ms\n FROM health_checks \n GROUP BY node_id\n ) stats ON n.id = stats.node_id\n LEFT JOIN (\n SELECT DISTINCT node_id FROM (\n SELECT \n n2.id as node_id,\n COALESCE(s2.success_count, 0) as success_count,\n COALESCE(s2.failure_count, 0) as failure_count,\n s2.avg_latency_ms,\n (CAST(COALESCE(s2.success_count, 0) AS REAL) / CAST(COALESCE(s2.success_count, 0) + COALESCE(s2.failure_count, 0) AS REAL)) * \n (MIN(COALESCE(s2.success_count, 0) + 
COALESCE(s2.failure_count, 0), 200) / 200.0) * 0.8 +\n CASE \n WHEN s2.avg_latency_ms IS NOT NULL THEN (1.0 - (MIN(s2.avg_latency_ms, 2000) / 2000.0)) * 0.2\n ELSE 0.0 \n END as reliability_score\n FROM monero_nodes n2\n LEFT JOIN (\n SELECT \n node_id,\n SUM(CASE WHEN was_successful THEN 1 ELSE 0 END) as success_count,\n SUM(CASE WHEN NOT was_successful THEN 1 ELSE 0 END) as failure_count,\n AVG(CASE WHEN was_successful AND latency_ms IS NOT NULL THEN latency_ms END) as avg_latency_ms\n FROM health_checks \n GROUP BY node_id\n ) s2 ON n2.id = s2.node_id\n WHERE n2.network = ? AND (COALESCE(s2.success_count, 0) + COALESCE(s2.failure_count, 0)) > 0\n ORDER BY reliability_score DESC\n LIMIT 4\n )\n ) reliable_nodes ON n.id = reliable_nodes.node_id\n WHERE n.network = ? AND (COALESCE(stats.success_count, 0) + COALESCE(stats.failure_count, 0)) > 0\n ORDER BY \n (CAST(COALESCE(stats.success_count, 0) AS REAL) / CAST(COALESCE(stats.success_count, 0) + COALESCE(stats.failure_count, 0) AS REAL)) DESC,\n stats.avg_latency_ms ASC\n LIMIT ?\n ",
|
||||
"describe": {
|
||||
"columns": [
|
||||
{
|
||||
"name": "id!: i64",
|
||||
"ordinal": 0,
|
||||
"type_info": "Integer"
|
||||
},
|
||||
{
|
||||
"name": "scheme",
|
||||
"ordinal": 1,
|
||||
"type_info": "Text"
|
||||
},
|
||||
{
|
||||
"name": "host",
|
||||
"ordinal": 2,
|
||||
"type_info": "Text"
|
||||
},
|
||||
{
|
||||
"name": "port",
|
||||
"ordinal": 3,
|
||||
"type_info": "Integer"
|
||||
},
|
||||
{
|
||||
"name": "full_url",
|
||||
"ordinal": 4,
|
||||
"type_info": "Text"
|
||||
},
|
||||
{
|
||||
"name": "network",
|
||||
"ordinal": 5,
|
||||
"type_info": "Text"
|
||||
},
|
||||
{
|
||||
"name": "first_seen_at",
|
||||
"ordinal": 6,
|
||||
"type_info": "Text"
|
||||
},
|
||||
{
|
||||
"name": "success_count!: i64",
|
||||
"ordinal": 7,
|
||||
"type_info": "Null"
|
||||
},
|
||||
{
|
||||
"name": "failure_count!: i64",
|
||||
"ordinal": 8,
|
||||
"type_info": "Null"
|
||||
},
|
||||
{
|
||||
"name": "last_success?: String",
|
||||
"ordinal": 9,
|
||||
"type_info": "Null"
|
||||
},
|
||||
{
|
||||
"name": "last_failure?: String",
|
||||
"ordinal": 10,
|
||||
"type_info": "Null"
|
||||
},
|
||||
{
|
||||
"name": "last_checked?: String",
|
||||
"ordinal": 11,
|
||||
"type_info": "Null"
|
||||
},
|
||||
{
|
||||
"name": "is_reliable!: i64",
|
||||
"ordinal": 12,
|
||||
"type_info": "Null"
|
||||
},
|
||||
{
|
||||
"name": "avg_latency_ms?: f64",
|
||||
"ordinal": 13,
|
||||
"type_info": "Null"
|
||||
},
|
||||
{
|
||||
"name": "min_latency_ms?: f64",
|
||||
"ordinal": 14,
|
||||
"type_info": "Null"
|
||||
},
|
||||
{
|
||||
"name": "max_latency_ms?: f64",
|
||||
"ordinal": 15,
|
||||
"type_info": "Null"
|
||||
},
|
||||
{
|
||||
"name": "last_latency_ms?: f64",
|
||||
"ordinal": 16,
|
||||
"type_info": "Float"
|
||||
}
|
||||
],
|
||||
"parameters": {
|
||||
"Right": 3
|
||||
},
|
||||
"nullable": [
|
||||
true,
|
||||
false,
|
||||
false,
|
||||
false,
|
||||
false,
|
||||
false,
|
||||
false,
|
||||
null,
|
||||
null,
|
||||
null,
|
||||
null,
|
||||
null,
|
||||
null,
|
||||
null,
|
||||
null,
|
||||
null,
|
||||
true
|
||||
]
|
||||
},
|
||||
"hash": "549f5ef13ec7bf5d987dcb893753a9c903edcafa3a66bd82965b40a9e7f238b6"
|
||||
}
|
12
monero-rpc-pool/.sqlx/query-56549d93f0e2106297b85565a52b2d9ac64d5b50fb7aa6028be3fcf266fc1d5d.json
generated
Normal file
12
monero-rpc-pool/.sqlx/query-56549d93f0e2106297b85565a52b2d9ac64d5b50fb7aa6028be3fcf266fc1d5d.json
generated
Normal file
|
@ -0,0 +1,12 @@
|
|||
{
|
||||
"db_name": "SQLite",
|
||||
"query": "\n INSERT INTO health_checks (node_id, timestamp, was_successful, latency_ms)\n VALUES (?, ?, ?, ?)\n ",
|
||||
"describe": {
|
||||
"columns": [],
|
||||
"parameters": {
|
||||
"Right": 4
|
||||
},
|
||||
"nullable": []
|
||||
},
|
||||
"hash": "56549d93f0e2106297b85565a52b2d9ac64d5b50fb7aa6028be3fcf266fc1d5d"
|
||||
}
|
12
monero-rpc-pool/.sqlx/query-5736de2aac47eb69d7f6835d266aa28732b02a5e8e055ffaebcb452ed1b5044c.json
generated
Normal file
12
monero-rpc-pool/.sqlx/query-5736de2aac47eb69d7f6835d266aa28732b02a5e8e055ffaebcb452ed1b5044c.json
generated
Normal file
|
@ -0,0 +1,12 @@
|
|||
{
|
||||
"db_name": "SQLite",
|
||||
"query": "\n UPDATE monero_nodes \n SET network = ?, updated_at = ?\n WHERE full_url = ?\n ",
|
||||
"describe": {
|
||||
"columns": [],
|
||||
"parameters": {
|
||||
"Right": 3
|
||||
},
|
||||
"nullable": []
|
||||
},
|
||||
"hash": "5736de2aac47eb69d7f6835d266aa28732b02a5e8e055ffaebcb452ed1b5044c"
|
||||
}
|
18
monero-rpc-pool/.sqlx/query-5798d9589772742f074e0ecc2551a40d943bfb7ed2e295f09f12d77cb65ce821.json
generated
Normal file
18
monero-rpc-pool/.sqlx/query-5798d9589772742f074e0ecc2551a40d943bfb7ed2e295f09f12d77cb65ce821.json
generated
Normal file
|
@ -0,0 +1,18 @@
|
|||
{
|
||||
"db_name": "SQLite",
|
||||
"query": "\n INSERT INTO monero_nodes (scheme, host, port, full_url, network, first_seen_at, updated_at)\n VALUES (?, ?, ?, ?, ?, ?, ?)\n ON CONFLICT(full_url) DO UPDATE SET\n network = excluded.network,\n updated_at = excluded.updated_at\n RETURNING id\n ",
|
||||
"describe": {
|
||||
"columns": [
|
||||
{
|
||||
"name": "id",
|
||||
"ordinal": 0,
|
||||
"type_info": "Integer"
|
||||
}
|
||||
],
|
||||
"parameters": {
|
||||
"Right": 7
|
||||
},
|
||||
"nullable": [false]
|
||||
},
|
||||
"hash": "5798d9589772742f074e0ecc2551a40d943bfb7ed2e295f09f12d77cb65ce821"
|
||||
}
|
116
monero-rpc-pool/.sqlx/query-5a25c95c04b11a60a04ad97b5fb684e9a0cc2eb5daf64f33e924f0c38a2edfec.json
generated
Normal file
116
monero-rpc-pool/.sqlx/query-5a25c95c04b11a60a04ad97b5fb684e9a0cc2eb5daf64f33e924f0c38a2edfec.json
generated
Normal file
|
@ -0,0 +1,116 @@
|
|||
{
|
||||
"db_name": "SQLite",
|
||||
"query": "\n SELECT \n n.id as \"id!: i64\",\n n.scheme,\n n.host,\n n.port,\n n.full_url,\n n.network,\n n.first_seen_at,\n CAST(COALESCE(stats.success_count, 0) AS INTEGER) as \"success_count!: i64\",\n CAST(COALESCE(stats.failure_count, 0) AS INTEGER) as \"failure_count!: i64\",\n stats.last_success as \"last_success?: String\",\n stats.last_failure as \"last_failure?: String\",\n stats.last_checked as \"last_checked?: String\",\n CAST(CASE WHEN reliable_nodes.node_id IS NOT NULL THEN 1 ELSE 0 END AS INTEGER) as \"is_reliable!: i64\",\n stats.avg_latency_ms as \"avg_latency_ms?: f64\",\n stats.min_latency_ms as \"min_latency_ms?: f64\",\n stats.max_latency_ms as \"max_latency_ms?: f64\",\n stats.last_latency_ms as \"last_latency_ms?: f64\"\n FROM monero_nodes n\n LEFT JOIN (\n SELECT \n node_id,\n SUM(CASE WHEN was_successful THEN 1 ELSE 0 END) as success_count,\n SUM(CASE WHEN NOT was_successful THEN 1 ELSE 0 END) as failure_count,\n MAX(CASE WHEN was_successful THEN timestamp END) as last_success,\n MAX(CASE WHEN NOT was_successful THEN timestamp END) as last_failure,\n MAX(timestamp) as last_checked,\n AVG(CASE WHEN was_successful AND latency_ms IS NOT NULL THEN latency_ms END) as avg_latency_ms,\n MIN(CASE WHEN was_successful AND latency_ms IS NOT NULL THEN latency_ms END) as min_latency_ms,\n MAX(CASE WHEN was_successful AND latency_ms IS NOT NULL THEN latency_ms END) as max_latency_ms,\n (SELECT latency_ms FROM health_checks hc2 WHERE hc2.node_id = health_checks.node_id ORDER BY timestamp DESC LIMIT 1) as last_latency_ms\n FROM health_checks \n GROUP BY node_id\n ) stats ON n.id = stats.node_id\n LEFT JOIN (\n SELECT DISTINCT node_id FROM (\n SELECT \n n2.id as node_id,\n COALESCE(s2.success_count, 0) as success_count,\n COALESCE(s2.failure_count, 0) as failure_count,\n s2.avg_latency_ms,\n (CAST(COALESCE(s2.success_count, 0) AS REAL) / CAST(COALESCE(s2.success_count, 0) + COALESCE(s2.failure_count, 0) AS REAL)) * \n (MIN(COALESCE(s2.success_count, 0) + 
COALESCE(s2.failure_count, 0), 200) / 200.0) * 0.8 +\n CASE \n WHEN s2.avg_latency_ms IS NOT NULL THEN (1.0 - (MIN(s2.avg_latency_ms, 2000) / 2000.0)) * 0.2\n ELSE 0.0 \n END as reliability_score\n FROM monero_nodes n2\n LEFT JOIN (\n SELECT \n node_id,\n SUM(CASE WHEN was_successful THEN 1 ELSE 0 END) as success_count,\n SUM(CASE WHEN NOT was_successful THEN 1 ELSE 0 END) as failure_count,\n AVG(CASE WHEN was_successful AND latency_ms IS NOT NULL THEN latency_ms END) as avg_latency_ms\n FROM health_checks \n GROUP BY node_id\n ) s2 ON n2.id = s2.node_id\n WHERE n2.network = ? AND (COALESCE(s2.success_count, 0) + COALESCE(s2.failure_count, 0)) > 0\n ORDER BY reliability_score DESC\n LIMIT 4\n )\n ) reliable_nodes ON n.id = reliable_nodes.node_id\n WHERE n.network = ?\n ORDER BY RANDOM()\n LIMIT ?\n ",
|
||||
"describe": {
|
||||
"columns": [
|
||||
{
|
||||
"name": "id!: i64",
|
||||
"ordinal": 0,
|
||||
"type_info": "Integer"
|
||||
},
|
||||
{
|
||||
"name": "scheme",
|
||||
"ordinal": 1,
|
||||
"type_info": "Text"
|
||||
},
|
||||
{
|
||||
"name": "host",
|
||||
"ordinal": 2,
|
||||
"type_info": "Text"
|
||||
},
|
||||
{
|
||||
"name": "port",
|
||||
"ordinal": 3,
|
||||
"type_info": "Integer"
|
||||
},
|
||||
{
|
||||
"name": "full_url",
|
||||
"ordinal": 4,
|
||||
"type_info": "Text"
|
||||
},
|
||||
{
|
||||
"name": "network",
|
||||
"ordinal": 5,
|
||||
"type_info": "Text"
|
||||
},
|
||||
{
|
||||
"name": "first_seen_at",
|
||||
"ordinal": 6,
|
||||
"type_info": "Text"
|
||||
},
|
||||
{
|
||||
"name": "success_count!: i64",
|
||||
"ordinal": 7,
|
||||
"type_info": "Null"
|
||||
},
|
||||
{
|
||||
"name": "failure_count!: i64",
|
||||
"ordinal": 8,
|
||||
"type_info": "Null"
|
||||
},
|
||||
{
|
||||
"name": "last_success?: String",
|
||||
"ordinal": 9,
|
||||
"type_info": "Null"
|
||||
},
|
||||
{
|
||||
"name": "last_failure?: String",
|
||||
"ordinal": 10,
|
||||
"type_info": "Null"
|
||||
},
|
||||
{
|
||||
"name": "last_checked?: String",
|
||||
"ordinal": 11,
|
||||
"type_info": "Null"
|
||||
},
|
||||
{
|
||||
"name": "is_reliable!: i64",
|
||||
"ordinal": 12,
|
||||
"type_info": "Null"
|
||||
},
|
||||
{
|
||||
"name": "avg_latency_ms?: f64",
|
||||
"ordinal": 13,
|
||||
"type_info": "Null"
|
||||
},
|
||||
{
|
||||
"name": "min_latency_ms?: f64",
|
||||
"ordinal": 14,
|
||||
"type_info": "Null"
|
||||
},
|
||||
{
|
||||
"name": "max_latency_ms?: f64",
|
||||
"ordinal": 15,
|
||||
"type_info": "Null"
|
||||
},
|
||||
{
|
||||
"name": "last_latency_ms?: f64",
|
||||
"ordinal": 16,
|
||||
"type_info": "Float"
|
||||
}
|
||||
],
|
||||
"parameters": {
|
||||
"Right": 3
|
||||
},
|
||||
"nullable": [
|
||||
true,
|
||||
false,
|
||||
false,
|
||||
false,
|
||||
false,
|
||||
false,
|
||||
false,
|
||||
null,
|
||||
null,
|
||||
null,
|
||||
null,
|
||||
null,
|
||||
null,
|
||||
null,
|
||||
null,
|
||||
null,
|
||||
true
|
||||
]
|
||||
},
|
||||
"hash": "5a25c95c04b11a60a04ad97b5fb684e9a0cc2eb5daf64f33e924f0c38a2edfec"
|
||||
}
|
116
monero-rpc-pool/.sqlx/query-5ff27bdd9b6e7aadc8dd4936e0ee7e6a611aaef28697a0e9535dfb30d1c4861d.json
generated
Normal file
116
monero-rpc-pool/.sqlx/query-5ff27bdd9b6e7aadc8dd4936e0ee7e6a611aaef28697a0e9535dfb30d1c4861d.json
generated
Normal file
|
@ -0,0 +1,116 @@
|
|||
{
|
||||
"db_name": "SQLite",
|
||||
"query": "\n SELECT \n n.id as \"id!: i64\",\n n.scheme,\n n.host,\n n.port,\n n.full_url,\n n.network,\n n.first_seen_at,\n CAST(COALESCE(stats.success_count, 0) AS INTEGER) as \"success_count!: i64\",\n CAST(COALESCE(stats.failure_count, 0) AS INTEGER) as \"failure_count!: i64\",\n stats.last_success as \"last_success?: String\",\n stats.last_failure as \"last_failure?: String\",\n stats.last_checked as \"last_checked?: String\",\n CAST(CASE WHEN reliable_nodes.node_id IS NOT NULL THEN 1 ELSE 0 END AS INTEGER) as \"is_reliable!: i64\",\n stats.avg_latency_ms as \"avg_latency_ms?: f64\",\n stats.min_latency_ms as \"min_latency_ms?: f64\",\n stats.max_latency_ms as \"max_latency_ms?: f64\",\n stats.last_latency_ms as \"last_latency_ms?: f64\"\n FROM monero_nodes n\n LEFT JOIN (\n SELECT \n node_id,\n SUM(CASE WHEN was_successful THEN 1 ELSE 0 END) as success_count,\n SUM(CASE WHEN NOT was_successful THEN 1 ELSE 0 END) as failure_count,\n MAX(CASE WHEN was_successful THEN timestamp END) as last_success,\n MAX(CASE WHEN NOT was_successful THEN timestamp END) as last_failure,\n MAX(timestamp) as last_checked,\n AVG(CASE WHEN was_successful AND latency_ms IS NOT NULL THEN latency_ms END) as avg_latency_ms,\n MIN(CASE WHEN was_successful AND latency_ms IS NOT NULL THEN latency_ms END) as min_latency_ms,\n MAX(CASE WHEN was_successful AND latency_ms IS NOT NULL THEN latency_ms END) as max_latency_ms,\n (SELECT latency_ms FROM health_checks hc2 WHERE hc2.node_id = health_checks.node_id ORDER BY timestamp DESC LIMIT 1) as last_latency_ms\n FROM health_checks \n GROUP BY node_id\n ) stats ON n.id = stats.node_id\n LEFT JOIN (\n SELECT DISTINCT node_id FROM (\n SELECT \n n2.id as node_id,\n COALESCE(s2.success_count, 0) as success_count,\n COALESCE(s2.failure_count, 0) as failure_count,\n s2.avg_latency_ms,\n (CAST(COALESCE(s2.success_count, 0) AS REAL) / CAST(COALESCE(s2.success_count, 0) + COALESCE(s2.failure_count, 0) AS REAL)) * \n (MIN(COALESCE(s2.success_count, 0) + 
COALESCE(s2.failure_count, 0), 200) / 200.0) * 0.8 +\n CASE \n WHEN s2.avg_latency_ms IS NOT NULL THEN (1.0 - (MIN(s2.avg_latency_ms, 2000) / 2000.0)) * 0.2\n ELSE 0.0 \n END as reliability_score\n FROM monero_nodes n2\n LEFT JOIN (\n SELECT \n node_id,\n SUM(CASE WHEN was_successful THEN 1 ELSE 0 END) as success_count,\n SUM(CASE WHEN NOT was_successful THEN 1 ELSE 0 END) as failure_count,\n AVG(CASE WHEN was_successful AND latency_ms IS NOT NULL THEN latency_ms END) as avg_latency_ms\n FROM health_checks \n GROUP BY node_id\n ) s2 ON n2.id = s2.node_id\n WHERE n2.network = ? AND (COALESCE(s2.success_count, 0) + COALESCE(s2.failure_count, 0)) > 0\n ORDER BY reliability_score DESC\n LIMIT 4\n )\n ) reliable_nodes ON n.id = reliable_nodes.node_id\n WHERE n.network = ?\n ORDER BY stats.avg_latency_ms ASC, stats.success_count DESC\n ",
|
||||
"describe": {
|
||||
"columns": [
|
||||
{
|
||||
"name": "id!: i64",
|
||||
"ordinal": 0,
|
||||
"type_info": "Integer"
|
||||
},
|
||||
{
|
||||
"name": "scheme",
|
||||
"ordinal": 1,
|
||||
"type_info": "Text"
|
||||
},
|
||||
{
|
||||
"name": "host",
|
||||
"ordinal": 2,
|
||||
"type_info": "Text"
|
||||
},
|
||||
{
|
||||
"name": "port",
|
||||
"ordinal": 3,
|
||||
"type_info": "Integer"
|
||||
},
|
||||
{
|
||||
"name": "full_url",
|
||||
"ordinal": 4,
|
||||
"type_info": "Text"
|
||||
},
|
||||
{
|
||||
"name": "network",
|
||||
"ordinal": 5,
|
||||
"type_info": "Text"
|
||||
},
|
||||
{
|
||||
"name": "first_seen_at",
|
||||
"ordinal": 6,
|
||||
"type_info": "Text"
|
||||
},
|
||||
{
|
||||
"name": "success_count!: i64",
|
||||
"ordinal": 7,
|
||||
"type_info": "Null"
|
||||
},
|
||||
{
|
||||
"name": "failure_count!: i64",
|
||||
"ordinal": 8,
|
||||
"type_info": "Null"
|
||||
},
|
||||
{
|
||||
"name": "last_success?: String",
|
||||
"ordinal": 9,
|
||||
"type_info": "Null"
|
||||
},
|
||||
{
|
||||
"name": "last_failure?: String",
|
||||
"ordinal": 10,
|
||||
"type_info": "Null"
|
||||
},
|
||||
{
|
||||
"name": "last_checked?: String",
|
||||
"ordinal": 11,
|
||||
"type_info": "Null"
|
||||
},
|
||||
{
|
||||
"name": "is_reliable!: i64",
|
||||
"ordinal": 12,
|
||||
"type_info": "Null"
|
||||
},
|
||||
{
|
||||
"name": "avg_latency_ms?: f64",
|
||||
"ordinal": 13,
|
||||
"type_info": "Null"
|
||||
},
|
||||
{
|
||||
"name": "min_latency_ms?: f64",
|
||||
"ordinal": 14,
|
||||
"type_info": "Null"
|
||||
},
|
||||
{
|
||||
"name": "max_latency_ms?: f64",
|
||||
"ordinal": 15,
|
||||
"type_info": "Null"
|
||||
},
|
||||
{
|
||||
"name": "last_latency_ms?: f64",
|
||||
"ordinal": 16,
|
||||
"type_info": "Float"
|
||||
}
|
||||
],
|
||||
"parameters": {
|
||||
"Right": 2
|
||||
},
|
||||
"nullable": [
|
||||
true,
|
||||
false,
|
||||
false,
|
||||
false,
|
||||
false,
|
||||
false,
|
||||
false,
|
||||
null,
|
||||
null,
|
||||
null,
|
||||
null,
|
||||
null,
|
||||
null,
|
||||
null,
|
||||
null,
|
||||
null,
|
||||
true
|
||||
]
|
||||
},
|
||||
"hash": "5ff27bdd9b6e7aadc8dd4936e0ee7e6a611aaef28697a0e9535dfb30d1c4861d"
|
||||
}
|
116
monero-rpc-pool/.sqlx/query-a032eb9773d4553aeaff4fb15ed99dbaef7d16d48750ee7bd4ab83233a9a732b.json
generated
Normal file
116
monero-rpc-pool/.sqlx/query-a032eb9773d4553aeaff4fb15ed99dbaef7d16d48750ee7bd4ab83233a9a732b.json
generated
Normal file
|
@ -0,0 +1,116 @@
|
|||
{
|
||||
"db_name": "SQLite",
|
||||
"query": "\n SELECT \n n.id as \"id!: i64\",\n n.scheme,\n n.host,\n n.port,\n n.full_url,\n n.network,\n n.first_seen_at,\n CAST(COALESCE(stats.success_count, 0) AS INTEGER) as \"success_count!: i64\",\n CAST(COALESCE(stats.failure_count, 0) AS INTEGER) as \"failure_count!: i64\",\n stats.last_success as \"last_success?: String\",\n stats.last_failure as \"last_failure?: String\",\n stats.last_checked as \"last_checked?: String\",\n CAST(1 AS INTEGER) as \"is_reliable!: i64\",\n stats.avg_latency_ms as \"avg_latency_ms?: f64\",\n stats.min_latency_ms as \"min_latency_ms?: f64\",\n stats.max_latency_ms as \"max_latency_ms?: f64\",\n stats.last_latency_ms as \"last_latency_ms?: f64\"\n FROM monero_nodes n\n LEFT JOIN (\n SELECT \n node_id,\n SUM(CASE WHEN was_successful THEN 1 ELSE 0 END) as success_count,\n SUM(CASE WHEN NOT was_successful THEN 1 ELSE 0 END) as failure_count,\n MAX(CASE WHEN was_successful THEN timestamp END) as last_success,\n MAX(CASE WHEN NOT was_successful THEN timestamp END) as last_failure,\n MAX(timestamp) as last_checked,\n AVG(CASE WHEN was_successful AND latency_ms IS NOT NULL THEN latency_ms END) as avg_latency_ms,\n MIN(CASE WHEN was_successful AND latency_ms IS NOT NULL THEN latency_ms END) as min_latency_ms,\n MAX(CASE WHEN was_successful AND latency_ms IS NOT NULL THEN latency_ms END) as max_latency_ms,\n (SELECT latency_ms FROM health_checks hc2 WHERE hc2.node_id = health_checks.node_id ORDER BY timestamp DESC LIMIT 1) as last_latency_ms\n FROM health_checks \n GROUP BY node_id\n ) stats ON n.id = stats.node_id\n WHERE n.network = ? 
AND (COALESCE(stats.success_count, 0) + COALESCE(stats.failure_count, 0)) > 0\n ORDER BY \n (CAST(COALESCE(stats.success_count, 0) AS REAL) / CAST(COALESCE(stats.success_count, 0) + COALESCE(stats.failure_count, 0) AS REAL)) * \n (MIN(COALESCE(stats.success_count, 0) + COALESCE(stats.failure_count, 0), 200) / 200.0) * 0.8 +\n CASE \n WHEN stats.avg_latency_ms IS NOT NULL THEN (1.0 - (MIN(stats.avg_latency_ms, 2000) / 2000.0)) * 0.2\n ELSE 0.0 \n END DESC\n LIMIT 4\n ",
|
||||
"describe": {
|
||||
"columns": [
|
||||
{
|
||||
"name": "id!: i64",
|
||||
"ordinal": 0,
|
||||
"type_info": "Integer"
|
||||
},
|
||||
{
|
||||
"name": "scheme",
|
||||
"ordinal": 1,
|
||||
"type_info": "Text"
|
||||
},
|
||||
{
|
||||
"name": "host",
|
||||
"ordinal": 2,
|
||||
"type_info": "Text"
|
||||
},
|
||||
{
|
||||
"name": "port",
|
||||
"ordinal": 3,
|
||||
"type_info": "Integer"
|
||||
},
|
||||
{
|
||||
"name": "full_url",
|
||||
"ordinal": 4,
|
||||
"type_info": "Text"
|
||||
},
|
||||
{
|
||||
"name": "network",
|
||||
"ordinal": 5,
|
||||
"type_info": "Text"
|
||||
},
|
||||
{
|
||||
"name": "first_seen_at",
|
||||
"ordinal": 6,
|
||||
"type_info": "Text"
|
||||
},
|
||||
{
|
||||
"name": "success_count!: i64",
|
||||
"ordinal": 7,
|
||||
"type_info": "Null"
|
||||
},
|
||||
{
|
||||
"name": "failure_count!: i64",
|
||||
"ordinal": 8,
|
||||
"type_info": "Null"
|
||||
},
|
||||
{
|
||||
"name": "last_success?: String",
|
||||
"ordinal": 9,
|
||||
"type_info": "Null"
|
||||
},
|
||||
{
|
||||
"name": "last_failure?: String",
|
||||
"ordinal": 10,
|
||||
"type_info": "Null"
|
||||
},
|
||||
{
|
||||
"name": "last_checked?: String",
|
||||
"ordinal": 11,
|
||||
"type_info": "Null"
|
||||
},
|
||||
{
|
||||
"name": "is_reliable!: i64",
|
||||
"ordinal": 12,
|
||||
"type_info": "Null"
|
||||
},
|
||||
{
|
||||
"name": "avg_latency_ms?: f64",
|
||||
"ordinal": 13,
|
||||
"type_info": "Null"
|
||||
},
|
||||
{
|
||||
"name": "min_latency_ms?: f64",
|
||||
"ordinal": 14,
|
||||
"type_info": "Null"
|
||||
},
|
||||
{
|
||||
"name": "max_latency_ms?: f64",
|
||||
"ordinal": 15,
|
||||
"type_info": "Null"
|
||||
},
|
||||
{
|
||||
"name": "last_latency_ms?: f64",
|
||||
"ordinal": 16,
|
||||
"type_info": "Float"
|
||||
}
|
||||
],
|
||||
"parameters": {
|
||||
"Right": 1
|
||||
},
|
||||
"nullable": [
|
||||
true,
|
||||
false,
|
||||
false,
|
||||
false,
|
||||
false,
|
||||
false,
|
||||
false,
|
||||
null,
|
||||
null,
|
||||
null,
|
||||
null,
|
||||
null,
|
||||
null,
|
||||
null,
|
||||
null,
|
||||
null,
|
||||
true
|
||||
]
|
||||
},
|
||||
"hash": "a032eb9773d4553aeaff4fb15ed99dbaef7d16d48750ee7bd4ab83233a9a732b"
|
||||
}
|
116
monero-rpc-pool/.sqlx/query-ba231efaf208a42fa857f716ef296b428c937f2eb7c8ce9c631f7f721e914c14.json
generated
Normal file
116
monero-rpc-pool/.sqlx/query-ba231efaf208a42fa857f716ef296b428c937f2eb7c8ce9c631f7f721e914c14.json
generated
Normal file
|
@ -0,0 +1,116 @@
|
|||
{
|
||||
"db_name": "SQLite",
|
||||
"query": "\n SELECT \n n.id as \"id!: i64\",\n n.scheme,\n n.host,\n n.port,\n n.full_url,\n n.network,\n n.first_seen_at,\n CAST(COALESCE(stats.success_count, 0) AS INTEGER) as \"success_count!: i64\",\n CAST(COALESCE(stats.failure_count, 0) AS INTEGER) as \"failure_count!: i64\",\n stats.last_success as \"last_success?: String\",\n stats.last_failure as \"last_failure?: String\",\n stats.last_checked as \"last_checked?: String\",\n CAST(CASE WHEN reliable_nodes.node_id IS NOT NULL THEN 1 ELSE 0 END AS INTEGER) as \"is_reliable!: i64\",\n stats.avg_latency_ms as \"avg_latency_ms?: f64\",\n stats.min_latency_ms as \"min_latency_ms?: f64\",\n stats.max_latency_ms as \"max_latency_ms?: f64\",\n stats.last_latency_ms as \"last_latency_ms?: f64\"\n FROM monero_nodes n\n LEFT JOIN (\n SELECT \n node_id,\n SUM(CASE WHEN was_successful THEN 1 ELSE 0 END) as success_count,\n SUM(CASE WHEN NOT was_successful THEN 1 ELSE 0 END) as failure_count,\n MAX(CASE WHEN was_successful THEN timestamp END) as last_success,\n MAX(CASE WHEN NOT was_successful THEN timestamp END) as last_failure,\n MAX(timestamp) as last_checked,\n AVG(CASE WHEN was_successful AND latency_ms IS NOT NULL THEN latency_ms END) as avg_latency_ms,\n MIN(CASE WHEN was_successful AND latency_ms IS NOT NULL THEN latency_ms END) as min_latency_ms,\n MAX(CASE WHEN was_successful AND latency_ms IS NOT NULL THEN latency_ms END) as max_latency_ms,\n (SELECT latency_ms FROM health_checks hc2 WHERE hc2.node_id = health_checks.node_id ORDER BY timestamp DESC LIMIT 1) as last_latency_ms\n FROM health_checks \n GROUP BY node_id\n ) stats ON n.id = stats.node_id\n LEFT JOIN (\n SELECT DISTINCT node_id FROM (\n SELECT \n n2.id as node_id,\n COALESCE(s2.success_count, 0) as success_count,\n COALESCE(s2.failure_count, 0) as failure_count,\n s2.avg_latency_ms,\n (CAST(COALESCE(s2.success_count, 0) AS REAL) / CAST(COALESCE(s2.success_count, 0) + COALESCE(s2.failure_count, 0) AS REAL)) * \n (MIN(COALESCE(s2.success_count, 0) + 
COALESCE(s2.failure_count, 0), 200) / 200.0) * 0.8 +\n CASE \n WHEN s2.avg_latency_ms IS NOT NULL THEN (1.0 - (MIN(s2.avg_latency_ms, 2000) / 2000.0)) * 0.2\n ELSE 0.0 \n END as reliability_score\n FROM monero_nodes n2\n LEFT JOIN (\n SELECT \n node_id,\n SUM(CASE WHEN was_successful THEN 1 ELSE 0 END) as success_count,\n SUM(CASE WHEN NOT was_successful THEN 1 ELSE 0 END) as failure_count,\n AVG(CASE WHEN was_successful AND latency_ms IS NOT NULL THEN latency_ms END) as avg_latency_ms\n FROM health_checks \n GROUP BY node_id\n ) s2 ON n2.id = s2.node_id\n WHERE n2.network = ? AND (COALESCE(s2.success_count, 0) + COALESCE(s2.failure_count, 0)) > 0\n ORDER BY reliability_score DESC\n LIMIT 4\n )\n ) reliable_nodes ON n.id = reliable_nodes.node_id\n WHERE n.network = ?\n ORDER BY RANDOM()\n LIMIT ?\n ",
|
||||
"describe": {
|
||||
"columns": [
|
||||
{
|
||||
"name": "id!: i64",
|
||||
"ordinal": 0,
|
||||
"type_info": "Integer"
|
||||
},
|
||||
{
|
||||
"name": "scheme",
|
||||
"ordinal": 1,
|
||||
"type_info": "Text"
|
||||
},
|
||||
{
|
||||
"name": "host",
|
||||
"ordinal": 2,
|
||||
"type_info": "Text"
|
||||
},
|
||||
{
|
||||
"name": "port",
|
||||
"ordinal": 3,
|
||||
"type_info": "Integer"
|
||||
},
|
||||
{
|
||||
"name": "full_url",
|
||||
"ordinal": 4,
|
||||
"type_info": "Text"
|
||||
},
|
||||
{
|
||||
"name": "network",
|
||||
"ordinal": 5,
|
||||
"type_info": "Text"
|
||||
},
|
||||
{
|
||||
"name": "first_seen_at",
|
||||
"ordinal": 6,
|
||||
"type_info": "Text"
|
||||
},
|
||||
{
|
||||
"name": "success_count!: i64",
|
||||
"ordinal": 7,
|
||||
"type_info": "Null"
|
||||
},
|
||||
{
|
||||
"name": "failure_count!: i64",
|
||||
"ordinal": 8,
|
||||
"type_info": "Null"
|
||||
},
|
||||
{
|
||||
"name": "last_success?: String",
|
||||
"ordinal": 9,
|
||||
"type_info": "Null"
|
||||
},
|
||||
{
|
||||
"name": "last_failure?: String",
|
||||
"ordinal": 10,
|
||||
"type_info": "Null"
|
||||
},
|
||||
{
|
||||
"name": "last_checked?: String",
|
||||
"ordinal": 11,
|
||||
"type_info": "Null"
|
||||
},
|
||||
{
|
||||
"name": "is_reliable!: i64",
|
||||
"ordinal": 12,
|
||||
"type_info": "Null"
|
||||
},
|
||||
{
|
||||
"name": "avg_latency_ms?: f64",
|
||||
"ordinal": 13,
|
||||
"type_info": "Null"
|
||||
},
|
||||
{
|
||||
"name": "min_latency_ms?: f64",
|
||||
"ordinal": 14,
|
||||
"type_info": "Null"
|
||||
},
|
||||
{
|
||||
"name": "max_latency_ms?: f64",
|
||||
"ordinal": 15,
|
||||
"type_info": "Null"
|
||||
},
|
||||
{
|
||||
"name": "last_latency_ms?: f64",
|
||||
"ordinal": 16,
|
||||
"type_info": "Float"
|
||||
}
|
||||
],
|
||||
"parameters": {
|
||||
"Right": 3
|
||||
},
|
||||
"nullable": [
|
||||
true,
|
||||
false,
|
||||
false,
|
||||
false,
|
||||
false,
|
||||
false,
|
||||
false,
|
||||
null,
|
||||
null,
|
||||
null,
|
||||
null,
|
||||
null,
|
||||
null,
|
||||
null,
|
||||
null,
|
||||
null,
|
||||
true
|
||||
]
|
||||
},
|
||||
"hash": "ba231efaf208a42fa857f716ef296b428c937f2eb7c8ce9c631f7f721e914c14"
|
||||
}
|
23
monero-rpc-pool/.sqlx/query-d32d91ca2debc4212841282533482b2ff081234c7f9f848a7223ae04234995d9.json
generated
Normal file
23
monero-rpc-pool/.sqlx/query-d32d91ca2debc4212841282533482b2ff081234c7f9f848a7223ae04234995d9.json
generated
Normal file
|
@ -0,0 +1,23 @@
|
|||
{
|
||||
"db_name": "SQLite",
|
||||
"query": "\n SELECT \n CAST(SUM(CASE WHEN hc.was_successful THEN 1 ELSE 0 END) AS INTEGER) as \"successful!: i64\",\n CAST(SUM(CASE WHEN NOT hc.was_successful THEN 1 ELSE 0 END) AS INTEGER) as \"unsuccessful!: i64\"\n FROM (\n SELECT hc.was_successful\n FROM health_checks hc\n JOIN monero_nodes n ON hc.node_id = n.id\n WHERE n.network = ?\n ORDER BY hc.timestamp DESC\n LIMIT 100\n ) hc\n ",
|
||||
"describe": {
|
||||
"columns": [
|
||||
{
|
||||
"name": "successful!: i64",
|
||||
"ordinal": 0,
|
||||
"type_info": "Integer"
|
||||
},
|
||||
{
|
||||
"name": "unsuccessful!: i64",
|
||||
"ordinal": 1,
|
||||
"type_info": "Integer"
|
||||
}
|
||||
],
|
||||
"parameters": {
|
||||
"Right": 1
|
||||
},
|
||||
"nullable": [true, true]
|
||||
},
|
||||
"hash": "d32d91ca2debc4212841282533482b2ff081234c7f9f848a7223ae04234995d9"
|
||||
}
|
18
monero-rpc-pool/.sqlx/query-e0865335c2dcb040a34e3f1305fe1a823d6fcde4a061def602cba30971817781.json
generated
Normal file
18
monero-rpc-pool/.sqlx/query-e0865335c2dcb040a34e3f1305fe1a823d6fcde4a061def602cba30971817781.json
generated
Normal file
|
@ -0,0 +1,18 @@
|
|||
{
|
||||
"db_name": "SQLite",
|
||||
"query": "SELECT id FROM monero_nodes WHERE full_url = ?",
|
||||
"describe": {
|
||||
"columns": [
|
||||
{
|
||||
"name": "id",
|
||||
"ordinal": 0,
|
||||
"type_info": "Integer"
|
||||
}
|
||||
],
|
||||
"parameters": {
|
||||
"Right": 1
|
||||
},
|
||||
"nullable": [true]
|
||||
},
|
||||
"hash": "e0865335c2dcb040a34e3f1305fe1a823d6fcde4a061def602cba30971817781"
|
||||
}
|
116
monero-rpc-pool/.sqlx/query-fac12e3ca6ac1db1a4812a5390a333ec95a2e5e2cd554c169ceecc61b7ff2864.json
generated
Normal file
116
monero-rpc-pool/.sqlx/query-fac12e3ca6ac1db1a4812a5390a333ec95a2e5e2cd554c169ceecc61b7ff2864.json
generated
Normal file
|
@ -0,0 +1,116 @@
|
|||
{
|
||||
"db_name": "SQLite",
|
||||
"query": "\n SELECT \n n.id as \"id!: i64\",\n n.scheme,\n n.host,\n n.port,\n n.full_url,\n n.network,\n n.first_seen_at,\n CAST(COALESCE(stats.success_count, 0) AS INTEGER) as \"success_count!: i64\",\n CAST(COALESCE(stats.failure_count, 0) AS INTEGER) as \"failure_count!: i64\",\n stats.last_success as \"last_success?: String\",\n stats.last_failure as \"last_failure?: String\",\n stats.last_checked as \"last_checked?: String\",\n CAST(CASE WHEN reliable_nodes.node_id IS NOT NULL THEN 1 ELSE 0 END AS INTEGER) as \"is_reliable!: i64\",\n stats.avg_latency_ms as \"avg_latency_ms?: f64\",\n stats.min_latency_ms as \"min_latency_ms?: f64\",\n stats.max_latency_ms as \"max_latency_ms?: f64\",\n stats.last_latency_ms as \"last_latency_ms?: f64\"\n FROM monero_nodes n\n LEFT JOIN (\n SELECT \n node_id,\n SUM(CASE WHEN was_successful THEN 1 ELSE 0 END) as success_count,\n SUM(CASE WHEN NOT was_successful THEN 1 ELSE 0 END) as failure_count,\n MAX(CASE WHEN was_successful THEN timestamp END) as last_success,\n MAX(CASE WHEN NOT was_successful THEN timestamp END) as last_failure,\n MAX(timestamp) as last_checked,\n AVG(CASE WHEN was_successful AND latency_ms IS NOT NULL THEN latency_ms END) as avg_latency_ms,\n MIN(CASE WHEN was_successful AND latency_ms IS NOT NULL THEN latency_ms END) as min_latency_ms,\n MAX(CASE WHEN was_successful AND latency_ms IS NOT NULL THEN latency_ms END) as max_latency_ms,\n (SELECT latency_ms FROM health_checks hc2 WHERE hc2.node_id = health_checks.node_id ORDER BY timestamp DESC LIMIT 1) as last_latency_ms\n FROM health_checks \n GROUP BY node_id\n ) stats ON n.id = stats.node_id\n LEFT JOIN (\n SELECT DISTINCT node_id FROM (\n SELECT \n n2.id as node_id,\n COALESCE(s2.success_count, 0) as success_count,\n COALESCE(s2.failure_count, 0) as failure_count,\n s2.avg_latency_ms,\n (CAST(COALESCE(s2.success_count, 0) AS REAL) / CAST(COALESCE(s2.success_count, 0) + COALESCE(s2.failure_count, 0) AS REAL)) * \n (MIN(COALESCE(s2.success_count, 0) + 
COALESCE(s2.failure_count, 0), 200) / 200.0) * 0.8 +\n CASE \n WHEN s2.avg_latency_ms IS NOT NULL THEN (1.0 - (MIN(s2.avg_latency_ms, 2000) / 2000.0)) * 0.2\n ELSE 0.0 \n END as reliability_score\n FROM monero_nodes n2\n LEFT JOIN (\n SELECT \n node_id,\n SUM(CASE WHEN was_successful THEN 1 ELSE 0 END) as success_count,\n SUM(CASE WHEN NOT was_successful THEN 1 ELSE 0 END) as failure_count,\n AVG(CASE WHEN was_successful AND latency_ms IS NOT NULL THEN latency_ms END) as avg_latency_ms\n FROM health_checks \n GROUP BY node_id\n ) s2 ON n2.id = s2.node_id\n WHERE n2.network = ? AND (COALESCE(s2.success_count, 0) + COALESCE(s2.failure_count, 0)) > 0\n ORDER BY reliability_score DESC\n LIMIT 4\n )\n ) reliable_nodes ON n.id = reliable_nodes.node_id\n WHERE n.network = ? AND stats.success_count > 0\n ORDER BY stats.avg_latency_ms ASC, stats.success_count DESC\n ",
|
||||
"describe": {
|
||||
"columns": [
|
||||
{
|
||||
"name": "id!: i64",
|
||||
"ordinal": 0,
|
||||
"type_info": "Integer"
|
||||
},
|
||||
{
|
||||
"name": "scheme",
|
||||
"ordinal": 1,
|
||||
"type_info": "Text"
|
||||
},
|
||||
{
|
||||
"name": "host",
|
||||
"ordinal": 2,
|
||||
"type_info": "Text"
|
||||
},
|
||||
{
|
||||
"name": "port",
|
||||
"ordinal": 3,
|
||||
"type_info": "Integer"
|
||||
},
|
||||
{
|
||||
"name": "full_url",
|
||||
"ordinal": 4,
|
||||
"type_info": "Text"
|
||||
},
|
||||
{
|
||||
"name": "network",
|
||||
"ordinal": 5,
|
||||
"type_info": "Text"
|
||||
},
|
||||
{
|
||||
"name": "first_seen_at",
|
||||
"ordinal": 6,
|
||||
"type_info": "Text"
|
||||
},
|
||||
{
|
||||
"name": "success_count!: i64",
|
||||
"ordinal": 7,
|
||||
"type_info": "Null"
|
||||
},
|
||||
{
|
||||
"name": "failure_count!: i64",
|
||||
"ordinal": 8,
|
||||
"type_info": "Null"
|
||||
},
|
||||
{
|
||||
"name": "last_success?: String",
|
||||
"ordinal": 9,
|
||||
"type_info": "Null"
|
||||
},
|
||||
{
|
||||
"name": "last_failure?: String",
|
||||
"ordinal": 10,
|
||||
"type_info": "Null"
|
||||
},
|
||||
{
|
||||
"name": "last_checked?: String",
|
||||
"ordinal": 11,
|
||||
"type_info": "Null"
|
||||
},
|
||||
{
|
||||
"name": "is_reliable!: i64",
|
||||
"ordinal": 12,
|
||||
"type_info": "Null"
|
||||
},
|
||||
{
|
||||
"name": "avg_latency_ms?: f64",
|
||||
"ordinal": 13,
|
||||
"type_info": "Null"
|
||||
},
|
||||
{
|
||||
"name": "min_latency_ms?: f64",
|
||||
"ordinal": 14,
|
||||
"type_info": "Null"
|
||||
},
|
||||
{
|
||||
"name": "max_latency_ms?: f64",
|
||||
"ordinal": 15,
|
||||
"type_info": "Null"
|
||||
},
|
||||
{
|
||||
"name": "last_latency_ms?: f64",
|
||||
"ordinal": 16,
|
||||
"type_info": "Float"
|
||||
}
|
||||
],
|
||||
"parameters": {
|
||||
"Right": 2
|
||||
},
|
||||
"nullable": [
|
||||
true,
|
||||
false,
|
||||
false,
|
||||
false,
|
||||
false,
|
||||
false,
|
||||
false,
|
||||
null,
|
||||
null,
|
||||
null,
|
||||
null,
|
||||
null,
|
||||
null,
|
||||
null,
|
||||
null,
|
||||
null,
|
||||
true
|
||||
]
|
||||
},
|
||||
"hash": "fac12e3ca6ac1db1a4812a5390a333ec95a2e5e2cd554c169ceecc61b7ff2864"
|
||||
}
|
28
monero-rpc-pool/.sqlx/query-ffa1b76d20c86d6bea02bd03e5e7de159adbb7c7c0ef585ce4df9ec648bea7f8.json
generated
Normal file
28
monero-rpc-pool/.sqlx/query-ffa1b76d20c86d6bea02bd03e5e7de159adbb7c7c0ef585ce4df9ec648bea7f8.json
generated
Normal file
|
@ -0,0 +1,28 @@
|
|||
{
|
||||
"db_name": "SQLite",
|
||||
"query": "\n SELECT \n COUNT(*) as total,\n CAST(SUM(CASE WHEN stats.success_count > 0 THEN 1 ELSE 0 END) AS INTEGER) as \"reachable!: i64\",\n CAST((SELECT COUNT(*) FROM (\n SELECT n2.id\n FROM monero_nodes n2\n LEFT JOIN (\n SELECT \n node_id,\n SUM(CASE WHEN was_successful THEN 1 ELSE 0 END) as success_count,\n SUM(CASE WHEN NOT was_successful THEN 1 ELSE 0 END) as failure_count,\n AVG(CASE WHEN was_successful AND latency_ms IS NOT NULL THEN latency_ms END) as avg_latency_ms\n FROM health_checks \n GROUP BY node_id\n ) s2 ON n2.id = s2.node_id\n WHERE n2.network = ? AND (COALESCE(s2.success_count, 0) + COALESCE(s2.failure_count, 0)) > 0\n ORDER BY \n (CAST(COALESCE(s2.success_count, 0) AS REAL) / CAST(COALESCE(s2.success_count, 0) + COALESCE(s2.failure_count, 0) AS REAL)) * \n (MIN(COALESCE(s2.success_count, 0) + COALESCE(s2.failure_count, 0), 200) / 200.0) * 0.8 +\n CASE \n WHEN s2.avg_latency_ms IS NOT NULL THEN (1.0 - (MIN(s2.avg_latency_ms, 2000) / 2000.0)) * 0.2\n ELSE 0.0 \n END DESC\n LIMIT 4\n )) AS INTEGER) as \"reliable!: i64\"\n FROM monero_nodes n\n LEFT JOIN (\n SELECT \n node_id,\n SUM(CASE WHEN was_successful THEN 1 ELSE 0 END) as success_count,\n SUM(CASE WHEN NOT was_successful THEN 1 ELSE 0 END) as failure_count\n FROM health_checks \n GROUP BY node_id\n ) stats ON n.id = stats.node_id\n WHERE n.network = ?\n ",
|
||||
"describe": {
|
||||
"columns": [
|
||||
{
|
||||
"name": "total",
|
||||
"ordinal": 0,
|
||||
"type_info": "Integer"
|
||||
},
|
||||
{
|
||||
"name": "reachable!: i64",
|
||||
"ordinal": 1,
|
||||
"type_info": "Integer"
|
||||
},
|
||||
{
|
||||
"name": "reliable!: i64",
|
||||
"ordinal": 2,
|
||||
"type_info": "Integer"
|
||||
}
|
||||
],
|
||||
"parameters": {
|
||||
"Right": 2
|
||||
},
|
||||
"nullable": [false, true, false]
|
||||
},
|
||||
"hash": "ffa1b76d20c86d6bea02bd03e5e7de159adbb7c7c0ef585ce4df9ec648bea7f8"
|
||||
}
|
36
monero-rpc-pool/Cargo.toml
Normal file
36
monero-rpc-pool/Cargo.toml
Normal file
|
@ -0,0 +1,36 @@
|
|||
# Manifest for the monero-rpc-pool crate: a load-balancing proxy for
# Monero RPC nodes (see CHANGELOG 2.3.0-beta.1).
[package]
name = "monero-rpc-pool"
version = "0.1.0"
authors = ["UnstoppableSwap Team <help@unstoppableswap.net>"]
edition = "2021"

# Standalone binary entry point in addition to the library.
[[bin]]
name = "monero-rpc-pool"
path = "src/main.rs"

[dependencies]
# Error handling
anyhow = "1"
# HTTP server framework and CORS middleware
axum = { version = "0.7", features = ["macros"] }
chrono = { version = "0.4", features = ["serde"] }
clap = { version = "4.0", features = ["derive"] }
# Platform-specific application data directories
dirs = "5.0"
futures = "0.3"
# Monero types and RPC client (workspace-local)
monero = { version = "0.12", features = ["serde_support"] }
monero-rpc = { path = "../monero-rpc" }
rand = "0.8"
regex = "1.0"
# rustls instead of native TLS to keep builds self-contained
reqwest = { version = "0.12", default-features = false, features = ["json", "rustls-tls"] }
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"
# SQLite persistence; `migrate` powers the bundled ./migrations
sqlx = { version = "0.8", features = ["runtime-tokio-rustls", "sqlite", "chrono", "migrate"] }
tokio = { version = "1", features = ["full"] }
tower = "0.4"
tower-http = { version = "0.5", features = ["cors"] }
tracing = "0.1"
tracing-subscriber = { version = "0.3", features = ["env-filter"] }
# Exports Rust types to TypeScript for the GUI
typeshare = "1.0.3"
url = "2.0"
uuid = { version = "1.0", features = ["v4"] }

[dev-dependencies]
tokio-test = "0.4"
|
30
monero-rpc-pool/migrations/20250618212026_initial_schema.sql
Normal file
30
monero-rpc-pool/migrations/20250618212026_initial_schema.sql
Normal file
|
@ -0,0 +1,30 @@
|
|||
-- Add migration script here

-- Create monero_nodes table - stores node identity and current state.
-- One row per unique full_url; health statistics live in health_checks
-- and are aggregated at query time.
CREATE TABLE IF NOT EXISTS monero_nodes (
    id INTEGER PRIMARY KEY AUTOINCREMENT,
    scheme TEXT NOT NULL,
    host TEXT NOT NULL,
    port INTEGER NOT NULL,
    full_url TEXT NOT NULL UNIQUE,
    network TEXT NOT NULL, -- mainnet/stagenet/testnet - always known at insertion time
    first_seen_at TEXT NOT NULL,
    created_at TEXT NOT NULL DEFAULT (datetime('now')),
    updated_at TEXT NOT NULL DEFAULT (datetime('now'))
);

-- Create health_checks table - stores raw event data
-- (one row per probe; deleting a node cascades to its checks)
CREATE TABLE IF NOT EXISTS health_checks (
    id INTEGER PRIMARY KEY AUTOINCREMENT,
    node_id INTEGER NOT NULL,
    timestamp TEXT NOT NULL, -- ISO 8601 text timestamp
    was_successful BOOLEAN NOT NULL,
    latency_ms REAL, -- NULL when no latency was recorded
    FOREIGN KEY (node_id) REFERENCES monero_nodes(id) ON DELETE CASCADE
);

-- Create indexes for performance
CREATE INDEX IF NOT EXISTS idx_nodes_full_url ON monero_nodes(full_url);
CREATE INDEX IF NOT EXISTS idx_nodes_network ON monero_nodes(network);
CREATE INDEX IF NOT EXISTS idx_health_checks_node_id ON health_checks(node_id);
CREATE INDEX IF NOT EXISTS idx_health_checks_timestamp ON health_checks(timestamp);
|
|
@ -0,0 +1,31 @@
|
|||
-- Insert default mainnet bootstrap nodes.
-- OR IGNORE: full_url is UNIQUE, so URLs already present (e.g. discovered
-- at runtime before this migration ran) are skipped silently.
INSERT OR IGNORE INTO monero_nodes (scheme, host, port, full_url, network, first_seen_at) VALUES
('http', 'node.supportxmr.com', 18081, 'http://node.supportxmr.com:18081', 'mainnet', datetime('now')),
('http', 'nodes.hashvault.pro', 18081, 'http://nodes.hashvault.pro:18081', 'mainnet', datetime('now')),
('http', 'xmr-node.cakewallet.com', 18081, 'http://xmr-node.cakewallet.com:18081', 'mainnet', datetime('now')),
('http', 'node.xmr.to', 18081, 'http://node.xmr.to:18081', 'mainnet', datetime('now')),
('https', 'opennode.xmr-tw.org', 18089, 'https://opennode.xmr-tw.org:18089', 'mainnet', datetime('now')),
('https', 'monero.stackwallet.com', 18081, 'https://monero.stackwallet.com:18081', 'mainnet', datetime('now')),
('https', 'node.sethforprivacy.com', 18089, 'https://node.sethforprivacy.com:18089', 'mainnet', datetime('now')),
('https', 'node.monero.net', 18081, 'https://node.monero.net:18081', 'mainnet', datetime('now')),
('https', 'moneronode.org', 18081, 'https://moneronode.org:18081', 'mainnet', datetime('now')),
('http', 'node.majesticbank.at', 18089, 'http://node.majesticbank.at:18089', 'mainnet', datetime('now')),
('http', 'node.majesticbank.is', 18089, 'http://node.majesticbank.is:18089', 'mainnet', datetime('now')),
('https', 'xmr.cryptostorm.is', 18081, 'https://xmr.cryptostorm.is:18081', 'mainnet', datetime('now')),
('https', 'xmr.privex.io', 18081, 'https://xmr.privex.io:18081', 'mainnet', datetime('now')),
('https', 'nodes.hashvault.pro', 18081, 'https://nodes.hashvault.pro:18081', 'mainnet', datetime('now')),
-- .onion nodes are only reachable when traffic is routed through Tor
('http', 'hashvaultsvg2rinvxz7kos77hdfm6zrd5yco3tx2yh2linsmusfwyad.onion', 18081, 'http://hashvaultsvg2rinvxz7kos77hdfm6zrd5yco3tx2yh2linsmusfwyad.onion:18081', 'mainnet', datetime('now')),
('https', 'plowsof3t5hogddwabaeiyrno25efmzfxyro2vligremt7sxpsclfaid.onion', 18089, 'https://plowsof3t5hogddwabaeiyrno25efmzfxyro2vligremt7sxpsclfaid.onion:18089', 'mainnet', datetime('now')),
('http', 'moneroexnovtlp4datcwbgjznnulgm7q34wcl6r4gcvccruhkceb2xyd.onion', 18089, 'http://moneroexnovtlp4datcwbgjznnulgm7q34wcl6r4gcvccruhkceb2xyd.onion:18089', 'mainnet', datetime('now')),
('https', 'yqz7oikk5fyxhyy32lyy3bkwcfw4rh2o5i77wuwslqll24g3bgd44iid.onion', 18081, 'https://yqz7oikk5fyxhyy32lyy3bkwcfw4rh2o5i77wuwslqll24g3bgd44iid.onion:18081', 'mainnet', datetime('now'));

-- Insert default stagenet bootstrap nodes
INSERT OR IGNORE INTO monero_nodes (scheme, host, port, full_url, network, first_seen_at) VALUES
('http', 'stagenet.xmr-tw.org', 38081, 'http://stagenet.xmr-tw.org:38081', 'stagenet', datetime('now')),
('https', 'node.monerodevs.org', 38089, 'https://node.monerodevs.org:38089', 'stagenet', datetime('now')),
('https', 'node2.monerodevs.org', 38089, 'https://node2.monerodevs.org:38089', 'stagenet', datetime('now')),
('https', 'node3.monerodevs.org', 38089, 'https://node3.monerodevs.org:38089', 'stagenet', datetime('now')),
('https', 'xmr-lux.boldsuck.org', 38081, 'https://xmr-lux.boldsuck.org:38081', 'stagenet', datetime('now')),
('http', 'plowsofe6cleftfmk2raiw5h2x66atrik3nja4bfd3zrfa2hdlgworad.onion', 38089, 'http://plowsofe6cleftfmk2raiw5h2x66atrik3nja4bfd3zrfa2hdlgworad.onion:38089', 'stagenet', datetime('now')),
('http', 'plowsoffjexmxalw73tkjmf422gq6575fc7vicuu4javzn2ynnte6tyd.onion', 38089, 'http://plowsoffjexmxalw73tkjmf422gq6575fc7vicuu4javzn2ynnte6tyd.onion:38089', 'stagenet', datetime('now')),
('https', 'stagenet.xmr.ditatompel.com', 38081, 'https://stagenet.xmr.ditatompel.com:38081', 'stagenet', datetime('now'));
|
56
monero-rpc-pool/regenerate_sqlx_cache.sh
Executable file
56
monero-rpc-pool/regenerate_sqlx_cache.sh
Executable file
|
@ -0,0 +1,56 @@
|
|||
#!/bin/bash

# regenerate_sqlx_cache.sh
#
# Script to regenerate SQLx query cache for monero-rpc-pool
#
# This script:
# 1. Creates a temporary SQLite database in a temp directory
# 2. Runs all database migrations to set up the schema
# 3. Regenerates the SQLx query cache (.sqlx directory)
# 4. Cleans up temporary files automatically
#
# Usage:
#   ./regenerate_sqlx_cache.sh
#
# Requirements:
# - cargo and sqlx-cli must be installed
# - Must be run from the monero-rpc-pool directory
# - migrations/ directory must exist with valid migration files
#
# The generated .sqlx directory should be committed to version control
# to enable offline compilation without requiring DATABASE_URL.

# -e: exit on error; -u: treat unset variables as errors;
# -o pipefail: a failing command anywhere in a pipeline fails the pipeline.
set -euo pipefail

# Fail fast with an actionable message instead of a confusing cargo error
# if the sqlx-cli cargo subcommand is not installed.
if ! cargo sqlx --version >/dev/null 2>&1; then
    echo "❌ sqlx-cli is not installed. Install it with: cargo install sqlx-cli" >&2
    exit 1
fi

echo "🔄 Regenerating SQLx query cache..."

# Create a temporary directory for the database
TEMP_DIR=$(mktemp -d)
TEMP_DB="$TEMP_DIR/temp_sqlx_cache.sqlite"
DATABASE_URL="sqlite:$TEMP_DB"

echo "📁 Using temporary database: $TEMP_DB"

# Function to cleanup on exit (runs on success, failure, and interrupt)
cleanup() {
    echo "🧹 Cleaning up temporary files..."
    rm -rf "$TEMP_DIR"
}
trap cleanup EXIT

# Export DATABASE_URL for sqlx commands
export DATABASE_URL

echo "🗄️ Creating database..."
cargo sqlx database create

echo "🔄 Running migrations..."
cargo sqlx migrate run

echo "⚡ Preparing SQLx query cache..."
cargo sqlx prepare

echo "✅ SQLx query cache regenerated successfully!"
echo "📝 The .sqlx directory has been updated with the latest query metadata."
echo "💡 Make sure to commit the .sqlx directory to version control."
|
27
monero-rpc-pool/src/config.rs
Normal file
27
monero-rpc-pool/src/config.rs
Normal file
|
@ -0,0 +1,27 @@
|
|||
use serde::{Deserialize, Serialize};
|
||||
use std::path::PathBuf;
|
||||
|
||||
/// Runtime configuration for the monero-rpc-pool server.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Config {
    /// Interface or hostname the server binds to.
    pub host: String,
    /// TCP port to listen on; 0 lets the OS choose a free port
    /// (see `new_random_port`).
    pub port: u16,
    /// Directory for persistent application data.
    pub data_dir: PathBuf,
}
|
||||
|
||||
impl Config {
|
||||
pub fn new_with_port(host: String, port: u16, data_dir: PathBuf) -> Self {
|
||||
Self {
|
||||
host,
|
||||
port,
|
||||
data_dir,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn new_random_port(host: String, data_dir: PathBuf) -> Self {
|
||||
Self {
|
||||
host,
|
||||
port: 0,
|
||||
data_dir,
|
||||
}
|
||||
}
|
||||
}
|
952
monero-rpc-pool/src/database.rs
Normal file
952
monero-rpc-pool/src/database.rs
Normal file
|
@ -0,0 +1,952 @@
|
|||
use std::path::PathBuf;
|
||||
|
||||
use anyhow::Result;
|
||||
use dirs::data_dir;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use sqlx::SqlitePool;
|
||||
use tracing::{debug, info, warn};
|
||||
|
||||
/// A known Monero daemon RPC endpoint together with its health statistics.
///
/// Only the identity fields are stored in the `monero_nodes` table; the
/// statistics below are aggregated from `health_checks` rows at query time.
#[derive(Debug, Clone, Serialize, Deserialize, sqlx::FromRow)]
pub struct MoneroNode {
    /// Database row id; `None` until the node has been persisted.
    pub id: Option<i64>,
    pub scheme: String, // http or https
    pub host: String,
    pub port: i64,
    /// Canonical "<scheme>://<host>:<port>" form; unique per node in the DB.
    pub full_url: String,
    pub network: String, // mainnet, stagenet, or testnet - always known at insertion time
    pub first_seen_at: String, // ISO 8601 timestamp when first discovered
    // Computed fields from health_checks (not stored in monero_nodes table)
    #[sqlx(default)]
    pub success_count: i64,
    #[sqlx(default)]
    pub failure_count: i64,
    // Timestamp of the most recent successful check, if any
    #[sqlx(default)]
    pub last_success: Option<String>,
    // Timestamp of the most recent failed check, if any
    #[sqlx(default)]
    pub last_failure: Option<String>,
    // Timestamp of the most recent check of either outcome
    #[sqlx(default)]
    pub last_checked: Option<String>,
    // Whether this node ranked among the top-scoring nodes for its network
    #[sqlx(default)]
    pub is_reliable: bool,
    // Latency aggregates over successful checks; None when no data exists
    #[sqlx(default)]
    pub avg_latency_ms: Option<f64>,
    #[sqlx(default)]
    pub min_latency_ms: Option<f64>,
    #[sqlx(default)]
    pub max_latency_ms: Option<f64>,
    #[sqlx(default)]
    pub last_latency_ms: Option<f64>,
}
|
||||
|
||||
/// One raw health-check observation for a single node.
#[derive(Debug, Clone, Serialize, Deserialize, sqlx::FromRow)]
pub struct HealthCheck {
    /// Database row id; `None` until persisted.
    pub id: Option<i64>,
    /// Foreign key into `monero_nodes.id`.
    pub node_id: i64,
    pub timestamp: String, // ISO 8601 timestamp
    pub was_successful: bool,
    /// Measured round-trip latency; `None` when none was recorded.
    pub latency_ms: Option<f64>,
}
|
||||
|
||||
impl MoneroNode {
|
||||
pub fn new(scheme: String, host: String, port: i64, network: String) -> Self {
|
||||
let full_url = format!("{}://{}:{}", scheme, host, port);
|
||||
let now = chrono::Utc::now().to_rfc3339();
|
||||
Self {
|
||||
id: None,
|
||||
scheme,
|
||||
host,
|
||||
port,
|
||||
full_url,
|
||||
network,
|
||||
first_seen_at: now,
|
||||
// These are computed from health_checks
|
||||
success_count: 0,
|
||||
failure_count: 0,
|
||||
last_success: None,
|
||||
last_failure: None,
|
||||
last_checked: None,
|
||||
is_reliable: false,
|
||||
avg_latency_ms: None,
|
||||
min_latency_ms: None,
|
||||
max_latency_ms: None,
|
||||
last_latency_ms: None,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn success_rate(&self) -> f64 {
|
||||
let total = self.success_count + self.failure_count;
|
||||
if total == 0 {
|
||||
0.0
|
||||
} else {
|
||||
self.success_count as f64 / total as f64
|
||||
}
|
||||
}
|
||||
|
||||
pub fn reliability_score(&self) -> f64 {
|
||||
let success_rate = self.success_rate();
|
||||
let total_requests = self.success_count + self.failure_count;
|
||||
|
||||
// Weight success rate by total requests (more requests = more reliable data)
|
||||
let request_weight = (total_requests as f64).min(200.0) / 200.0;
|
||||
let mut score = success_rate * request_weight;
|
||||
|
||||
// Factor in latency - lower latency = higher score
|
||||
if let Some(avg_latency) = self.avg_latency_ms {
|
||||
// Normalize latency to 0-1 range (assuming 0-2000ms range)
|
||||
let latency_factor = 1.0 - (avg_latency.min(2000.0) / 2000.0);
|
||||
score = score * 0.8 + latency_factor * 0.2; // 80% success rate, 20% latency
|
||||
}
|
||||
|
||||
score
|
||||
}
|
||||
}
|
||||
|
||||
/// Handle to the SQLite database that tracks nodes and their health checks.
#[derive(Clone)]
pub struct Database {
    // Connection pool shared by all clones of this handle.
    pub pool: SqlitePool,
}
|
||||
|
||||
impl Database {
|
||||
pub async fn new() -> Result<Self> {
|
||||
let app_data_dir = get_app_data_dir()?;
|
||||
Self::new_with_data_dir(app_data_dir).await
|
||||
}
|
||||
|
||||
pub async fn new_with_data_dir(data_dir: PathBuf) -> Result<Self> {
|
||||
if !data_dir.exists() {
|
||||
std::fs::create_dir_all(&data_dir)?;
|
||||
info!("Created application data directory: {}", data_dir.display());
|
||||
}
|
||||
|
||||
let db_path = data_dir.join("nodes.db");
|
||||
info!("Using database at: {}", db_path.display());
|
||||
|
||||
let database_url = format!("sqlite:{}?mode=rwc", db_path.display());
|
||||
let pool = SqlitePool::connect(&database_url).await?;
|
||||
|
||||
let db = Self { pool };
|
||||
db.migrate().await?;
|
||||
|
||||
Ok(db)
|
||||
}
|
||||
|
||||
/// Apply all pending migrations embedded from `./migrations` at compile time.
async fn migrate(&self) -> Result<()> {
    // Run sqlx migrations
    sqlx::migrate!("./migrations").run(&self.pool).await?;

    info!("Database migration completed");
    Ok(())
}
|
||||
|
||||
/// Insert a node if it doesn't exist, return the node_id.
///
/// Nodes are keyed by the UNIQUE `full_url`; an existing row has its
/// `network` and `updated_at` refreshed instead of being duplicated.
pub async fn upsert_node(
    &self,
    scheme: &str,
    host: &str,
    port: i64,
    network: &str,
) -> Result<i64> {
    let full_url = format!("{}://{}:{}", scheme, host, port);
    let now = chrono::Utc::now().to_rfc3339();

    // RETURNING id yields the row id on both the insert and the
    // conflict-update path, so one round-trip covers both cases.
    let result = sqlx::query!(
        r#"
        INSERT INTO monero_nodes (scheme, host, port, full_url, network, first_seen_at, updated_at)
        VALUES (?, ?, ?, ?, ?, ?, ?)
        ON CONFLICT(full_url) DO UPDATE SET
            network = excluded.network,
            updated_at = excluded.updated_at
        RETURNING id
        "#,
        scheme,
        host,
        port,
        full_url,
        network,
        now,
        now
    )
    .fetch_one(&self.pool)
    .await?;

    Ok(result.id)
}
|
||||
|
||||
/// Update a node's network after it has been identified.
///
/// Missing nodes are logged with a warning but do not produce an error.
pub async fn update_node_network(&self, url: &str, network: &str) -> Result<()> {
    let now = chrono::Utc::now().to_rfc3339();

    let result = sqlx::query!(
        r#"
        UPDATE monero_nodes
        SET network = ?, updated_at = ?
        WHERE full_url = ?
        "#,
        network,
        now,
        url
    )
    .execute(&self.pool)
    .await?;

    // rows_affected distinguishes "row updated" from "no such full_url".
    if result.rows_affected() > 0 {
        debug!("Updated network for node {} to {}", url, network);
    } else {
        warn!("Failed to update network for node {}: not found", url);
    }

    Ok(())
}
|
||||
|
||||
/// Record a health check event.
///
/// The node is looked up by its full URL; if it was never registered the
/// event is dropped with a warning rather than failing the caller.
pub async fn record_health_check(
    &self,
    url: &str,
    was_successful: bool,
    latency_ms: Option<f64>,
) -> Result<()> {
    let now = chrono::Utc::now().to_rfc3339();

    // First get the node_id
    let node_row = sqlx::query!("SELECT id FROM monero_nodes WHERE full_url = ?", url)
        .fetch_optional(&self.pool)
        .await?;

    let node_id = match node_row {
        Some(row) => row.id,
        None => {
            // Health checks for unknown URLs are intentionally best-effort.
            warn!("Cannot record health check for unknown node: {}", url);
            return Ok(());
        }
    };

    sqlx::query!(
        r#"
        INSERT INTO health_checks (node_id, timestamp, was_successful, latency_ms)
        VALUES (?, ?, ?, ?)
        "#,
        node_id,
        now,
        was_successful,
        latency_ms
    )
    .execute(&self.pool)
    .await?;

    Ok(())
}
|
||||
|
||||
    /// Get nodes that have been identified (have network set)
    ///
    /// Returns every node recorded for `network`, joined with aggregate
    /// health-check statistics, ordered by average latency ascending and
    /// then success count descending. `is_reliable` flags membership in the
    /// top-4 nodes by a reliability score that blends success ratio (weight
    /// 0.8, scaled by sample size capped at 200 checks) and average latency
    /// (weight 0.2, capped at 2000 ms). The same score appears in several
    /// sibling queries in this file and must be kept in sync with them.
    pub async fn get_identified_nodes(&self, network: &str) -> Result<Vec<MoneroNode>> {
        // NOTE: the `"name!: T"` / `"name?: T"` aliases are sqlx nullability
        // and type overrides for the generated anonymous record.
        let rows = sqlx::query!(
            r#"
            SELECT
                n.id as "id!: i64",
                n.scheme,
                n.host,
                n.port,
                n.full_url,
                n.network,
                n.first_seen_at,
                CAST(COALESCE(stats.success_count, 0) AS INTEGER) as "success_count!: i64",
                CAST(COALESCE(stats.failure_count, 0) AS INTEGER) as "failure_count!: i64",
                stats.last_success as "last_success?: String",
                stats.last_failure as "last_failure?: String",
                stats.last_checked as "last_checked?: String",
                CAST(CASE WHEN reliable_nodes.node_id IS NOT NULL THEN 1 ELSE 0 END AS INTEGER) as "is_reliable!: i64",
                stats.avg_latency_ms as "avg_latency_ms?: f64",
                stats.min_latency_ms as "min_latency_ms?: f64",
                stats.max_latency_ms as "max_latency_ms?: f64",
                stats.last_latency_ms as "last_latency_ms?: f64"
            FROM monero_nodes n
            LEFT JOIN (
                SELECT
                    node_id,
                    SUM(CASE WHEN was_successful THEN 1 ELSE 0 END) as success_count,
                    SUM(CASE WHEN NOT was_successful THEN 1 ELSE 0 END) as failure_count,
                    MAX(CASE WHEN was_successful THEN timestamp END) as last_success,
                    MAX(CASE WHEN NOT was_successful THEN timestamp END) as last_failure,
                    MAX(timestamp) as last_checked,
                    AVG(CASE WHEN was_successful AND latency_ms IS NOT NULL THEN latency_ms END) as avg_latency_ms,
                    MIN(CASE WHEN was_successful AND latency_ms IS NOT NULL THEN latency_ms END) as min_latency_ms,
                    MAX(CASE WHEN was_successful AND latency_ms IS NOT NULL THEN latency_ms END) as max_latency_ms,
                    (SELECT latency_ms FROM health_checks hc2 WHERE hc2.node_id = health_checks.node_id ORDER BY timestamp DESC LIMIT 1) as last_latency_ms
                FROM health_checks
                GROUP BY node_id
            ) stats ON n.id = stats.node_id
            LEFT JOIN (
                SELECT DISTINCT node_id FROM (
                    SELECT
                        n2.id as node_id,
                        COALESCE(s2.success_count, 0) as success_count,
                        COALESCE(s2.failure_count, 0) as failure_count,
                        s2.avg_latency_ms,
                        (CAST(COALESCE(s2.success_count, 0) AS REAL) / CAST(COALESCE(s2.success_count, 0) + COALESCE(s2.failure_count, 0) AS REAL)) *
                        (MIN(COALESCE(s2.success_count, 0) + COALESCE(s2.failure_count, 0), 200) / 200.0) * 0.8 +
                        CASE
                            WHEN s2.avg_latency_ms IS NOT NULL THEN (1.0 - (MIN(s2.avg_latency_ms, 2000) / 2000.0)) * 0.2
                            ELSE 0.0
                        END as reliability_score
                    FROM monero_nodes n2
                    LEFT JOIN (
                        SELECT
                            node_id,
                            SUM(CASE WHEN was_successful THEN 1 ELSE 0 END) as success_count,
                            SUM(CASE WHEN NOT was_successful THEN 1 ELSE 0 END) as failure_count,
                            AVG(CASE WHEN was_successful AND latency_ms IS NOT NULL THEN latency_ms END) as avg_latency_ms
                        FROM health_checks
                        GROUP BY node_id
                    ) s2 ON n2.id = s2.node_id
                    WHERE n2.network = ? AND (COALESCE(s2.success_count, 0) + COALESCE(s2.failure_count, 0)) > 0
                    ORDER BY reliability_score DESC
                    LIMIT 4
                )
            ) reliable_nodes ON n.id = reliable_nodes.node_id
            WHERE n.network = ?
            ORDER BY stats.avg_latency_ms ASC, stats.success_count DESC
            "#,
            network,
            network
        )
        .fetch_all(&self.pool)
        .await?;

        // Map the anonymous query record into the domain struct.
        let nodes: Vec<MoneroNode> = rows
            .into_iter()
            .map(|row| MoneroNode {
                id: Some(row.id),
                scheme: row.scheme,
                host: row.host,
                port: row.port,
                full_url: row.full_url,
                network: row.network,
                first_seen_at: row.first_seen_at,
                success_count: row.success_count,
                failure_count: row.failure_count,
                last_success: row.last_success,
                last_failure: row.last_failure,
                last_checked: row.last_checked,
                is_reliable: row.is_reliable != 0,
                avg_latency_ms: row.avg_latency_ms,
                min_latency_ms: row.min_latency_ms,
                max_latency_ms: row.max_latency_ms,
                last_latency_ms: row.last_latency_ms,
            })
            .collect();

        debug!(
            "Retrieved {} identified nodes for network {}",
            nodes.len(),
            network
        );
        Ok(nodes)
    }
|
||||
|
||||
    /// Get reliable nodes (top 4 by reliability score)
    ///
    /// Only nodes of `network` with at least one recorded health check are
    /// considered; they are ranked by the shared reliability score (success
    /// ratio weighted 0.8, latency weighted 0.2) and the best 4 returned.
    /// Every returned node is marked `is_reliable: true` by construction.
    pub async fn get_reliable_nodes(&self, network: &str) -> Result<Vec<MoneroNode>> {
        let rows = sqlx::query!(
            r#"
            SELECT
                n.id as "id!: i64",
                n.scheme,
                n.host,
                n.port,
                n.full_url,
                n.network,
                n.first_seen_at,
                CAST(COALESCE(stats.success_count, 0) AS INTEGER) as "success_count!: i64",
                CAST(COALESCE(stats.failure_count, 0) AS INTEGER) as "failure_count!: i64",
                stats.last_success as "last_success?: String",
                stats.last_failure as "last_failure?: String",
                stats.last_checked as "last_checked?: String",
                CAST(1 AS INTEGER) as "is_reliable!: i64",
                stats.avg_latency_ms as "avg_latency_ms?: f64",
                stats.min_latency_ms as "min_latency_ms?: f64",
                stats.max_latency_ms as "max_latency_ms?: f64",
                stats.last_latency_ms as "last_latency_ms?: f64"
            FROM monero_nodes n
            LEFT JOIN (
                SELECT
                    node_id,
                    SUM(CASE WHEN was_successful THEN 1 ELSE 0 END) as success_count,
                    SUM(CASE WHEN NOT was_successful THEN 1 ELSE 0 END) as failure_count,
                    MAX(CASE WHEN was_successful THEN timestamp END) as last_success,
                    MAX(CASE WHEN NOT was_successful THEN timestamp END) as last_failure,
                    MAX(timestamp) as last_checked,
                    AVG(CASE WHEN was_successful AND latency_ms IS NOT NULL THEN latency_ms END) as avg_latency_ms,
                    MIN(CASE WHEN was_successful AND latency_ms IS NOT NULL THEN latency_ms END) as min_latency_ms,
                    MAX(CASE WHEN was_successful AND latency_ms IS NOT NULL THEN latency_ms END) as max_latency_ms,
                    (SELECT latency_ms FROM health_checks hc2 WHERE hc2.node_id = health_checks.node_id ORDER BY timestamp DESC LIMIT 1) as last_latency_ms
                FROM health_checks
                GROUP BY node_id
            ) stats ON n.id = stats.node_id
            WHERE n.network = ? AND (COALESCE(stats.success_count, 0) + COALESCE(stats.failure_count, 0)) > 0
            ORDER BY
                (CAST(COALESCE(stats.success_count, 0) AS REAL) / CAST(COALESCE(stats.success_count, 0) + COALESCE(stats.failure_count, 0) AS REAL)) *
                (MIN(COALESCE(stats.success_count, 0) + COALESCE(stats.failure_count, 0), 200) / 200.0) * 0.8 +
                CASE
                    WHEN stats.avg_latency_ms IS NOT NULL THEN (1.0 - (MIN(stats.avg_latency_ms, 2000) / 2000.0)) * 0.2
                    ELSE 0.0
                END DESC
            LIMIT 4
            "#,
            network
        )
        .fetch_all(&self.pool)
        .await?;

        let nodes = rows
            .into_iter()
            .map(|row| MoneroNode {
                id: Some(row.id),
                scheme: row.scheme,
                host: row.host,
                port: row.port,
                full_url: row.full_url,
                network: row.network,
                first_seen_at: row.first_seen_at,
                success_count: row.success_count,
                failure_count: row.failure_count,
                last_success: row.last_success,
                last_failure: row.last_failure,
                last_checked: row.last_checked,
                // All rows returned by this query are reliable by definition.
                is_reliable: true,
                avg_latency_ms: row.avg_latency_ms,
                min_latency_ms: row.min_latency_ms,
                max_latency_ms: row.max_latency_ms,
                last_latency_ms: row.last_latency_ms,
            })
            .collect();

        Ok(nodes)
    }
|
||||
|
||||
/// Get node statistics for a network
|
||||
pub async fn get_node_stats(&self, network: &str) -> Result<(i64, i64, i64)> {
|
||||
let row = sqlx::query!(
|
||||
r#"
|
||||
SELECT
|
||||
COUNT(*) as total,
|
||||
CAST(SUM(CASE WHEN stats.success_count > 0 THEN 1 ELSE 0 END) AS INTEGER) as "reachable!: i64",
|
||||
CAST((SELECT COUNT(*) FROM (
|
||||
SELECT n2.id
|
||||
FROM monero_nodes n2
|
||||
LEFT JOIN (
|
||||
SELECT
|
||||
node_id,
|
||||
SUM(CASE WHEN was_successful THEN 1 ELSE 0 END) as success_count,
|
||||
SUM(CASE WHEN NOT was_successful THEN 1 ELSE 0 END) as failure_count,
|
||||
AVG(CASE WHEN was_successful AND latency_ms IS NOT NULL THEN latency_ms END) as avg_latency_ms
|
||||
FROM health_checks
|
||||
GROUP BY node_id
|
||||
) s2 ON n2.id = s2.node_id
|
||||
WHERE n2.network = ? AND (COALESCE(s2.success_count, 0) + COALESCE(s2.failure_count, 0)) > 0
|
||||
ORDER BY
|
||||
(CAST(COALESCE(s2.success_count, 0) AS REAL) / CAST(COALESCE(s2.success_count, 0) + COALESCE(s2.failure_count, 0) AS REAL)) *
|
||||
(MIN(COALESCE(s2.success_count, 0) + COALESCE(s2.failure_count, 0), 200) / 200.0) * 0.8 +
|
||||
CASE
|
||||
WHEN s2.avg_latency_ms IS NOT NULL THEN (1.0 - (MIN(s2.avg_latency_ms, 2000) / 2000.0)) * 0.2
|
||||
ELSE 0.0
|
||||
END DESC
|
||||
LIMIT 4
|
||||
)) AS INTEGER) as "reliable!: i64"
|
||||
FROM monero_nodes n
|
||||
LEFT JOIN (
|
||||
SELECT
|
||||
node_id,
|
||||
SUM(CASE WHEN was_successful THEN 1 ELSE 0 END) as success_count,
|
||||
SUM(CASE WHEN NOT was_successful THEN 1 ELSE 0 END) as failure_count
|
||||
FROM health_checks
|
||||
GROUP BY node_id
|
||||
) stats ON n.id = stats.node_id
|
||||
WHERE n.network = ?
|
||||
"#,
|
||||
network,
|
||||
network
|
||||
)
|
||||
.fetch_one(&self.pool)
|
||||
.await?;
|
||||
|
||||
let total = row.total;
|
||||
let reachable = row.reachable;
|
||||
let reliable = row.reliable;
|
||||
|
||||
Ok((total, reachable, reliable))
|
||||
}
|
||||
|
||||
/// Get health check statistics for a network
|
||||
pub async fn get_health_check_stats(&self, network: &str) -> Result<(u64, u64)> {
|
||||
let row = sqlx::query!(
|
||||
r#"
|
||||
SELECT
|
||||
CAST(SUM(CASE WHEN hc.was_successful THEN 1 ELSE 0 END) AS INTEGER) as "successful!: i64",
|
||||
CAST(SUM(CASE WHEN NOT hc.was_successful THEN 1 ELSE 0 END) AS INTEGER) as "unsuccessful!: i64"
|
||||
FROM (
|
||||
SELECT hc.was_successful
|
||||
FROM health_checks hc
|
||||
JOIN monero_nodes n ON hc.node_id = n.id
|
||||
WHERE n.network = ?
|
||||
ORDER BY hc.timestamp DESC
|
||||
LIMIT 100
|
||||
) hc
|
||||
"#,
|
||||
network
|
||||
)
|
||||
.fetch_one(&self.pool)
|
||||
.await?;
|
||||
|
||||
let successful = row.successful as u64;
|
||||
let unsuccessful = row.unsuccessful as u64;
|
||||
|
||||
Ok((successful, unsuccessful))
|
||||
}
|
||||
|
||||
    /// Get top nodes based on recent success rate and latency
    ///
    /// Returns up to `limit` nodes of `network` that have at least one
    /// recorded health check, ordered by overall success ratio descending
    /// and then average latency ascending. `is_reliable` flags membership
    /// in the top-4 by the shared reliability score.
    ///
    /// NOTE(review): despite the name, the ranking uses all-time aggregates;
    /// `_recent_checks_limit` is currently unused.
    pub async fn get_top_nodes_by_recent_success(
        &self,
        network: &str,
        _recent_checks_limit: i64,
        limit: i64,
    ) -> Result<Vec<MoneroNode>> {
        let rows = sqlx::query!(
            r#"
            SELECT
                n.id as "id!: i64",
                n.scheme,
                n.host,
                n.port,
                n.full_url,
                n.network,
                n.first_seen_at,
                CAST(COALESCE(stats.success_count, 0) AS INTEGER) as "success_count!: i64",
                CAST(COALESCE(stats.failure_count, 0) AS INTEGER) as "failure_count!: i64",
                stats.last_success as "last_success?: String",
                stats.last_failure as "last_failure?: String",
                stats.last_checked as "last_checked?: String",
                CAST(CASE WHEN reliable_nodes.node_id IS NOT NULL THEN 1 ELSE 0 END AS INTEGER) as "is_reliable!: i64",
                stats.avg_latency_ms as "avg_latency_ms?: f64",
                stats.min_latency_ms as "min_latency_ms?: f64",
                stats.max_latency_ms as "max_latency_ms?: f64",
                stats.last_latency_ms as "last_latency_ms?: f64"
            FROM monero_nodes n
            LEFT JOIN (
                SELECT
                    node_id,
                    SUM(CASE WHEN was_successful THEN 1 ELSE 0 END) as success_count,
                    SUM(CASE WHEN NOT was_successful THEN 1 ELSE 0 END) as failure_count,
                    MAX(CASE WHEN was_successful THEN timestamp END) as last_success,
                    MAX(CASE WHEN NOT was_successful THEN timestamp END) as last_failure,
                    MAX(timestamp) as last_checked,
                    AVG(CASE WHEN was_successful AND latency_ms IS NOT NULL THEN latency_ms END) as avg_latency_ms,
                    MIN(CASE WHEN was_successful AND latency_ms IS NOT NULL THEN latency_ms END) as min_latency_ms,
                    MAX(CASE WHEN was_successful AND latency_ms IS NOT NULL THEN latency_ms END) as max_latency_ms,
                    (SELECT latency_ms FROM health_checks hc2 WHERE hc2.node_id = health_checks.node_id ORDER BY timestamp DESC LIMIT 1) as last_latency_ms
                FROM health_checks
                GROUP BY node_id
            ) stats ON n.id = stats.node_id
            LEFT JOIN (
                SELECT DISTINCT node_id FROM (
                    SELECT
                        n2.id as node_id,
                        COALESCE(s2.success_count, 0) as success_count,
                        COALESCE(s2.failure_count, 0) as failure_count,
                        s2.avg_latency_ms,
                        (CAST(COALESCE(s2.success_count, 0) AS REAL) / CAST(COALESCE(s2.success_count, 0) + COALESCE(s2.failure_count, 0) AS REAL)) *
                        (MIN(COALESCE(s2.success_count, 0) + COALESCE(s2.failure_count, 0), 200) / 200.0) * 0.8 +
                        CASE
                            WHEN s2.avg_latency_ms IS NOT NULL THEN (1.0 - (MIN(s2.avg_latency_ms, 2000) / 2000.0)) * 0.2
                            ELSE 0.0
                        END as reliability_score
                    FROM monero_nodes n2
                    LEFT JOIN (
                        SELECT
                            node_id,
                            SUM(CASE WHEN was_successful THEN 1 ELSE 0 END) as success_count,
                            SUM(CASE WHEN NOT was_successful THEN 1 ELSE 0 END) as failure_count,
                            AVG(CASE WHEN was_successful AND latency_ms IS NOT NULL THEN latency_ms END) as avg_latency_ms
                        FROM health_checks
                        GROUP BY node_id
                    ) s2 ON n2.id = s2.node_id
                    WHERE n2.network = ? AND (COALESCE(s2.success_count, 0) + COALESCE(s2.failure_count, 0)) > 0
                    ORDER BY reliability_score DESC
                    LIMIT 4
                )
            ) reliable_nodes ON n.id = reliable_nodes.node_id
            WHERE n.network = ? AND (COALESCE(stats.success_count, 0) + COALESCE(stats.failure_count, 0)) > 0
            ORDER BY
                (CAST(COALESCE(stats.success_count, 0) AS REAL) / CAST(COALESCE(stats.success_count, 0) + COALESCE(stats.failure_count, 0) AS REAL)) DESC,
                stats.avg_latency_ms ASC
            LIMIT ?
            "#,
            network,
            network,
            limit
        )
        .fetch_all(&self.pool)
        .await?;

        let nodes = rows
            .into_iter()
            .map(|row| MoneroNode {
                id: Some(row.id),
                scheme: row.scheme,
                host: row.host,
                port: row.port,
                full_url: row.full_url,
                network: row.network,
                first_seen_at: row.first_seen_at,
                success_count: row.success_count,
                failure_count: row.failure_count,
                last_success: row.last_success,
                last_failure: row.last_failure,
                last_checked: row.last_checked,
                is_reliable: row.is_reliable != 0,
                avg_latency_ms: row.avg_latency_ms,
                min_latency_ms: row.min_latency_ms,
                max_latency_ms: row.max_latency_ms,
                last_latency_ms: row.last_latency_ms,
            })
            .collect();

        Ok(nodes)
    }
|
||||
|
||||
    /// Get identified nodes that have at least one successful health check
    ///
    /// Same shape and scoring as `get_identified_nodes`, with an additional
    /// `stats.success_count > 0` filter so only nodes that have ever
    /// answered a health check successfully are returned.
    pub async fn get_identified_nodes_with_success(
        &self,
        network: &str,
    ) -> Result<Vec<MoneroNode>> {
        let rows = sqlx::query!(
            r#"
            SELECT
                n.id as "id!: i64",
                n.scheme,
                n.host,
                n.port,
                n.full_url,
                n.network,
                n.first_seen_at,
                CAST(COALESCE(stats.success_count, 0) AS INTEGER) as "success_count!: i64",
                CAST(COALESCE(stats.failure_count, 0) AS INTEGER) as "failure_count!: i64",
                stats.last_success as "last_success?: String",
                stats.last_failure as "last_failure?: String",
                stats.last_checked as "last_checked?: String",
                CAST(CASE WHEN reliable_nodes.node_id IS NOT NULL THEN 1 ELSE 0 END AS INTEGER) as "is_reliable!: i64",
                stats.avg_latency_ms as "avg_latency_ms?: f64",
                stats.min_latency_ms as "min_latency_ms?: f64",
                stats.max_latency_ms as "max_latency_ms?: f64",
                stats.last_latency_ms as "last_latency_ms?: f64"
            FROM monero_nodes n
            LEFT JOIN (
                SELECT
                    node_id,
                    SUM(CASE WHEN was_successful THEN 1 ELSE 0 END) as success_count,
                    SUM(CASE WHEN NOT was_successful THEN 1 ELSE 0 END) as failure_count,
                    MAX(CASE WHEN was_successful THEN timestamp END) as last_success,
                    MAX(CASE WHEN NOT was_successful THEN timestamp END) as last_failure,
                    MAX(timestamp) as last_checked,
                    AVG(CASE WHEN was_successful AND latency_ms IS NOT NULL THEN latency_ms END) as avg_latency_ms,
                    MIN(CASE WHEN was_successful AND latency_ms IS NOT NULL THEN latency_ms END) as min_latency_ms,
                    MAX(CASE WHEN was_successful AND latency_ms IS NOT NULL THEN latency_ms END) as max_latency_ms,
                    (SELECT latency_ms FROM health_checks hc2 WHERE hc2.node_id = health_checks.node_id ORDER BY timestamp DESC LIMIT 1) as last_latency_ms
                FROM health_checks
                GROUP BY node_id
            ) stats ON n.id = stats.node_id
            LEFT JOIN (
                SELECT DISTINCT node_id FROM (
                    SELECT
                        n2.id as node_id,
                        COALESCE(s2.success_count, 0) as success_count,
                        COALESCE(s2.failure_count, 0) as failure_count,
                        s2.avg_latency_ms,
                        (CAST(COALESCE(s2.success_count, 0) AS REAL) / CAST(COALESCE(s2.success_count, 0) + COALESCE(s2.failure_count, 0) AS REAL)) *
                        (MIN(COALESCE(s2.success_count, 0) + COALESCE(s2.failure_count, 0), 200) / 200.0) * 0.8 +
                        CASE
                            WHEN s2.avg_latency_ms IS NOT NULL THEN (1.0 - (MIN(s2.avg_latency_ms, 2000) / 2000.0)) * 0.2
                            ELSE 0.0
                        END as reliability_score
                    FROM monero_nodes n2
                    LEFT JOIN (
                        SELECT
                            node_id,
                            SUM(CASE WHEN was_successful THEN 1 ELSE 0 END) as success_count,
                            SUM(CASE WHEN NOT was_successful THEN 1 ELSE 0 END) as failure_count,
                            AVG(CASE WHEN was_successful AND latency_ms IS NOT NULL THEN latency_ms END) as avg_latency_ms
                        FROM health_checks
                        GROUP BY node_id
                    ) s2 ON n2.id = s2.node_id
                    WHERE n2.network = ? AND (COALESCE(s2.success_count, 0) + COALESCE(s2.failure_count, 0)) > 0
                    ORDER BY reliability_score DESC
                    LIMIT 4
                )
            ) reliable_nodes ON n.id = reliable_nodes.node_id
            WHERE n.network = ? AND stats.success_count > 0
            ORDER BY stats.avg_latency_ms ASC, stats.success_count DESC
            "#,
            network,
            network
        )
        .fetch_all(&self.pool)
        .await?;

        let nodes: Vec<MoneroNode> = rows
            .into_iter()
            .map(|row| MoneroNode {
                id: Some(row.id),
                scheme: row.scheme,
                host: row.host,
                port: row.port,
                full_url: row.full_url,
                network: row.network,
                first_seen_at: row.first_seen_at,
                success_count: row.success_count,
                failure_count: row.failure_count,
                last_success: row.last_success,
                last_failure: row.last_failure,
                last_checked: row.last_checked,
                is_reliable: row.is_reliable != 0,
                avg_latency_ms: row.avg_latency_ms,
                min_latency_ms: row.min_latency_ms,
                max_latency_ms: row.max_latency_ms,
                last_latency_ms: row.last_latency_ms,
            })
            .collect();

        debug!(
            "Retrieved {} identified nodes with success for network {}",
            nodes.len(),
            network
        );
        Ok(nodes)
    }
|
||||
|
||||
    /// Get random nodes for the specified network, excluding specific IDs
    ///
    /// Returns up to `limit` nodes of `network` in random order, skipping
    /// any whose `id` appears in `exclude_ids`. When `exclude_ids` is empty
    /// the query's own `LIMIT` is used; otherwise the query over-fetches
    /// (`limit + exclude_ids.len() + 10` rows) and the exclusion is applied
    /// in Rust, since sqlx's compile-checked macro cannot express a
    /// variable-length `NOT IN` list.
    ///
    /// NOTE(review): because the over-fetch is bounded, a pathological case
    /// where most fetched rows are excluded can return fewer than `limit`
    /// nodes even though enough non-excluded nodes exist.
    pub async fn get_random_nodes(
        &self,
        network: &str,
        limit: i64,
        exclude_ids: &[i64],
    ) -> Result<Vec<MoneroNode>> {
        if exclude_ids.is_empty() {
            let rows = sqlx::query!(
                r#"
                SELECT
                    n.id as "id!: i64",
                    n.scheme,
                    n.host,
                    n.port,
                    n.full_url,
                    n.network,
                    n.first_seen_at,
                    CAST(COALESCE(stats.success_count, 0) AS INTEGER) as "success_count!: i64",
                    CAST(COALESCE(stats.failure_count, 0) AS INTEGER) as "failure_count!: i64",
                    stats.last_success as "last_success?: String",
                    stats.last_failure as "last_failure?: String",
                    stats.last_checked as "last_checked?: String",
                    CAST(CASE WHEN reliable_nodes.node_id IS NOT NULL THEN 1 ELSE 0 END AS INTEGER) as "is_reliable!: i64",
                    stats.avg_latency_ms as "avg_latency_ms?: f64",
                    stats.min_latency_ms as "min_latency_ms?: f64",
                    stats.max_latency_ms as "max_latency_ms?: f64",
                    stats.last_latency_ms as "last_latency_ms?: f64"
                FROM monero_nodes n
                LEFT JOIN (
                    SELECT
                        node_id,
                        SUM(CASE WHEN was_successful THEN 1 ELSE 0 END) as success_count,
                        SUM(CASE WHEN NOT was_successful THEN 1 ELSE 0 END) as failure_count,
                        MAX(CASE WHEN was_successful THEN timestamp END) as last_success,
                        MAX(CASE WHEN NOT was_successful THEN timestamp END) as last_failure,
                        MAX(timestamp) as last_checked,
                        AVG(CASE WHEN was_successful AND latency_ms IS NOT NULL THEN latency_ms END) as avg_latency_ms,
                        MIN(CASE WHEN was_successful AND latency_ms IS NOT NULL THEN latency_ms END) as min_latency_ms,
                        MAX(CASE WHEN was_successful AND latency_ms IS NOT NULL THEN latency_ms END) as max_latency_ms,
                        (SELECT latency_ms FROM health_checks hc2 WHERE hc2.node_id = health_checks.node_id ORDER BY timestamp DESC LIMIT 1) as last_latency_ms
                    FROM health_checks
                    GROUP BY node_id
                ) stats ON n.id = stats.node_id
                LEFT JOIN (
                    SELECT DISTINCT node_id FROM (
                        SELECT
                            n2.id as node_id,
                            COALESCE(s2.success_count, 0) as success_count,
                            COALESCE(s2.failure_count, 0) as failure_count,
                            s2.avg_latency_ms,
                            (CAST(COALESCE(s2.success_count, 0) AS REAL) / CAST(COALESCE(s2.success_count, 0) + COALESCE(s2.failure_count, 0) AS REAL)) *
                            (MIN(COALESCE(s2.success_count, 0) + COALESCE(s2.failure_count, 0), 200) / 200.0) * 0.8 +
                            CASE
                                WHEN s2.avg_latency_ms IS NOT NULL THEN (1.0 - (MIN(s2.avg_latency_ms, 2000) / 2000.0)) * 0.2
                                ELSE 0.0
                            END as reliability_score
                        FROM monero_nodes n2
                        LEFT JOIN (
                            SELECT
                                node_id,
                                SUM(CASE WHEN was_successful THEN 1 ELSE 0 END) as success_count,
                                SUM(CASE WHEN NOT was_successful THEN 1 ELSE 0 END) as failure_count,
                                AVG(CASE WHEN was_successful AND latency_ms IS NOT NULL THEN latency_ms END) as avg_latency_ms
                            FROM health_checks
                            GROUP BY node_id
                        ) s2 ON n2.id = s2.node_id
                        WHERE n2.network = ? AND (COALESCE(s2.success_count, 0) + COALESCE(s2.failure_count, 0)) > 0
                        ORDER BY reliability_score DESC
                        LIMIT 4
                    )
                ) reliable_nodes ON n.id = reliable_nodes.node_id
                WHERE n.network = ?
                ORDER BY RANDOM()
                LIMIT ?
                "#,
                network,
                network,
                limit
            )
            .fetch_all(&self.pool)
            .await?;

            return Ok(rows
                .into_iter()
                .map(|row| MoneroNode {
                    id: Some(row.id),
                    scheme: row.scheme,
                    host: row.host,
                    port: row.port,
                    full_url: row.full_url,
                    network: row.network,
                    first_seen_at: row.first_seen_at,
                    success_count: row.success_count,
                    failure_count: row.failure_count,
                    last_success: row.last_success,
                    last_failure: row.last_failure,
                    last_checked: row.last_checked,
                    is_reliable: row.is_reliable != 0,
                    avg_latency_ms: row.avg_latency_ms,
                    min_latency_ms: row.min_latency_ms,
                    max_latency_ms: row.max_latency_ms,
                    last_latency_ms: row.last_latency_ms,
                })
                .collect());
        }

        // If exclude_ids is not empty, we need to handle it differently
        // For now, get all nodes and filter in Rust (can be optimized with dynamic SQL)
        let fetch_limit = limit + exclude_ids.len() as i64 + 10; // Get extra to account for exclusions
        let all_rows = sqlx::query!(
            r#"
            SELECT
                n.id as "id!: i64",
                n.scheme,
                n.host,
                n.port,
                n.full_url,
                n.network,
                n.first_seen_at,
                CAST(COALESCE(stats.success_count, 0) AS INTEGER) as "success_count!: i64",
                CAST(COALESCE(stats.failure_count, 0) AS INTEGER) as "failure_count!: i64",
                stats.last_success as "last_success?: String",
                stats.last_failure as "last_failure?: String",
                stats.last_checked as "last_checked?: String",
                CAST(CASE WHEN reliable_nodes.node_id IS NOT NULL THEN 1 ELSE 0 END AS INTEGER) as "is_reliable!: i64",
                stats.avg_latency_ms as "avg_latency_ms?: f64",
                stats.min_latency_ms as "min_latency_ms?: f64",
                stats.max_latency_ms as "max_latency_ms?: f64",
                stats.last_latency_ms as "last_latency_ms?: f64"
            FROM monero_nodes n
            LEFT JOIN (
                SELECT
                    node_id,
                    SUM(CASE WHEN was_successful THEN 1 ELSE 0 END) as success_count,
                    SUM(CASE WHEN NOT was_successful THEN 1 ELSE 0 END) as failure_count,
                    MAX(CASE WHEN was_successful THEN timestamp END) as last_success,
                    MAX(CASE WHEN NOT was_successful THEN timestamp END) as last_failure,
                    MAX(timestamp) as last_checked,
                    AVG(CASE WHEN was_successful AND latency_ms IS NOT NULL THEN latency_ms END) as avg_latency_ms,
                    MIN(CASE WHEN was_successful AND latency_ms IS NOT NULL THEN latency_ms END) as min_latency_ms,
                    MAX(CASE WHEN was_successful AND latency_ms IS NOT NULL THEN latency_ms END) as max_latency_ms,
                    (SELECT latency_ms FROM health_checks hc2 WHERE hc2.node_id = health_checks.node_id ORDER BY timestamp DESC LIMIT 1) as last_latency_ms
                FROM health_checks
                GROUP BY node_id
            ) stats ON n.id = stats.node_id
            LEFT JOIN (
                SELECT DISTINCT node_id FROM (
                    SELECT
                        n2.id as node_id,
                        COALESCE(s2.success_count, 0) as success_count,
                        COALESCE(s2.failure_count, 0) as failure_count,
                        s2.avg_latency_ms,
                        (CAST(COALESCE(s2.success_count, 0) AS REAL) / CAST(COALESCE(s2.success_count, 0) + COALESCE(s2.failure_count, 0) AS REAL)) *
                        (MIN(COALESCE(s2.success_count, 0) + COALESCE(s2.failure_count, 0), 200) / 200.0) * 0.8 +
                        CASE
                            WHEN s2.avg_latency_ms IS NOT NULL THEN (1.0 - (MIN(s2.avg_latency_ms, 2000) / 2000.0)) * 0.2
                            ELSE 0.0
                        END as reliability_score
                    FROM monero_nodes n2
                    LEFT JOIN (
                        SELECT
                            node_id,
                            SUM(CASE WHEN was_successful THEN 1 ELSE 0 END) as success_count,
                            SUM(CASE WHEN NOT was_successful THEN 1 ELSE 0 END) as failure_count,
                            AVG(CASE WHEN was_successful AND latency_ms IS NOT NULL THEN latency_ms END) as avg_latency_ms
                        FROM health_checks
                        GROUP BY node_id
                    ) s2 ON n2.id = s2.node_id
                    WHERE n2.network = ? AND (COALESCE(s2.success_count, 0) + COALESCE(s2.failure_count, 0)) > 0
                    ORDER BY reliability_score DESC
                    LIMIT 4
                )
            ) reliable_nodes ON n.id = reliable_nodes.node_id
            WHERE n.network = ?
            ORDER BY RANDOM()
            LIMIT ?
            "#,
            network,
            network,
            fetch_limit
        )
        .fetch_all(&self.pool)
        .await?;

        // Convert exclude_ids to a HashSet for O(1) lookup
        let exclude_set: std::collections::HashSet<i64> = exclude_ids.iter().cloned().collect();

        let nodes: Vec<MoneroNode> = all_rows
            .into_iter()
            .filter(|row| !exclude_set.contains(&row.id))
            .take(limit as usize)
            .map(|row| MoneroNode {
                id: Some(row.id),
                scheme: row.scheme,
                host: row.host,
                port: row.port,
                full_url: row.full_url,
                network: row.network,
                first_seen_at: row.first_seen_at,
                success_count: row.success_count,
                failure_count: row.failure_count,
                last_success: row.last_success,
                last_failure: row.last_failure,
                last_checked: row.last_checked,
                is_reliable: row.is_reliable != 0,
                avg_latency_ms: row.avg_latency_ms,
                min_latency_ms: row.min_latency_ms,
                max_latency_ms: row.max_latency_ms,
                last_latency_ms: row.last_latency_ms,
            })
            .collect();

        Ok(nodes)
    }
|
||||
}
|
||||
|
||||
pub fn get_app_data_dir() -> Result<PathBuf> {
|
||||
let base_dir =
|
||||
data_dir().ok_or_else(|| anyhow::anyhow!("Could not determine system data directory"))?;
|
||||
|
||||
let app_dir = base_dir.join("monero-rpc-pool");
|
||||
|
||||
if !app_dir.exists() {
|
||||
std::fs::create_dir_all(&app_dir)?;
|
||||
info!("Created application data directory: {}", app_dir.display());
|
||||
}
|
||||
|
||||
Ok(app_dir)
|
||||
}
|
383
monero-rpc-pool/src/discovery.rs
Normal file
383
monero-rpc-pool/src/discovery.rs
Normal file
|
@ -0,0 +1,383 @@
|
|||
use std::collections::HashSet;
|
||||
use std::time::{Duration, Instant};
|
||||
|
||||
use anyhow::Result;
|
||||
use monero::Network;
|
||||
use rand::seq::SliceRandom;
|
||||
use reqwest::Client;
|
||||
use serde::Deserialize;
|
||||
use serde_json::Value;
|
||||
use tracing::{error, info, warn};
|
||||
use url;
|
||||
|
||||
use crate::database::Database;
|
||||
|
||||
/// Top-level shape of the monero.fail `nodes.json` response; only the
/// `monero` section is deserialized.
#[derive(Debug, Deserialize)]
struct MoneroFailResponse {
    monero: MoneroNodes,
}
|
||||
|
||||
/// Node URL lists inside the monero.fail response.
#[derive(Debug, Deserialize)]
struct MoneroNodes {
    // Clearnet node URLs.
    clear: Vec<String>,
    // Web-compatible node URLs; defaulted so older responses without the
    // field still deserialize.
    #[serde(default)]
    web_compatible: Vec<String>,
}
|
||||
|
||||
/// Result of probing a single node with a `get_info` JSON-RPC call.
#[derive(Debug)]
pub struct HealthCheckOutcome {
    // Whether the node returned a well-formed successful response.
    pub was_successful: bool,
    // Round-trip time of the request (measured even on failure).
    pub latency: Duration,
    // Network reported by the node's `get_info` response, if it could be
    // extracted; `None` on failure or when the field was absent.
    pub discovered_network: Option<Network>,
}
|
||||
|
||||
/// Discovers Monero RPC nodes from external sources and health-checks them,
/// persisting results via the shared [`Database`]. Cheap to clone (the HTTP
/// client and database handle are both clonable).
#[derive(Clone)]
pub struct NodeDiscovery {
    client: Client,
    db: Database,
}
|
||||
|
||||
fn network_to_string(network: &Network) -> String {
|
||||
match network {
|
||||
Network::Mainnet => "mainnet".to_string(),
|
||||
Network::Stagenet => "stagenet".to_string(),
|
||||
Network::Testnet => "testnet".to_string(),
|
||||
}
|
||||
}
|
||||
|
||||
impl NodeDiscovery {
|
||||
    /// Build a `NodeDiscovery` backed by `db`, with an HTTP client using a
    /// 10-second request timeout and a `monero-rpc-pool/1.0` user agent.
    ///
    /// # Errors
    /// Fails if the HTTP client cannot be constructed.
    pub fn new(db: Database) -> Result<Self> {
        let client = Client::builder()
            .timeout(Duration::from_secs(10))
            .user_agent("monero-rpc-pool/1.0")
            .build()
            .map_err(|e| anyhow::anyhow!("Failed to build HTTP client: {}", e))?;

        Ok(Self { client, db })
    }
|
||||
|
||||
    /// Fetch nodes from monero.fail API
    ///
    /// Downloads the mainnet node list, merges the `web_compatible` and
    /// `clear` URL lists (web-compatible first), removes duplicates while
    /// preserving first-seen order, and returns the result shuffled. Uses a
    /// 30-second per-request timeout that overrides the client default.
    pub async fn fetch_mainnet_nodes_from_api(&self) -> Result<Vec<String>> {
        let url = "https://monero.fail/nodes.json?chain=monero";

        let response = self
            .client
            .get(url)
            .timeout(Duration::from_secs(30))
            .send()
            .await?;

        if !response.status().is_success() {
            return Err(anyhow::anyhow!("HTTP error: {}", response.status()));
        }

        let monero_fail_response: MoneroFailResponse = response.json().await?;

        // Combine clear and web_compatible nodes
        let mut nodes = monero_fail_response.monero.web_compatible;
        nodes.extend(monero_fail_response.monero.clear);

        // Remove duplicates using HashSet for O(n) complexity
        let mut seen = HashSet::new();
        let mut unique_nodes = Vec::new();
        for node in nodes {
            // insert() returns false for values already seen.
            if seen.insert(node.clone()) {
                unique_nodes.push(node);
            }
        }

        // Shuffle nodes in random order
        let mut rng = rand::thread_rng();
        unique_nodes.shuffle(&mut rng);

        info!(
            "Fetched {} mainnet nodes from monero.fail API",
            unique_nodes.len()
        );
        Ok(unique_nodes)
    }
|
||||
|
||||
    /// Fetch nodes from monero.fail API and discover from other sources
    ///
    /// For `Mainnet` only, pulls the external node list and hands it to
    /// `discover_and_insert_nodes`. A failed fetch is logged and swallowed
    /// (best-effort), while a failed insert propagates the error. For other
    /// networks this is a no-op.
    pub async fn discover_nodes_from_sources(&self, target_network: Network) -> Result<()> {
        // Only fetch from external sources for mainnet to avoid polluting test networks
        if target_network == Network::Mainnet {
            match self.fetch_mainnet_nodes_from_api().await {
                Ok(nodes) => {
                    self.discover_and_insert_nodes(target_network, nodes)
                        .await?;
                }
                Err(e) => {
                    warn!("Failed to fetch nodes from monero.fail API: {}", e);
                }
            }
        }

        Ok(())
    }
|
||||
|
||||
/// Enhanced health check that detects network and validates node identity
|
||||
pub async fn check_node_health(&self, url: &str) -> Result<HealthCheckOutcome> {
|
||||
let start_time = Instant::now();
|
||||
|
||||
let rpc_request = serde_json::json!({
|
||||
"jsonrpc": "2.0",
|
||||
"id": "0",
|
||||
"method": "get_info"
|
||||
});
|
||||
|
||||
let full_url = format!("{}/json_rpc", url);
|
||||
let response = self.client.post(&full_url).json(&rpc_request).send().await;
|
||||
|
||||
let latency = start_time.elapsed();
|
||||
|
||||
match response {
|
||||
Ok(resp) => {
|
||||
if resp.status().is_success() {
|
||||
match resp.json::<Value>().await {
|
||||
Ok(json) => {
|
||||
if let Some(result) = json.get("result") {
|
||||
// Extract network information from get_info response
|
||||
let discovered_network = self.extract_network_from_info(result);
|
||||
|
||||
Ok(HealthCheckOutcome {
|
||||
was_successful: true,
|
||||
latency,
|
||||
discovered_network,
|
||||
})
|
||||
} else {
|
||||
Ok(HealthCheckOutcome {
|
||||
was_successful: false,
|
||||
latency,
|
||||
discovered_network: None,
|
||||
})
|
||||
}
|
||||
}
|
||||
Err(_e) => Ok(HealthCheckOutcome {
|
||||
was_successful: false,
|
||||
latency,
|
||||
discovered_network: None,
|
||||
}),
|
||||
}
|
||||
} else {
|
||||
Ok(HealthCheckOutcome {
|
||||
was_successful: false,
|
||||
latency,
|
||||
discovered_network: None,
|
||||
})
|
||||
}
|
||||
}
|
||||
Err(_e) => Ok(HealthCheckOutcome {
|
||||
was_successful: false,
|
||||
latency,
|
||||
discovered_network: None,
|
||||
}),
|
||||
}
|
||||
}
|
||||
|
||||
/// Extract network type from get_info response
|
||||
fn extract_network_from_info(&self, info_result: &Value) -> Option<Network> {
|
||||
// Check nettype field (0 = mainnet, 1 = testnet, 2 = stagenet)
|
||||
if let Some(nettype) = info_result.get("nettype").and_then(|v| v.as_u64()) {
|
||||
return match nettype {
|
||||
0 => Some(Network::Mainnet),
|
||||
1 => Some(Network::Testnet),
|
||||
2 => Some(Network::Stagenet),
|
||||
_ => None,
|
||||
};
|
||||
}
|
||||
|
||||
// Fallback: check if testnet or stagenet is mentioned in fields
|
||||
if let Some(testnet) = info_result.get("testnet").and_then(|v| v.as_bool()) {
|
||||
return if testnet {
|
||||
Some(Network::Testnet)
|
||||
} else {
|
||||
Some(Network::Mainnet)
|
||||
};
|
||||
}
|
||||
|
||||
// Additional heuristics could be added here
|
||||
None
|
||||
}
|
||||
|
||||
/// Updated health check workflow with identification and validation logic
|
||||
pub async fn health_check_all_nodes(&self, target_network: Network) -> Result<()> {
|
||||
info!(
|
||||
"Starting health check for all nodes targeting network: {}",
|
||||
network_to_string(&target_network)
|
||||
);
|
||||
|
||||
// Get all nodes from database with proper field mapping
|
||||
let all_nodes = sqlx::query!(
|
||||
r#"
|
||||
SELECT
|
||||
id as "id!: i64",
|
||||
scheme,
|
||||
host,
|
||||
port,
|
||||
full_url,
|
||||
network as "network!: String",
|
||||
first_seen_at
|
||||
FROM monero_nodes
|
||||
ORDER BY id
|
||||
"#
|
||||
)
|
||||
.fetch_all(&self.db.pool)
|
||||
.await?;
|
||||
|
||||
let mut checked_count = 0;
|
||||
let mut healthy_count = 0;
|
||||
let mut corrected_count = 0;
|
||||
|
||||
for node in all_nodes {
|
||||
match self.check_node_health(&node.full_url).await {
|
||||
Ok(outcome) => {
|
||||
// Always record the health check
|
||||
self.db
|
||||
.record_health_check(
|
||||
&node.full_url,
|
||||
outcome.was_successful,
|
||||
if outcome.was_successful {
|
||||
Some(outcome.latency.as_millis() as f64)
|
||||
} else {
|
||||
None
|
||||
},
|
||||
)
|
||||
.await?;
|
||||
|
||||
if outcome.was_successful {
|
||||
healthy_count += 1;
|
||||
|
||||
// Validate network consistency
|
||||
if let Some(discovered_network) = outcome.discovered_network {
|
||||
let discovered_network_str = network_to_string(&discovered_network);
|
||||
if node.network != discovered_network_str {
|
||||
warn!("Network mismatch detected for node {}: stored={}, discovered={}. Correcting...",
|
||||
node.full_url, node.network, discovered_network_str);
|
||||
self.db
|
||||
.update_node_network(&node.full_url, &discovered_network_str)
|
||||
.await?;
|
||||
corrected_count += 1;
|
||||
}
|
||||
}
|
||||
}
|
||||
checked_count += 1;
|
||||
}
|
||||
Err(_e) => {
|
||||
self.db
|
||||
.record_health_check(&node.full_url, false, None)
|
||||
.await?;
|
||||
}
|
||||
}
|
||||
|
||||
// Small delay to avoid hammering nodes
|
||||
tokio::time::sleep(Duration::from_secs(2)).await;
|
||||
}
|
||||
|
||||
info!(
|
||||
"Health check completed: {}/{} nodes healthy, {} corrected",
|
||||
healthy_count, checked_count, corrected_count
|
||||
);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Periodic discovery task with improved error handling
|
||||
pub async fn periodic_discovery_task(&self, target_network: Network) -> Result<()> {
|
||||
let mut interval = tokio::time::interval(Duration::from_secs(3600)); // Every hour
|
||||
|
||||
loop {
|
||||
interval.tick().await;
|
||||
|
||||
info!(
|
||||
"Running periodic node discovery for network: {}",
|
||||
network_to_string(&target_network)
|
||||
);
|
||||
|
||||
// Discover new nodes from sources
|
||||
if let Err(e) = self.discover_nodes_from_sources(target_network).await {
|
||||
error!("Failed to discover nodes: {}", e);
|
||||
}
|
||||
|
||||
// Health check all nodes (will identify networks automatically)
|
||||
if let Err(e) = self.health_check_all_nodes(target_network).await {
|
||||
error!("Failed to perform health check: {}", e);
|
||||
}
|
||||
|
||||
// Log stats for all networks
|
||||
for network in &[Network::Mainnet, Network::Stagenet, Network::Testnet] {
|
||||
let network_str = network_to_string(network);
|
||||
if let Ok((total, reachable, reliable)) = self.db.get_node_stats(&network_str).await
|
||||
{
|
||||
if total > 0 {
|
||||
info!(
|
||||
"Node stats for {}: {} total, {} reachable, {} reliable",
|
||||
network_str, total, reachable, reliable
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Insert configured nodes for a specific network
|
||||
pub async fn discover_and_insert_nodes(
|
||||
&self,
|
||||
target_network: Network,
|
||||
nodes: Vec<String>,
|
||||
) -> Result<()> {
|
||||
let mut success_count = 0;
|
||||
let mut error_count = 0;
|
||||
let target_network_str = network_to_string(&target_network);
|
||||
|
||||
for node_url in nodes.iter() {
|
||||
if let Ok(url) = url::Url::parse(node_url) {
|
||||
let scheme = url.scheme();
|
||||
|
||||
// Validate scheme - must be http or https
|
||||
if !matches!(scheme, "http" | "https") {
|
||||
continue;
|
||||
}
|
||||
|
||||
// Validate host - must be non-empty
|
||||
let Some(host) = url.host_str() else {
|
||||
continue;
|
||||
};
|
||||
if host.is_empty() {
|
||||
continue;
|
||||
}
|
||||
|
||||
// Validate port - must be present
|
||||
let Some(port) = url.port() else {
|
||||
continue;
|
||||
};
|
||||
let port = port as i64;
|
||||
|
||||
match self
|
||||
.db
|
||||
.upsert_node(scheme, host, port, &target_network_str)
|
||||
.await
|
||||
{
|
||||
Ok(_) => {
|
||||
success_count += 1;
|
||||
}
|
||||
Err(e) => {
|
||||
error_count += 1;
|
||||
error!(
|
||||
"Failed to insert configured node {}://{}:{}: {}",
|
||||
scheme, host, port, e
|
||||
);
|
||||
}
|
||||
}
|
||||
} else {
|
||||
error_count += 1;
|
||||
error!("Failed to parse node URL: {}", node_url);
|
||||
}
|
||||
}
|
||||
|
||||
info!(
|
||||
"Configured node insertion complete: {} successful, {} errors",
|
||||
success_count, error_count
|
||||
);
|
||||
Ok(())
|
||||
}
|
||||
}
|
228
monero-rpc-pool/src/lib.rs
Normal file
228
monero-rpc-pool/src/lib.rs
Normal file
|
@ -0,0 +1,228 @@
|
|||
use std::sync::Arc;
|
||||
|
||||
use anyhow::Result;
|
||||
use axum::{
|
||||
routing::{any, get},
|
||||
Router,
|
||||
};
|
||||
use monero::Network;
|
||||
use tokio::sync::RwLock;
|
||||
use tokio::task::JoinHandle;
|
||||
use tower_http::cors::CorsLayer;
|
||||
use tracing::{error, info};
|
||||
|
||||
fn network_to_string(network: &Network) -> String {
|
||||
match network {
|
||||
Network::Mainnet => "mainnet".to_string(),
|
||||
Network::Stagenet => "stagenet".to_string(),
|
||||
Network::Testnet => "testnet".to_string(),
|
||||
}
|
||||
}
|
||||
|
||||
pub mod config;
|
||||
pub mod database;
|
||||
pub mod discovery;
|
||||
pub mod pool;
|
||||
pub mod simple_handlers;
|
||||
|
||||
use config::Config;
|
||||
use database::Database;
|
||||
use discovery::NodeDiscovery;
|
||||
use pool::{NodePool, PoolStatus};
|
||||
use simple_handlers::{simple_proxy_handler, simple_stats_handler};
|
||||
|
||||
/// Shared state handed to every axum handler.
#[derive(Clone)]
pub struct AppState {
    // Arc<RwLock<..>> so request handlers and background tasks share one pool.
    pub node_pool: Arc<RwLock<NodePool>>,
}
|
||||
|
||||
/// Manages background tasks for the RPC pool.
///
/// Dropping this handle aborts both tasks (see the `Drop` impl), so keep it
/// alive for as long as status updates and discovery should keep running.
pub struct PoolHandle {
    // Task that periodically broadcasts PoolStatus to subscribers.
    pub status_update_handle: JoinHandle<()>,
    // Task that periodically discovers and health-checks nodes.
    pub discovery_handle: JoinHandle<()>,
}
|
||||
|
||||
impl Drop for PoolHandle {
    /// Abort both background tasks so they cannot outlive the handle's owner.
    fn drop(&mut self) {
        self.status_update_handle.abort();
        self.discovery_handle.abort();
    }
}
|
||||
|
||||
/// Information about a running RPC pool server.
#[derive(Debug, Clone)]
pub struct ServerInfo {
    // Actual port the listener is bound to (useful with random-port startup).
    pub port: u16,
    // Host address the listener was bound on.
    pub host: String,
}
|
||||
|
||||
/// Build the axum router plus the pool's status channel and task handle.
///
/// Sets up the database, node pool and discovery service, publishes an
/// initial status event, then spawns the two background tasks owned by the
/// returned `PoolHandle`. Note the ordering: the initial status is published
/// *before* the tasks start, so subscribers always see a first event.
async fn create_app_with_receiver(
    config: Config,
    network: Network,
) -> Result<(
    Router,
    tokio::sync::broadcast::Receiver<PoolStatus>,
    PoolHandle,
)> {
    // Initialize database
    let db = Database::new_with_data_dir(config.data_dir.clone()).await?;

    // Initialize node pool with network
    let network_str = network_to_string(&network);
    let (node_pool, status_receiver) = NodePool::new(db.clone(), network_str.clone());
    let node_pool = Arc::new(RwLock::new(node_pool));

    // Initialize discovery service
    let discovery = NodeDiscovery::new(db.clone())?;

    // Publish initial status immediately to ensure first event is sent
    {
        let pool_guard = node_pool.read().await;
        if let Err(e) = pool_guard.publish_status_update().await {
            error!("Failed to publish initial status update: {}", e);
        }
    }

    // Start background tasks
    let node_pool_for_health_check = node_pool.clone();
    let status_update_handle = tokio::spawn(async move {
        // Broadcast a fresh PoolStatus every 10 seconds.
        let mut interval = tokio::time::interval(std::time::Duration::from_secs(10));

        loop {
            interval.tick().await;

            // Publish status update
            let pool_guard = node_pool_for_health_check.read().await;
            if let Err(e) = pool_guard.publish_status_update().await {
                error!("Failed to publish status update: {}", e);
            }
        }
    });

    // Start periodic discovery task
    let discovery_clone = discovery.clone();
    let network_clone = network;
    let discovery_handle = tokio::spawn(async move {
        if let Err(e) = discovery_clone.periodic_discovery_task(network_clone).await {
            error!(
                "Periodic discovery task failed for network {}: {}",
                network_to_string(&network_clone),
                e
            );
        }
    });

    let pool_handle = PoolHandle {
        status_update_handle,
        discovery_handle,
    };

    let app_state = AppState { node_pool };

    // Build the app: /stats for pool statistics, everything else proxied.
    let app = Router::new()
        .route("/stats", get(simple_stats_handler))
        .route("/*path", any(simple_proxy_handler))
        .layer(CorsLayer::permissive())
        .with_state(app_state);

    Ok((app, status_receiver, pool_handle))
}
|
||||
|
||||
pub async fn create_app(config: Config, network: Network) -> Result<Router> {
|
||||
let (app, _, _pool_handle) = create_app_with_receiver(config, network).await?;
|
||||
// Note: pool_handle is dropped here, so tasks will be aborted when this function returns
|
||||
// This is intentional for the simple create_app use case
|
||||
Ok(app)
|
||||
}
|
||||
|
||||
/// Create an app with a custom data directory for the database
|
||||
pub async fn create_app_with_data_dir(
|
||||
config: Config,
|
||||
network: Network,
|
||||
data_dir: std::path::PathBuf,
|
||||
) -> Result<Router> {
|
||||
let config_with_data_dir = Config::new_with_port(config.host, config.port, data_dir);
|
||||
create_app(config_with_data_dir, network).await
|
||||
}
|
||||
|
||||
pub async fn run_server(config: Config, network: Network) -> Result<()> {
|
||||
let app = create_app(config.clone(), network).await?;
|
||||
|
||||
let bind_address = format!("{}:{}", config.host, config.port);
|
||||
info!("Starting server on {}", bind_address);
|
||||
|
||||
let listener = tokio::net::TcpListener::bind(&bind_address).await?;
|
||||
info!("Server listening on {}", bind_address);
|
||||
|
||||
axum::serve(listener, app).await?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Run a server with a custom data directory
|
||||
pub async fn run_server_with_data_dir(
|
||||
config: Config,
|
||||
network: Network,
|
||||
data_dir: std::path::PathBuf,
|
||||
) -> Result<()> {
|
||||
let config_with_data_dir = Config::new_with_port(config.host, config.port, data_dir);
|
||||
run_server(config_with_data_dir, network).await
|
||||
}
|
||||
|
||||
/// Start a server with a random port for library usage
|
||||
/// Returns the server info with the actual port used, a receiver for pool status updates, and pool handle
|
||||
pub async fn start_server_with_random_port(
|
||||
config: Config,
|
||||
network: Network,
|
||||
) -> Result<(
|
||||
ServerInfo,
|
||||
tokio::sync::broadcast::Receiver<PoolStatus>,
|
||||
PoolHandle,
|
||||
)> {
|
||||
// Clone the host before moving config
|
||||
let host = config.host.clone();
|
||||
|
||||
// If port is 0, the system will assign a random available port
|
||||
let config_with_random_port = Config::new_random_port(config.host, config.data_dir);
|
||||
|
||||
let (app, status_receiver, pool_handle) =
|
||||
create_app_with_receiver(config_with_random_port, network).await?;
|
||||
|
||||
// Bind to port 0 to get a random available port
|
||||
let listener = tokio::net::TcpListener::bind(format!("{}:0", host)).await?;
|
||||
let actual_addr = listener.local_addr()?;
|
||||
|
||||
let server_info = ServerInfo {
|
||||
port: actual_addr.port(),
|
||||
host: host.clone(),
|
||||
};
|
||||
|
||||
info!(
|
||||
"Started server on {}:{} (random port)",
|
||||
server_info.host, server_info.port
|
||||
);
|
||||
|
||||
// Start the server in a background task
|
||||
tokio::spawn(async move {
|
||||
if let Err(e) = axum::serve(listener, app).await {
|
||||
error!("Server error: {}", e);
|
||||
}
|
||||
});
|
||||
|
||||
Ok((server_info, status_receiver, pool_handle))
|
||||
}
|
||||
|
||||
/// Start a server with a random port and custom data directory for library usage
|
||||
/// Returns the server info with the actual port used, a receiver for pool status updates, and pool handle
|
||||
pub async fn start_server_with_random_port_and_data_dir(
|
||||
config: Config,
|
||||
network: Network,
|
||||
data_dir: std::path::PathBuf,
|
||||
) -> Result<(
|
||||
ServerInfo,
|
||||
tokio::sync::broadcast::Receiver<PoolStatus>,
|
||||
PoolHandle,
|
||||
)> {
|
||||
let config_with_data_dir = Config::new_random_port(config.host, data_dir);
|
||||
start_server_with_random_port(config_with_data_dir, network).await
|
||||
}
|
177
monero-rpc-pool/src/main.rs
Normal file
177
monero-rpc-pool/src/main.rs
Normal file
|
@ -0,0 +1,177 @@
|
|||
use clap::Parser;
|
||||
use tracing::{info, warn};
|
||||
use tracing_subscriber::{self, EnvFilter};
|
||||
|
||||
use monero_rpc_pool::database::Database;
|
||||
use monero_rpc_pool::discovery::NodeDiscovery;
|
||||
use monero_rpc_pool::{config::Config, run_server};
|
||||
|
||||
use monero::Network;
|
||||
|
||||
fn parse_network(s: &str) -> Result<Network, String> {
|
||||
match s.to_lowercase().as_str() {
|
||||
"mainnet" => Ok(Network::Mainnet),
|
||||
"stagenet" => Ok(Network::Stagenet),
|
||||
"testnet" => Ok(Network::Testnet),
|
||||
_ => Err(format!(
|
||||
"Invalid network: {}. Must be mainnet, stagenet, or testnet",
|
||||
s
|
||||
)),
|
||||
}
|
||||
}
|
||||
|
||||
fn network_to_string(network: &Network) -> String {
|
||||
match network {
|
||||
Network::Mainnet => "mainnet".to_string(),
|
||||
Network::Stagenet => "stagenet".to_string(),
|
||||
Network::Testnet => "testnet".to_string(),
|
||||
}
|
||||
}
|
||||
|
||||
// Command-line options for the standalone monero-rpc-pool binary.
// NOTE: plain `//` comments are used here on purpose — clap derives help
// text from `///` doc comments, and help is already set via #[arg(help)].
#[derive(Parser)]
#[command(name = "monero-rpc-pool")]
#[command(about = "A load-balancing HTTP proxy for Monero RPC nodes")]
#[command(version)]
struct Args {
    #[arg(long, default_value = "127.0.0.1")]
    #[arg(help = "Host address to bind the server to")]
    host: String,

    #[arg(short, long, default_value = "18081")]
    #[arg(help = "Port to bind the server to")]
    port: u16,

    // When set, these nodes are inserted directly and external discovery
    // sources are not consulted at startup.
    #[arg(long, value_delimiter = ',')]
    #[arg(help = "Comma-separated list of Monero node URLs (overrides network-based discovery)")]
    nodes: Option<Vec<String>>,

    // Parsed case-insensitively by parse_network.
    #[arg(short, long, default_value = "mainnet")]
    #[arg(help = "Network to use for automatic node discovery")]
    #[arg(value_parser = parse_network)]
    network: Network,

    #[arg(short, long)]
    #[arg(help = "Enable verbose logging")]
    verbose: bool,
}
|
||||
|
||||
// Custom filter function that overrides log levels for our crate
|
||||
fn create_level_override_filter(base_filter: &str) -> EnvFilter {
|
||||
// Parse the base filter and modify it to treat all monero_rpc_pool logs as trace
|
||||
let mut filter = EnvFilter::new(base_filter);
|
||||
|
||||
// Add a directive that treats all levels from our crate as trace
|
||||
filter = filter.add_directive("monero_rpc_pool=trace".parse().unwrap());
|
||||
|
||||
filter
|
||||
}
|
||||
|
||||
/// Binary entry point: parse CLI args, configure logging, seed the node
/// database (manual list or external discovery), then run the proxy server.
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let args = Args::parse();

    // Create a filter that treats all logs from our crate as traces
    let base_filter = if args.verbose {
        // In verbose mode, show logs from other crates at WARN level
        "warn"
    } else {
        // In normal mode, show logs from other crates at ERROR level
        "error"
    };

    let filter = create_level_override_filter(base_filter);

    tracing_subscriber::fmt()
        .with_env_filter(filter)
        .with_target(false)
        .with_file(true)
        .with_line_number(true)
        .init();

    // Store node count for later logging before potentially moving args.nodes
    let manual_node_count = args.nodes.as_ref().map(|nodes| nodes.len());

    // Determine nodes to use and set up discovery
    let _nodes = if let Some(manual_nodes) = args.nodes {
        info!(
            "Using manually specified nodes for network: {}",
            network_to_string(&args.network)
        );

        // Insert manual nodes into database with network information
        // NOTE(review): this DB is opened via Database::new(), while the
        // server below is configured with a temp-dir data directory — verify
        // both resolve to the same database, otherwise these inserts are
        // invisible to the running server.
        let db = Database::new().await?;
        let discovery = NodeDiscovery::new(db.clone())?;
        let mut parsed_nodes = Vec::new();

        for node_url in &manual_nodes {
            // Parse the URL to extract components
            if let Ok(url) = url::Url::parse(node_url) {
                let scheme = url.scheme().to_string();
                let _protocol = if scheme == "https" { "ssl" } else { "tcp" };
                let host = url.host_str().unwrap_or("").to_string();
                // Missing ports fall back to the scheme default (443/80).
                let port = url
                    .port()
                    .unwrap_or(if scheme == "https" { 443 } else { 80 })
                    as i64;

                let full_url = format!("{}://{}:{}", scheme, host, port);

                // Insert into database
                if let Err(e) = db
                    .upsert_node(&scheme, &host, port, &network_to_string(&args.network))
                    .await
                {
                    warn!("Failed to insert manual node {}: {}", node_url, e);
                } else {
                    parsed_nodes.push(full_url);
                }
            } else {
                warn!("Failed to parse manual node URL: {}", node_url);
            }
        }

        // Use manual nodes for discovery
        // NOTE(review): the same URLs were already upserted one-by-one above;
        // this second pass looks redundant — confirm whether both are needed.
        discovery
            .discover_and_insert_nodes(args.network, manual_nodes)
            .await?;
        parsed_nodes
    } else {
        info!(
            "Setting up automatic node discovery for {} network",
            network_to_string(&args.network)
        );
        let db = Database::new().await?;
        let discovery = NodeDiscovery::new(db.clone())?;

        // Start discovery process
        discovery.discover_nodes_from_sources(args.network).await?;
        Vec::new() // Return empty vec for consistency
    };

    // The server's pool database lives under the system temp dir.
    let config = Config::new_with_port(
        args.host,
        args.port,
        std::env::temp_dir().join("monero-rpc-pool"),
    );

    let node_count_msg = if args.verbose {
        match manual_node_count {
            Some(count) => format!("{} manual nodes configured", count),
            None => "using automatic discovery".to_string(),
        }
    } else {
        "configured".to_string()
    };

    info!(
        "Starting Monero RPC Pool\nConfiguration:\n Host: {}\n Port: {}\n Network: {}\n Nodes: {}",
        config.host, config.port, network_to_string(&args.network), node_count_msg
    );

    if let Err(e) = run_server(config, args.network).await {
        eprintln!("Server error: {}", e);
        std::process::exit(1);
    }

    Ok(())
}
|
270
monero-rpc-pool/src/pool.rs
Normal file
270
monero-rpc-pool/src/pool.rs
Normal file
|
@ -0,0 +1,270 @@
|
|||
use anyhow::{Context, Result};
|
||||
use rand::prelude::*;
|
||||
use tokio::sync::broadcast;
|
||||
use tracing::debug;
|
||||
use typeshare::typeshare;
|
||||
|
||||
use crate::database::Database;
|
||||
|
||||
/// Snapshot of pool health broadcast periodically to subscribers.
#[derive(Debug, Clone, serde::Serialize)]
#[typeshare]
pub struct PoolStatus {
    // All nodes known for this network.
    pub total_node_count: u32,
    // Nodes currently considered reachable.
    pub healthy_node_count: u32,
    #[typeshare(serialized_as = "number")]
    pub successful_health_checks: u64,
    #[typeshare(serialized_as = "number")]
    pub unsuccessful_health_checks: u64,
    // Up to 5 best nodes, see NodePool::get_current_status.
    pub top_reliable_nodes: Vec<ReliableNodeInfo>,
}
|
||||
|
||||
/// Per-node summary included in [`PoolStatus`].
#[derive(Debug, Clone, serde::Serialize)]
#[typeshare]
pub struct ReliableNodeInfo {
    pub url: String,
    // Fraction of successful health checks (from MoneroNode::success_rate).
    pub success_rate: f64,
    // None when the node has no recorded latency.
    pub avg_latency_ms: Option<f64>,
}
|
||||
|
||||
/// Load-balancing node selector backed by the health-check database,
/// scoped to a single network.
pub struct NodePool {
    db: Database,
    // Canonical network name ("mainnet" / "stagenet" / "testnet").
    network: String,
    // Broadcast channel for periodic PoolStatus snapshots.
    status_sender: broadcast::Sender<PoolStatus>,
}
|
||||
|
||||
impl NodePool {
    /// Create a pool for `network` plus a receiver for its status broadcasts.
    /// The channel buffers up to 100 status snapshots.
    pub fn new(db: Database, network: String) -> (Self, broadcast::Receiver<PoolStatus>) {
        let (status_sender, status_receiver) = broadcast::channel(100);
        let pool = Self {
            db,
            network,
            status_sender,
        };
        (pool, status_receiver)
    }

    /// Get next node using Power of Two Choices algorithm
    /// Only considers identified nodes (nodes with network set)
    ///
    /// Returns `Ok(None)` when no identified node exists for this network.
    pub async fn get_next_node(&self) -> Result<Option<String>> {
        let candidate_nodes = self.db.get_identified_nodes(&self.network).await?;

        if candidate_nodes.is_empty() {
            debug!("No identified nodes available for network {}", self.network);
            return Ok(None);
        }

        if candidate_nodes.len() == 1 {
            return Ok(Some(candidate_nodes[0].full_url.clone()));
        }

        // Power of Two Choices: pick 2 random nodes, select the better one
        // NOTE(review): the two picks are independent, so both may be the
        // same node — harmless (it just selects that node), but worth
        // confirming this is intended rather than sampling without
        // replacement.
        let mut rng = thread_rng();
        let node1 = candidate_nodes.choose(&mut rng).unwrap();
        let node2 = candidate_nodes.choose(&mut rng).unwrap();

        let selected =
            if self.calculate_goodness_score(node1) >= self.calculate_goodness_score(node2) {
                node1
            } else {
                node2
            };

        debug!(
            "Selected node using P2C for network {}: {}",
            self.network, selected.full_url
        );
        Ok(Some(selected.full_url.clone()))
    }

    /// Calculate goodness score based on usage-based recency
    /// Score is a function of success rate and latency from last N health checks
    ///
    /// Returns 0.0 for nodes with no recorded checks. With latency data the
    /// score blends 80% (recency-weighted) success rate and 20% latency,
    /// where latency is clamped to 2000 ms and mapped to [0, 1].
    fn calculate_goodness_score(&self, node: &crate::database::MoneroNode) -> f64 {
        let total_checks = node.success_count + node.failure_count;
        if total_checks == 0 {
            return 0.0;
        }

        let success_rate = node.success_count as f64 / total_checks as f64;

        // Weight by recency (more recent interactions = higher weight);
        // saturates once a node has 200 recorded checks.
        let recency_weight = (total_checks as f64).min(200.0) / 200.0;
        let mut score = success_rate * recency_weight;

        // Factor in latency - lower latency = higher score
        if let Some(avg_latency) = node.avg_latency_ms {
            let latency_factor = 1.0 - (avg_latency.min(2000.0) / 2000.0);
            score = score * 0.8 + latency_factor * 0.2; // 80% success rate, 20% latency
        }

        score
    }

    /// Record a successful request against `url` with its observed latency.
    pub async fn record_success(&self, url: &str, latency_ms: f64) -> Result<()> {
        self.db
            .record_health_check(url, true, Some(latency_ms))
            .await?;
        Ok(())
    }

    /// Record a failed request against `url` (no latency recorded).
    pub async fn record_failure(&self, url: &str) -> Result<()> {
        self.db.record_health_check(url, false, None).await?;
        Ok(())
    }

    /// Compute and broadcast the current pool status to all subscribers.
    pub async fn publish_status_update(&self) -> Result<()> {
        let status = self.get_current_status().await?;
        let _ = self.status_sender.send(status); // Ignore if no receivers
        Ok(())
    }

    /// Build a [`PoolStatus`] snapshot from the database, including the
    /// top 5 reliable nodes.
    pub async fn get_current_status(&self) -> Result<PoolStatus> {
        let (total, reachable, _reliable) = self.db.get_node_stats(&self.network).await?;
        let reliable_nodes = self.db.get_reliable_nodes(&self.network).await?;
        let (successful_checks, unsuccessful_checks) =
            self.db.get_health_check_stats(&self.network).await?;

        let top_reliable_nodes = reliable_nodes
            .into_iter()
            .take(5)
            .map(|node| ReliableNodeInfo {
                url: node.full_url.clone(),
                success_rate: node.success_rate(),
                avg_latency_ms: node.avg_latency_ms,
            })
            .collect();

        Ok(PoolStatus {
            total_node_count: total as u32,
            healthy_node_count: reachable as u32,
            successful_health_checks: successful_checks,
            unsuccessful_health_checks: unsuccessful_checks,
            top_reliable_nodes,
        })
    }

    /// Get top reliable nodes with fill-up logic to ensure pool size
    /// First tries to get top nodes based on recent success, then fills up with random nodes
    ///
    /// NOTE(review): the fallback fetch (step 2) replaces the primary result
    /// and is not truncated, so the returned list may exceed `limit` —
    /// confirm callers tolerate that.
    pub async fn get_top_reliable_nodes(
        &self,
        limit: usize,
    ) -> Result<Vec<crate::database::MoneroNode>> {
        debug!(
            "Getting top reliable nodes for network {} (target: {})",
            self.network, limit
        );

        // Step 1: Try primary fetch - get top nodes based on recent success (last 200 health checks)
        let mut top_nodes = self
            .db
            .get_top_nodes_by_recent_success(&self.network, 200, limit as i64)
            .await
            .context("Failed to get top nodes by recent success")?;

        debug!(
            "Primary fetch returned {} nodes for network {} (target: {})",
            top_nodes.len(),
            self.network,
            limit
        );

        // Step 2: If primary fetch didn't return enough nodes, fall back to any identified nodes with successful health checks
        if top_nodes.len() < limit {
            debug!("Primary fetch returned insufficient nodes, falling back to any identified nodes with successful health checks");
            top_nodes = self
                .db
                .get_identified_nodes_with_success(&self.network)
                .await?;

            debug!(
                "Fallback fetch returned {} nodes with successful health checks for network {}",
                top_nodes.len(),
                self.network
            );
        }

        // Step 3: Check if we still don't have enough nodes
        if top_nodes.len() < limit {
            let needed = limit - top_nodes.len();
            debug!(
                "Pool needs {} more nodes to reach target of {} for network {}",
                needed, limit, self.network
            );

            // Step 4: Collect exclusion IDs from nodes already selected
            let exclude_ids: Vec<i64> = top_nodes.iter().filter_map(|node| node.id).collect();

            // Step 5: Secondary fetch - get random nodes to fill up
            let random_fillers = self
                .db
                .get_random_nodes(&self.network, needed as i64, &exclude_ids)
                .await?;

            debug!(
                "Secondary fetch returned {} random nodes for network {}",
                random_fillers.len(),
                self.network
            );

            // Step 6: Combine lists
            top_nodes.extend(random_fillers);
        }

        debug!(
            "Final pool size: {} nodes for network {} (target: {})",
            top_nodes.len(),
            self.network,
            limit
        );

        Ok(top_nodes)
    }

    /// Aggregate counts and the mean latency of reliable nodes for this
    /// network. Latency is `None` when no reliable node has a recorded value.
    pub async fn get_pool_stats(&self) -> Result<PoolStats> {
        let (total, reachable, reliable) = self.db.get_node_stats(&self.network).await?;
        let reliable_nodes = self.db.get_reliable_nodes(&self.network).await?;

        let avg_reliable_latency = if reliable_nodes.is_empty() {
            None
        } else {
            // Average only over nodes that actually have latency data.
            let total_latency: f64 = reliable_nodes
                .iter()
                .filter_map(|node| node.avg_latency_ms)
                .sum();
            let count = reliable_nodes
                .iter()
                .filter(|node| node.avg_latency_ms.is_some())
                .count();

            if count > 0 {
                Some(total_latency / count as f64)
            } else {
                None
            }
        };

        Ok(PoolStats {
            total_nodes: total,
            reachable_nodes: reachable,
            reliable_nodes: reliable,
            avg_reliable_latency_ms: avg_reliable_latency,
        })
    }
}
|
||||
|
||||
/// Aggregate statistics for one network's node pool.
#[derive(Debug)]
pub struct PoolStats {
    pub total_nodes: i64,
    pub reachable_nodes: i64,
    pub reliable_nodes: i64,
    // None when no reliable node has a recorded latency (see get_pool_stats).
    pub avg_reliable_latency_ms: Option<f64>,
}
|
||||
|
||||
impl PoolStats {
|
||||
pub fn health_percentage(&self) -> f64 {
|
||||
if self.total_nodes == 0 {
|
||||
0.0
|
||||
} else {
|
||||
(self.reachable_nodes as f64 / self.total_nodes as f64) * 100.0
|
||||
}
|
||||
}
|
||||
}
|
579
monero-rpc-pool/src/simple_handlers.rs
Normal file
579
monero-rpc-pool/src/simple_handlers.rs
Normal file
|
@ -0,0 +1,579 @@
|
|||
use axum::{
|
||||
body::Body,
|
||||
extract::State,
|
||||
http::{HeaderMap, Method, StatusCode},
|
||||
response::Response,
|
||||
};
|
||||
use serde_json::json;
|
||||
use std::{error::Error, time::Instant};
|
||||
use tracing::{debug, error, info_span, Instrument};
|
||||
use uuid::Uuid;
|
||||
|
||||
use crate::AppState;
|
||||
|
||||
/// Errors surfaced by the proxy handlers.
#[derive(Debug, Clone)]
enum HandlerError {
    // No node is available for the requested network.
    NoNodes,
    // Failure while consulting the node pool.
    PoolError(String),
    // A single upstream request could not be built or sent.
    RequestError(String),
    // Every attempted upstream failed.
    AllRequestsFailed(Vec<(String, String)>), // Vec of (node_url, error_message)
}
|
||||
|
||||
impl std::fmt::Display for HandlerError {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
match self {
|
||||
HandlerError::NoNodes => write!(f, "No nodes available"),
|
||||
HandlerError::PoolError(msg) => write!(f, "Pool error: {}", msg),
|
||||
HandlerError::RequestError(msg) => write!(f, "Request error: {}", msg),
|
||||
HandlerError::AllRequestsFailed(errors) => {
|
||||
write!(f, "All requests failed: [")?;
|
||||
for (i, (node, error)) in errors.iter().enumerate() {
|
||||
if i > 0 {
|
||||
write!(f, ", ")?;
|
||||
}
|
||||
write!(f, "{}: {}", node, error)?;
|
||||
}
|
||||
write!(f, "]")
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn is_jsonrpc_error(body: &[u8]) -> bool {
|
||||
// Try to parse as JSON
|
||||
if let Ok(json) = serde_json::from_slice::<serde_json::Value>(body) {
|
||||
// Check if there's an "error" field
|
||||
return json.get("error").is_some();
|
||||
}
|
||||
|
||||
// If we can't parse JSON, treat it as an error
|
||||
true
|
||||
}
|
||||
|
||||
fn extract_jsonrpc_method(body: &[u8]) -> Option<String> {
|
||||
if let Ok(json) = serde_json::from_slice::<serde_json::Value>(body) {
|
||||
if let Some(method) = json.get("method").and_then(|m| m.as_str()) {
|
||||
return Some(method.to_string());
|
||||
}
|
||||
}
|
||||
None
|
||||
}
|
||||
|
||||
/// Executes a single raw HTTP request against `node_url` + `path` and
/// converts the upstream reply into an axum `Response`, preserving status
/// code, headers and body.
///
/// Hop-by-hop headers are stripped before forwarding; when no body is
/// forwarded, `content-length`/`content-type` are dropped as well (see the
/// inline comment for why).
///
/// NOTE(review): a fresh `reqwest::Client` is constructed on every call, so
/// connection pooling / keep-alive is never reused across requests —
/// consider hoisting a shared client into `AppState`.
async fn raw_http_request(
    node_url: &str,
    path: &str,
    method: &str,
    headers: &HeaderMap,
    body: Option<&[u8]>,
) -> Result<Response, HandlerError> {
    // Hard 30s timeout so a stalled node cannot hold a request forever.
    let client = reqwest::Client::builder()
        .timeout(std::time::Duration::from_secs(30))
        .build()
        .map_err(|e| HandlerError::RequestError(e.to_string()))?;

    let url = format!("{}{}", node_url, path);

    // Use generic request method to support any HTTP verb
    let http_method = method
        .parse::<reqwest::Method>()
        .map_err(|e| HandlerError::RequestError(format!("Invalid method '{}': {}", method, e)))?;

    let mut request_builder = client.request(http_method, &url);

    // Forward body if present
    if let Some(body_bytes) = body {
        request_builder = request_builder.body(body_bytes.to_vec());
    }

    // Forward essential headers
    for (name, value) in headers.iter() {
        let header_name = name.as_str();
        let header_name_lc = header_name.to_ascii_lowercase();

        // Skip hop-by-hop headers and any body-related headers when we are **not** forwarding a body.
        let is_hop_by_hop = matches!(
            header_name_lc.as_str(),
            "host"
                | "connection"
                | "transfer-encoding"
                | "upgrade"
                | "proxy-authenticate"
                | "proxy-authorization"
                | "te"
                | "trailers"
        );

        // If we are not forwarding a body (e.g. GET request) then forwarding `content-length` or
        // `content-type` with an absent body makes many Monero nodes hang waiting for bytes and
        // eventually close the connection. This manifests as the time-outs we have observed.
        let is_body_header_without_body =
            body.is_none() && matches!(header_name_lc.as_str(), "content-length" | "content-type");

        if !is_hop_by_hop && !is_body_header_without_body {
            // Only forward header values that are valid UTF-8; others are
            // silently dropped.
            if let Ok(header_value) = std::str::from_utf8(value.as_bytes()) {
                request_builder = request_builder.header(header_name, header_value);
            }
        }
    }

    let response = request_builder
        .send()
        .await
        .map_err(|e| HandlerError::RequestError(e.to_string()))?;

    // Convert to axum Response preserving everything
    let status = response.status();
    let response_headers = response.headers().clone();

    // Read the full body eagerly; include the underlying error source (if
    // any) in the message to aid debugging of truncated responses.
    let body_bytes = response.bytes().await.map_err(|e| {
        let mut error_msg = format!("Failed to read response body: {}", e);
        if let Some(source) = e.source() {
            error_msg.push_str(&format!(" (source: {})", source));
        }

        HandlerError::RequestError(error_msg)
    })?;

    let mut axum_response = Response::new(Body::from(body_bytes));
    *axum_response.status_mut() =
        StatusCode::from_u16(status.as_u16()).unwrap_or(StatusCode::INTERNAL_SERVER_ERROR);

    // Copy response headers exactly
    for (name, value) in response_headers.iter() {
        if let (Ok(header_name), Ok(header_value)) = (
            axum::http::HeaderName::try_from(name.as_str()),
            axum::http::HeaderValue::try_from(value.as_bytes()),
        ) {
            axum_response
                .headers_mut()
                .insert(header_name, header_value);
        }
    }

    Ok(axum_response)
}
|
||||
|
||||
async fn record_success(state: &AppState, node_url: &str, latency_ms: f64) {
|
||||
let node_pool_guard = state.node_pool.read().await;
|
||||
if let Err(e) = node_pool_guard.record_success(node_url, latency_ms).await {
|
||||
error!("Failed to record success for {}: {}", node_url, e);
|
||||
}
|
||||
}
|
||||
|
||||
async fn record_failure(state: &AppState, node_url: &str) {
|
||||
let node_pool_guard = state.node_pool.read().await;
|
||||
if let Err(e) = node_pool_guard.record_failure(node_url).await {
|
||||
error!("Failed to record failure for {}: {}", node_url, e);
|
||||
}
|
||||
}
|
||||
|
||||
/// Sends one request to `node_url`, measures its latency, and records the
/// outcome (success/failure) against the pool.
///
/// Success requires an HTTP success status; for `/json_rpc` the body must
/// additionally parse as JSON without an "error" member. For that check the
/// body stream is consumed and then re-attached to the returned response.
///
/// Returns the response together with the winning node URL and the observed
/// latency in milliseconds.
async fn single_raw_request(
    state: &AppState,
    node_url: String,
    path: &str,
    method: &str,
    headers: &HeaderMap,
    body: Option<&[u8]>,
) -> Result<(Response, String, f64), HandlerError> {
    let start_time = Instant::now();

    match raw_http_request(&node_url, path, method, headers, body).await {
        Ok(response) => {
            let elapsed = start_time.elapsed();
            let latency_ms = elapsed.as_millis() as f64;

            // Check HTTP status code - only 200 is success!
            if response.status().is_success() {
                // For JSON-RPC endpoints, also check for JSON-RPC errors
                if path == "/json_rpc" {
                    // Consume the body so we can inspect it for an embedded
                    // JSON-RPC error object.
                    let (parts, body_stream) = response.into_parts();
                    let body_bytes = axum::body::to_bytes(body_stream, usize::MAX)
                        .await
                        .map_err(|e| HandlerError::RequestError(e.to_string()))?;

                    if is_jsonrpc_error(&body_bytes) {
                        record_failure(state, &node_url).await;
                        return Err(HandlerError::RequestError("JSON-RPC error".to_string()));
                    }

                    // Reconstruct response with the body we consumed
                    let response = Response::from_parts(parts, Body::from(body_bytes));
                    record_success(state, &node_url, latency_ms).await;
                    Ok((response, node_url, latency_ms))
                } else {
                    // For non-JSON-RPC endpoints, HTTP success is enough
                    record_success(state, &node_url, latency_ms).await;
                    Ok((response, node_url, latency_ms))
                }
            } else {
                // Non-200 status codes are failures
                record_failure(state, &node_url).await;
                Err(HandlerError::RequestError(format!(
                    "HTTP {}",
                    response.status()
                )))
            }
        }
        Err(e) => {
            // Transport-level failure (connect, timeout, read error).
            record_failure(state, &node_url).await;
            Err(e)
        }
    }
}
|
||||
|
||||
async fn race_requests(
|
||||
state: &AppState,
|
||||
path: &str,
|
||||
method: &str,
|
||||
headers: &HeaderMap,
|
||||
body: Option<&[u8]>,
|
||||
) -> Result<Response, HandlerError> {
|
||||
// Extract JSON-RPC method for better logging
|
||||
let jsonrpc_method = if path == "/json_rpc" {
|
||||
if let Some(body_data) = body {
|
||||
extract_jsonrpc_method(body_data)
|
||||
} else {
|
||||
None
|
||||
}
|
||||
} else {
|
||||
None
|
||||
};
|
||||
const POOL_SIZE: usize = 20;
|
||||
let mut tried_nodes = std::collections::HashSet::new();
|
||||
let mut pool_index = 0;
|
||||
let mut collected_errors: Vec<(String, String)> = Vec::new();
|
||||
|
||||
// Get the exclusive pool of 20 nodes once at the beginning
|
||||
let available_pool = {
|
||||
let node_pool_guard = state.node_pool.read().await;
|
||||
let reliable_nodes = node_pool_guard
|
||||
.get_top_reliable_nodes(POOL_SIZE)
|
||||
.await
|
||||
.map_err(|e| HandlerError::PoolError(e.to_string()))?;
|
||||
|
||||
let pool: Vec<String> = reliable_nodes
|
||||
.into_iter()
|
||||
.map(|node| node.full_url)
|
||||
.collect();
|
||||
|
||||
pool
|
||||
};
|
||||
|
||||
if available_pool.is_empty() {
|
||||
return Err(HandlerError::NoNodes);
|
||||
}
|
||||
|
||||
// Power of Two Choices within the exclusive pool
|
||||
while pool_index < available_pool.len() && tried_nodes.len() < POOL_SIZE {
|
||||
let mut node1_option = None;
|
||||
let mut node2_option = None;
|
||||
|
||||
// Select first untried node from pool
|
||||
for (i, node) in available_pool.iter().enumerate().skip(pool_index) {
|
||||
if !tried_nodes.contains(node) {
|
||||
node1_option = Some(node.clone());
|
||||
pool_index = i + 1;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
// Select second untried node from pool (different from first)
|
||||
for node in available_pool.iter().skip(pool_index) {
|
||||
if !tried_nodes.contains(node) && Some(node) != node1_option.as_ref() {
|
||||
node2_option = Some(node.clone());
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
// If we can't get any new nodes from the pool, we've exhausted our options
|
||||
if node1_option.is_none() && node2_option.is_none() {
|
||||
break;
|
||||
}
|
||||
|
||||
// Store node URLs for error tracking before consuming them
|
||||
let current_nodes: Vec<String> = [&node1_option, &node2_option]
|
||||
.iter()
|
||||
.filter_map(|opt| opt.as_ref())
|
||||
.cloned()
|
||||
.collect();
|
||||
|
||||
let mut requests = Vec::new();
|
||||
|
||||
if let Some(node1) = node1_option {
|
||||
tried_nodes.insert(node1.clone());
|
||||
requests.push(single_raw_request(
|
||||
state,
|
||||
node1.clone(),
|
||||
path,
|
||||
method,
|
||||
headers,
|
||||
body,
|
||||
));
|
||||
}
|
||||
|
||||
if let Some(node2) = node2_option {
|
||||
tried_nodes.insert(node2.clone());
|
||||
requests.push(single_raw_request(
|
||||
state,
|
||||
node2.clone(),
|
||||
path,
|
||||
method,
|
||||
headers,
|
||||
body,
|
||||
));
|
||||
}
|
||||
|
||||
if requests.is_empty() {
|
||||
break;
|
||||
}
|
||||
|
||||
match &jsonrpc_method {
|
||||
Some(rpc_method) => debug!(
|
||||
"Racing {} requests to {} (JSON-RPC: {}): {} nodes (tried {} so far)",
|
||||
method,
|
||||
path,
|
||||
rpc_method,
|
||||
requests.len(),
|
||||
tried_nodes.len()
|
||||
),
|
||||
None => debug!(
|
||||
"Racing {} requests to {}: {} nodes (tried {} so far)",
|
||||
method,
|
||||
path,
|
||||
requests.len(),
|
||||
tried_nodes.len()
|
||||
),
|
||||
}
|
||||
|
||||
// Handle the requests based on how many we have
|
||||
let result = match requests.len() {
|
||||
1 => {
|
||||
// Only one request
|
||||
requests.into_iter().next().unwrap().await
|
||||
}
|
||||
2 => {
|
||||
// Two requests - race them
|
||||
let mut iter = requests.into_iter();
|
||||
let req1 = iter.next().unwrap();
|
||||
let req2 = iter.next().unwrap();
|
||||
|
||||
tokio::select! {
|
||||
result1 = req1 => result1,
|
||||
result2 = req2 => result2,
|
||||
}
|
||||
}
|
||||
_ => unreachable!("We only add 1 or 2 requests"),
|
||||
};
|
||||
|
||||
match result {
|
||||
Ok((response, winning_node, latency_ms)) => {
|
||||
match &jsonrpc_method {
|
||||
Some(rpc_method) => {
|
||||
debug!(
|
||||
"{} response from {} ({}ms) - SUCCESS after trying {} nodes! JSON-RPC: {}",
|
||||
method, winning_node, latency_ms, tried_nodes.len(), rpc_method
|
||||
)
|
||||
}
|
||||
None => debug!(
|
||||
"{} response from {} ({}ms) - SUCCESS after trying {} nodes!",
|
||||
method,
|
||||
winning_node,
|
||||
latency_ms,
|
||||
tried_nodes.len()
|
||||
),
|
||||
}
|
||||
record_success(state, &winning_node, latency_ms).await;
|
||||
return Ok(response);
|
||||
}
|
||||
Err(e) => {
|
||||
// Since we don't know which specific node failed in the race,
|
||||
// record the error for all nodes in this batch
|
||||
for node_url in ¤t_nodes {
|
||||
collected_errors.push((node_url.clone(), e.to_string()));
|
||||
}
|
||||
debug!(
|
||||
"Request failed: {} - retrying with different nodes from pool...",
|
||||
e
|
||||
);
|
||||
continue;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Log detailed error information
|
||||
let detailed_errors: Vec<String> = collected_errors
|
||||
.iter()
|
||||
.map(|(node, error)| format!("{}: {}", node, error))
|
||||
.collect();
|
||||
|
||||
match &jsonrpc_method {
|
||||
Some(rpc_method) => error!(
|
||||
"All {} requests failed after trying {} nodes (JSON-RPC: {}). Detailed errors:\n{}",
|
||||
method,
|
||||
tried_nodes.len(),
|
||||
rpc_method,
|
||||
detailed_errors.join("\n")
|
||||
),
|
||||
None => error!(
|
||||
"All {} requests failed after trying {} nodes. Detailed errors:\n{}",
|
||||
method,
|
||||
tried_nodes.len(),
|
||||
detailed_errors.join("\n")
|
||||
),
|
||||
}
|
||||
|
||||
Err(HandlerError::AllRequestsFailed(collected_errors))
|
||||
}
|
||||
|
||||
/// Forward a request to the node pool, returning either a successful response or a simple
|
||||
/// `500` with text "All nodes failed". Keeps the error handling logic in one place so the
|
||||
/// public handlers stay readable.
|
||||
async fn proxy_request(
|
||||
state: &AppState,
|
||||
path: &str,
|
||||
method: &str,
|
||||
headers: &HeaderMap,
|
||||
body: Option<&[u8]>,
|
||||
) -> Response {
|
||||
match race_requests(state, path, method, headers, body).await {
|
||||
Ok(res) => res,
|
||||
Err(handler_error) => {
|
||||
let error_response = match &handler_error {
|
||||
HandlerError::AllRequestsFailed(node_errors) => {
|
||||
json!({
|
||||
"error": "All nodes failed",
|
||||
"details": {
|
||||
"type": "AllRequestsFailed",
|
||||
"message": "All proxy requests to available nodes failed",
|
||||
"node_errors": node_errors.iter().map(|(node, error)| {
|
||||
json!({
|
||||
"node": node,
|
||||
"error": error
|
||||
})
|
||||
}).collect::<Vec<_>>(),
|
||||
"total_nodes_tried": node_errors.len()
|
||||
}
|
||||
})
|
||||
}
|
||||
HandlerError::NoNodes => {
|
||||
json!({
|
||||
"error": "No nodes available",
|
||||
"details": {
|
||||
"type": "NoNodes",
|
||||
"message": "No healthy nodes available in the pool"
|
||||
}
|
||||
})
|
||||
}
|
||||
HandlerError::PoolError(msg) => {
|
||||
json!({
|
||||
"error": "Pool error",
|
||||
"details": {
|
||||
"type": "PoolError",
|
||||
"message": msg
|
||||
}
|
||||
})
|
||||
}
|
||||
HandlerError::RequestError(msg) => {
|
||||
json!({
|
||||
"error": "Request error",
|
||||
"details": {
|
||||
"type": "RequestError",
|
||||
"message": msg
|
||||
}
|
||||
})
|
||||
}
|
||||
};
|
||||
|
||||
Response::builder()
|
||||
.status(StatusCode::INTERNAL_SERVER_ERROR)
|
||||
.header("content-type", "application/json")
|
||||
.body(Body::from(error_response.to_string()))
|
||||
.unwrap_or_else(|_| Response::new(Body::empty()))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[axum::debug_handler]
|
||||
pub async fn simple_proxy_handler(
|
||||
State(state): State<AppState>,
|
||||
method: Method,
|
||||
uri: axum::http::Uri,
|
||||
headers: HeaderMap,
|
||||
body: axum::body::Bytes,
|
||||
) -> Response {
|
||||
let body_size = body.len();
|
||||
let request_id = Uuid::new_v4();
|
||||
let path = uri.path().to_string();
|
||||
let method_str = method.to_string();
|
||||
let path_clone = path.clone();
|
||||
|
||||
// Extract JSON-RPC method for tracing span
|
||||
let body_option = (!body.is_empty()).then_some(&body[..]);
|
||||
let jsonrpc_method = if path == "/json_rpc" {
|
||||
if let Some(body_data) = body_option {
|
||||
extract_jsonrpc_method(body_data)
|
||||
} else {
|
||||
None
|
||||
}
|
||||
} else {
|
||||
None
|
||||
};
|
||||
let jsonrpc_method_for_span = jsonrpc_method.as_deref().unwrap_or("N/A").to_string();
|
||||
|
||||
async move {
|
||||
match &jsonrpc_method {
|
||||
Some(rpc_method) => debug!(
|
||||
"Proxying {} {} ({} bytes) - JSON-RPC method: {}",
|
||||
method, path, body_size, rpc_method
|
||||
),
|
||||
None => debug!("Proxying {} {} ({} bytes)", method, path, body_size),
|
||||
}
|
||||
|
||||
proxy_request(&state, &path, method.as_str(), &headers, body_option).await
|
||||
}
|
||||
.instrument(info_span!("proxy_request",
|
||||
request_id = %request_id,
|
||||
method = %method_str,
|
||||
path = %path_clone,
|
||||
body_size = body_size,
|
||||
jsonrpc_method = %jsonrpc_method_for_span
|
||||
))
|
||||
.await
|
||||
}
|
||||
|
||||
#[axum::debug_handler]
|
||||
pub async fn simple_stats_handler(State(state): State<AppState>) -> Response {
|
||||
async move {
|
||||
let node_pool_guard = state.node_pool.read().await;
|
||||
|
||||
match node_pool_guard.get_current_status().await {
|
||||
Ok(status) => {
|
||||
let stats_json = serde_json::json!({
|
||||
"status": "healthy",
|
||||
"total_node_count": status.total_node_count,
|
||||
"healthy_node_count": status.healthy_node_count,
|
||||
"successful_health_checks": status.successful_health_checks,
|
||||
"unsuccessful_health_checks": status.unsuccessful_health_checks,
|
||||
"top_reliable_nodes": status.top_reliable_nodes
|
||||
});
|
||||
|
||||
Response::builder()
|
||||
.status(StatusCode::OK)
|
||||
.header("content-type", "application/json")
|
||||
.body(Body::from(stats_json.to_string()))
|
||||
.unwrap_or_else(|_| Response::new(Body::empty()))
|
||||
}
|
||||
Err(e) => {
|
||||
error!("Failed to get pool status: {}", e);
|
||||
let error_json = r#"{"status":"error","message":"Failed to get pool status"}"#;
|
||||
Response::builder()
|
||||
.status(StatusCode::INTERNAL_SERVER_ERROR)
|
||||
.header("content-type", "application/json")
|
||||
.body(Body::from(error_json))
|
||||
.unwrap_or_else(|_| Response::new(Body::empty()))
|
||||
}
|
||||
}
|
||||
}
|
||||
.instrument(info_span!("stats_request"))
|
||||
.await
|
||||
}
|
|
@ -2,6 +2,7 @@ use cmake::Config;
|
|||
|
||||
fn main() {
|
||||
let is_github_actions: bool = std::env::var("GITHUB_ACTIONS").is_ok();
|
||||
let is_docker_build: bool = std::env::var("DOCKER_BUILD").is_ok();
|
||||
|
||||
// Only rerun this when the bridge.rs or static_bridge.h file changes.
|
||||
println!("cargo:rerun-if-changed=src/bridge.rs");
|
||||
|
@ -37,9 +38,10 @@ fn main() {
|
|||
.define("GTEST_HAS_ABSL", "OFF")
|
||||
// Use lightweight crypto library
|
||||
.define("MONERO_WALLET_CRYPTO_LIBRARY", "cn")
|
||||
.build_arg(match is_github_actions {
|
||||
true => "-j1",
|
||||
false => "-j",
|
||||
.build_arg(match (is_github_actions, is_docker_build) {
|
||||
(true, _) => "-j1",
|
||||
(_, true) => "-j1",
|
||||
(_, _) => "-j",
|
||||
})
|
||||
.build();
|
||||
|
||||
|
|
|
@ -1054,7 +1054,7 @@ impl FfiWallet {
|
|||
monero::Address::from_str(&address.to_string()).expect("wallet's own address to be valid")
|
||||
}
|
||||
|
||||
fn set_daemon_address(&mut self, address: &str) -> anyhow::Result<()> {
|
||||
pub fn set_daemon_address(&mut self, address: &str) -> anyhow::Result<()> {
|
||||
tracing::debug!(%address, "Setting daemon address");
|
||||
|
||||
let_cxx_string!(address = address);
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
[toolchain]
|
||||
# also update this in the readme, changelog, and github actions
|
||||
channel = "1.82"
|
||||
channel = "1.85"
|
||||
components = ["clippy"]
|
||||
targets = ["armv7-unknown-linux-gnueabihf"]
|
||||
|
|
|
@ -4,9 +4,9 @@
|
|||
"version": "0.7.0",
|
||||
"type": "module",
|
||||
"scripts": {
|
||||
"check-bindings": "typeshare --lang=typescript --output-file __temp_bindings.ts ../swap/src && dprint fmt __temp_bindings.ts && diff -wbB __temp_bindings.ts ./src/models/tauriModel.ts && rm __temp_bindings.ts",
|
||||
"gen-bindings-verbose": "RUST_LOG=debug RUST_BACKTRACE=1 typeshare --lang=typescript --output-file ./src/models/tauriModel.ts ../swap/src && dprint fmt ./src/models/tauriModel.ts",
|
||||
"gen-bindings": "typeshare --lang=typescript --output-file ./src/models/tauriModel.ts ../swap/src && dprint fmt ./src/models/tauriModel.ts",
|
||||
"check-bindings": "typeshare --lang=typescript --output-file __temp_bindings.ts ../swap/src ../monero-rpc-pool/src ../electrum-pool/src && dprint fmt __temp_bindings.ts && diff -wbB __temp_bindings.ts ./src/models/tauriModel.ts && rm __temp_bindings.ts",
|
||||
"gen-bindings-verbose": "RUST_LOG=debug RUST_BACKTRACE=1 typeshare --lang=typescript --output-file ./src/models/tauriModel.ts ../swap/src ../monero-rpc-pool/src ../electrum-pool/src && dprint fmt ./src/models/tauriModel.ts",
|
||||
"gen-bindings": "typeshare --lang=typescript --output-file ./src/models/tauriModel.ts ../swap/src ../monero-rpc-pool/src ../electrum-pool/src && dprint fmt ./src/models/tauriModel.ts",
|
||||
"test": "vitest",
|
||||
"test:ui": "vitest --ui",
|
||||
"dev": "vite",
|
||||
|
|
|
@ -8,6 +8,7 @@ import {
|
|||
approvalEventReceived,
|
||||
backgroundProgressEventReceived,
|
||||
} from "store/features/rpcSlice";
|
||||
import { poolStatusReceived } from "store/features/poolSlice";
|
||||
import { swapProgressEventReceived } from "store/features/swapSlice";
|
||||
import logger from "utils/logger";
|
||||
import {
|
||||
|
@ -127,6 +128,10 @@ export async function setupBackgroundTasks(): Promise<void> {
|
|||
store.dispatch(backgroundProgressEventReceived(eventData));
|
||||
break;
|
||||
|
||||
case "PoolStatusUpdate":
|
||||
store.dispatch(poolStatusReceived(eventData));
|
||||
break;
|
||||
|
||||
default:
|
||||
exhaustiveGuard(channelName);
|
||||
}
|
||||
|
|
|
@ -24,8 +24,8 @@ export default function UnfinishedSwapsAlert() {
|
|||
>
|
||||
You have{" "}
|
||||
{resumableSwapsCount > 1
|
||||
? `${resumableSwapsCount} unfinished swaps`
|
||||
: "one unfinished swap"}
|
||||
? `${resumableSwapsCount} pending swaps`
|
||||
: "one pending swap"}
|
||||
</Alert>
|
||||
);
|
||||
}
|
||||
|
|
|
@ -63,7 +63,10 @@ function getActiveStep(state: SwapState | null): PathStep | null {
|
|||
// Bitcoin has been locked, waiting for the counterparty to lock their XMR
|
||||
case "BtcLockTxInMempool":
|
||||
// We only display the first step as completed if the Bitcoin lock has been confirmed
|
||||
if (latestState.content.btc_lock_confirmations > 0) {
|
||||
if (
|
||||
latestState.content.btc_lock_confirmations !== undefined &&
|
||||
latestState.content.btc_lock_confirmations > 0
|
||||
) {
|
||||
return [PathType.HAPPY_PATH, 1, isReleased];
|
||||
}
|
||||
return [PathType.HAPPY_PATH, 0, isReleased];
|
||||
|
|
|
@ -1,4 +1,5 @@
|
|||
import { TauriSwapProgressEventContent } from "models/tauriModelExt";
|
||||
import { formatConfirmations } from "utils/formatUtils";
|
||||
import BitcoinTransactionInfoBox from "../../BitcoinTransactionInfoBox";
|
||||
import SwapStatusAlert from "renderer/components/alert/SwapStatusAlert/SwapStatusAlert";
|
||||
import { useActiveSwapInfo } from "store/hooks";
|
||||
|
@ -15,10 +16,11 @@ export default function BitcoinLockTxInMempoolPage({
|
|||
|
||||
return (
|
||||
<Box>
|
||||
{btc_lock_confirmations < BITCOIN_CONFIRMATIONS_WARNING_THRESHOLD && (
|
||||
{(btc_lock_confirmations === undefined ||
|
||||
btc_lock_confirmations < BITCOIN_CONFIRMATIONS_WARNING_THRESHOLD) && (
|
||||
<DialogContentText>
|
||||
Your Bitcoin has been locked.{" "}
|
||||
{btc_lock_confirmations > 0
|
||||
{btc_lock_confirmations !== undefined && btc_lock_confirmations > 0
|
||||
? "We are waiting for the other party to lock their Monero."
|
||||
: "We are waiting for the blockchain to confirm the transaction. Once confirmed, the other party will lock their Monero."}
|
||||
</DialogContentText>
|
||||
|
@ -30,9 +32,10 @@ export default function BitcoinLockTxInMempoolPage({
|
|||
gap: "1rem",
|
||||
}}
|
||||
>
|
||||
{btc_lock_confirmations >= BITCOIN_CONFIRMATIONS_WARNING_THRESHOLD && (
|
||||
<SwapStatusAlert swap={swapInfo} isRunning={true} />
|
||||
)}
|
||||
{btc_lock_confirmations !== undefined &&
|
||||
btc_lock_confirmations >= BITCOIN_CONFIRMATIONS_WARNING_THRESHOLD && (
|
||||
<SwapStatusAlert swap={swapInfo} isRunning={true} />
|
||||
)}
|
||||
<BitcoinTransactionInfoBox
|
||||
title="Bitcoin Lock Transaction"
|
||||
txId={btc_lock_txid}
|
||||
|
@ -43,7 +46,7 @@ export default function BitcoinLockTxInMempoolPage({
|
|||
After they lock their funds and the Monero transaction receives
|
||||
one confirmation, the swap will proceed to the next step.
|
||||
<br />
|
||||
Confirmations: {btc_lock_confirmations}
|
||||
Confirmations: {formatConfirmations(btc_lock_confirmations)}
|
||||
</>
|
||||
}
|
||||
/>
|
||||
|
|
|
@ -1,12 +1,13 @@
|
|||
import { Box, DialogContentText } from "@mui/material";
|
||||
import { TauriSwapProgressEventContent } from "models/tauriModelExt";
|
||||
import { formatConfirmations } from "utils/formatUtils";
|
||||
import MoneroTransactionInfoBox from "../../MoneroTransactionInfoBox";
|
||||
|
||||
export default function XmrLockTxInMempoolPage({
|
||||
xmr_lock_tx_confirmations,
|
||||
xmr_lock_txid,
|
||||
}: TauriSwapProgressEventContent<"XmrLockTxInMempool">) {
|
||||
const additionalContent = `Confirmations: ${xmr_lock_tx_confirmations}/10`;
|
||||
const additionalContent = `Confirmations: ${formatConfirmations(xmr_lock_tx_confirmations, 10)}`;
|
||||
|
||||
return (
|
||||
<Box>
|
||||
|
|
|
@ -0,0 +1,192 @@
|
|||
import {
|
||||
Box,
|
||||
Typography,
|
||||
Table,
|
||||
TableBody,
|
||||
TableCell,
|
||||
TableContainer,
|
||||
TableHead,
|
||||
TableRow,
|
||||
Chip,
|
||||
LinearProgress,
|
||||
useTheme,
|
||||
} from "@mui/material";
|
||||
import InfoBox from "renderer/components/modal/swap/InfoBox";
|
||||
import { ReliableNodeInfo } from "models/tauriModel";
|
||||
import NetworkWifiIcon from "@mui/icons-material/NetworkWifi";
|
||||
import { useAppSelector } from "store/hooks";
|
||||
|
||||
/**
 * Dashboard card showing the health of the Monero RPC node pool: summary
 * chips (known / healthy node counts, retry rate) and a table of the most
 * reliable nodes. Falls back to a "bootstrapping" message while no pool
 * status has arrived yet.
 */
export default function MoneroPoolHealthBox() {
  // Pool status and loading flag come from the redux `pool` slice.
  const { poolStatus, isLoading } = useAppSelector((state) => ({
    poolStatus: state.pool.status,
    isLoading: state.pool.isLoading,
  }));
  const theme = useTheme();

  // Render a latency in whole milliseconds; "N/A" when not measured.
  const formatLatency = (latencyMs?: number) => {
    if (latencyMs === undefined || latencyMs === null) return "N/A";
    return `${Math.round(latencyMs)}ms`;
  };

  // Render a 0..1 success rate as a percentage with one decimal.
  const formatSuccessRate = (rate: number) => {
    return `${(rate * 100).toFixed(1)}%`;
  };

  // NOTE(review): this helper is never called anywhere in this component —
  // confirm whether it is dead code or intended for a future indicator.
  const getHealthColor = (healthyCount: number, reliableCount: number) => {
    if (reliableCount === 0) return theme.palette.error.main;
    if (reliableCount < 3) return theme.palette.warning.main;
    return theme.palette.success.main;
  };

  // Summary chips: total known nodes, healthy nodes, and the retry rate
  // derived from the recent health-check success ratio.
  const renderHealthSummary = () => {
    if (!poolStatus) return null;

    const totalChecks =
      poolStatus.successful_health_checks +
      poolStatus.unsuccessful_health_checks;
    const overallSuccessRate =
      totalChecks > 0
        ? (poolStatus.successful_health_checks / totalChecks) * 100
        : 0;

    return (
      <Box sx={{ display: "flex", gap: 2, flexWrap: "wrap" }}>
        <Chip
          label={`${poolStatus.total_node_count} Total Known`}
          color="info"
          variant="outlined"
          size="small"
        />
        <Chip
          label={`${poolStatus.healthy_node_count} Healthy`}
          color={poolStatus.healthy_node_count > 0 ? "success" : "error"}
          variant="outlined"
          size="small"
        />
        <Chip
          label={`${(100 - overallSuccessRate).toFixed(1)}% Retry Rate (last 200 operations)`}
          color={
            overallSuccessRate > 80
              ? "success"
              : overallSuccessRate > 60
                ? "warning"
                : "error"
          }
          variant="outlined"
          size="small"
        />
      </Box>
    );
  };

  // Table of the top reliable nodes; shows a bootstrapping notice while the
  // node registry is still empty.
  const renderTopNodes = () => {
    if (!poolStatus || poolStatus.top_reliable_nodes.length === 0) {
      return (
        <>
          <Box sx={{ display: "flex", alignItems: "center", gap: 1 }}>
            <Typography variant="h6" sx={{ fontSize: "1rem" }}>
              🚧
            </Typography>
            <Typography variant="body2" color="text.secondary">
              Bootstrapping remote Monero node registry... But you can already
              start swapping!
            </Typography>
          </Box>
        </>
      );
    }

    return (
      <TableContainer>
        <Table size="small">
          <TableHead>
            <TableRow>
              <TableCell>Node URL</TableCell>
              <TableCell align="right">Success Rate</TableCell>
              <TableCell align="right">Avg Latency</TableCell>
            </TableRow>
          </TableHead>
          <TableBody>
            {poolStatus.top_reliable_nodes.map(
              (node: ReliableNodeInfo, index: number) => (
                <TableRow key={index}>
                  <TableCell>
                    <Typography
                      variant="caption"
                      sx={{ wordBreak: "break-all" }}
                    >
                      {node.url}
                    </Typography>
                  </TableCell>
                  <TableCell align="right">
                    <Typography variant="caption">
                      {formatSuccessRate(node.success_rate)}
                    </Typography>
                  </TableCell>
                  <TableCell align="right">
                    <Typography variant="caption">
                      {formatLatency(node.avg_latency_ms)}
                    </Typography>
                  </TableCell>
                </TableRow>
              ),
            )}
          </TableBody>
        </Table>
      </TableContainer>
    );
  };

  // Show bootstrapping message when no data is available
  if (!poolStatus && !isLoading) {
    return (
      <InfoBox
        title={
          <Box sx={{ display: "flex", alignItems: "center", gap: 1 }}>
            <NetworkWifiIcon />
            Monero Pool Health
          </Box>
        }
        mainContent={
          <Box sx={{ display: "flex", alignItems: "center", gap: 1 }}>
            <Typography variant="h2" sx={{ fontSize: "1.5rem" }}>
              🚧
            </Typography>
            <Typography variant="subtitle2">
              Bootstrapping pool health monitoring. You can already start using
              the app!
            </Typography>
          </Box>
        }
        additionalContent={null}
        icon={null}
        loading={false}
      />
    );
  }

  return (
    <InfoBox
      title={
        <Box sx={{ display: "flex", alignItems: "center", gap: 1 }}>
          <NetworkWifiIcon />
          Monero Pool Health
        </Box>
      }
      mainContent={
        <Typography variant="subtitle2">
          Real-time health monitoring of the Monero node pool. Shows node
          availability, success rates, and performance metrics.
        </Typography>
      }
      additionalContent={
        <Box sx={{ display: "flex", flexDirection: "column", gap: 2 }}>
          {poolStatus && renderHealthSummary()}
          <Box>{renderTopNodes()}</Box>
        </Box>
      }
      icon={null}
      loading={isLoading}
    />
  );
}
|
|
@ -20,6 +20,11 @@ import {
|
|||
useTheme,
|
||||
Switch,
|
||||
SelectChangeEvent,
|
||||
TextField,
|
||||
ToggleButton,
|
||||
ToggleButtonGroup,
|
||||
Chip,
|
||||
LinearProgress,
|
||||
} from "@mui/material";
|
||||
import {
|
||||
addNode,
|
||||
|
@ -35,11 +40,13 @@ import {
|
|||
setFiatCurrency,
|
||||
setTheme,
|
||||
setTorEnabled,
|
||||
setUseMoneroRpcPool,
|
||||
} from "store/features/settingsSlice";
|
||||
import { useAppDispatch, useNodes, useSettings } from "store/hooks";
|
||||
import ValidatedTextField from "renderer/components/other/ValidatedTextField";
|
||||
import PromiseInvokeButton from "renderer/components/PromiseInvokeButton";
|
||||
import HelpIcon from "@mui/icons-material/HelpOutline";
|
||||
import { ReactNode, useState } from "react";
|
||||
import { ReactNode, useState, useEffect } from "react";
|
||||
import { Theme } from "renderer/components/theme";
|
||||
import {
|
||||
Add,
|
||||
|
@ -47,12 +54,18 @@ import {
|
|||
Delete,
|
||||
Edit,
|
||||
HourglassEmpty,
|
||||
Refresh,
|
||||
} from "@mui/icons-material";
|
||||
|
||||
import { getNetwork } from "store/config";
|
||||
import { currencySymbol } from "utils/formatUtils";
|
||||
import InfoBox from "renderer/components/modal/swap/InfoBox";
|
||||
import { isValidMultiAddressWithPeerId } from "utils/parseUtils";
|
||||
|
||||
import { useAppSelector } from "store/hooks";
|
||||
import { getNodeStatus } from "renderer/rpc";
|
||||
import { setStatus } from "store/features/nodesSlice";
|
||||
|
||||
const PLACEHOLDER_ELECTRUM_RPC_URL = "ssl://blockstream.info:700";
|
||||
const PLACEHOLDER_MONERO_NODE_URL = "http://xmr-node.cakewallet.com:18081";
|
||||
|
||||
|
@ -83,6 +96,7 @@ export default function SettingsBox() {
|
|||
<TableBody>
|
||||
<TorSettings />
|
||||
<ElectrumRpcUrlSetting />
|
||||
<MoneroRpcPoolSetting />
|
||||
<MoneroNodeUrlSetting />
|
||||
<FetchFiatPricesSetting />
|
||||
<ThemeSetting />
|
||||
|
@ -268,15 +282,21 @@ function ElectrumRpcUrlSetting() {
|
|||
function SettingLabel({
|
||||
label,
|
||||
tooltip,
|
||||
disabled = false,
|
||||
}: {
|
||||
label: ReactNode;
|
||||
tooltip: string | null;
|
||||
disabled?: boolean;
|
||||
}) {
|
||||
const opacity = disabled ? 0.5 : 1;
|
||||
|
||||
return (
|
||||
<Box style={{ display: "flex", alignItems: "center", gap: "0.5rem" }}>
|
||||
<Box
|
||||
style={{ display: "flex", alignItems: "center", gap: "0.5rem", opacity }}
|
||||
>
|
||||
<Box>{label}</Box>
|
||||
<Tooltip title={tooltip}>
|
||||
<IconButton size="small">
|
||||
<IconButton size="small" disabled={disabled}>
|
||||
<HelpIcon />
|
||||
</IconButton>
|
||||
</Tooltip>
|
||||
|
@ -285,38 +305,173 @@ function SettingLabel({
|
|||
}
|
||||
|
||||
/**
|
||||
* A setting that allows you to select the Monero Node URL to use.
|
||||
* A setting that allows you to toggle between using the Monero RPC Pool and custom nodes.
|
||||
*/
|
||||
function MoneroRpcPoolSetting() {
|
||||
const useMoneroRpcPool = useSettings((s) => s.useMoneroRpcPool);
|
||||
const dispatch = useAppDispatch();
|
||||
|
||||
const handleChange = (
|
||||
event: React.MouseEvent<HTMLElement>,
|
||||
newValue: string,
|
||||
) => {
|
||||
if (newValue !== null) {
|
||||
dispatch(setUseMoneroRpcPool(newValue === "pool"));
|
||||
}
|
||||
};
|
||||
|
||||
return (
|
||||
<TableRow>
|
||||
<TableCell>
|
||||
<SettingLabel
|
||||
label="Monero Node Selection"
|
||||
tooltip="Choose between using a load-balanced pool of Monero nodes for better reliability, or configure custom Monero nodes."
|
||||
/>
|
||||
</TableCell>
|
||||
<TableCell>
|
||||
<ToggleButtonGroup
|
||||
color="primary"
|
||||
value={useMoneroRpcPool ? "pool" : "custom"}
|
||||
exclusive
|
||||
onChange={handleChange}
|
||||
aria-label="Monero node selection"
|
||||
size="small"
|
||||
>
|
||||
<ToggleButton value="pool">Pool (Recommended)</ToggleButton>
|
||||
<ToggleButton value="custom">Manual</ToggleButton>
|
||||
</ToggleButtonGroup>
|
||||
</TableCell>
|
||||
</TableRow>
|
||||
);
|
||||
}
|
||||
|
||||
/**
|
||||
* A setting that allows you to configure a single Monero Node URL.
|
||||
* Gets disabled when RPC pool is enabled.
|
||||
*/
|
||||
function MoneroNodeUrlSetting() {
|
||||
const network = getNetwork();
|
||||
const [tableVisible, setTableVisible] = useState(false);
|
||||
const useMoneroRpcPool = useSettings((s) => s.useMoneroRpcPool);
|
||||
const moneroNodeUrl = useSettings(
|
||||
(s) => s.nodes[network][Blockchain.Monero][0] || "",
|
||||
);
|
||||
const nodeStatuses = useNodes((s) => s.nodes);
|
||||
const dispatch = useAppDispatch();
|
||||
const [isRefreshing, setIsRefreshing] = useState(false);
|
||||
|
||||
const isValid = (url: string) => isValidUrl(url, ["http"]);
|
||||
const currentNodes = useSettings((s) => s.nodes[network][Blockchain.Monero]);
|
||||
|
||||
const handleNodeUrlChange = (newUrl: string) => {
|
||||
// Remove existing nodes and add the new one
|
||||
currentNodes.forEach((node) => {
|
||||
dispatch(removeNode({ network, type: Blockchain.Monero, node }));
|
||||
});
|
||||
|
||||
if (newUrl.trim()) {
|
||||
dispatch(
|
||||
addNode({ network, type: Blockchain.Monero, node: newUrl.trim() }),
|
||||
);
|
||||
}
|
||||
};
|
||||
|
||||
const handleRefreshStatus = async () => {
|
||||
// Don't refresh if pool is enabled or no node URL is configured
|
||||
if (!moneroNodeUrl || useMoneroRpcPool) return;
|
||||
|
||||
setIsRefreshing(true);
|
||||
try {
|
||||
const status = await getNodeStatus(
|
||||
moneroNodeUrl,
|
||||
Blockchain.Monero,
|
||||
network,
|
||||
);
|
||||
|
||||
// Update the status in the store
|
||||
dispatch(
|
||||
setStatus({
|
||||
node: moneroNodeUrl,
|
||||
status,
|
||||
blockchain: Blockchain.Monero,
|
||||
}),
|
||||
);
|
||||
} catch (error) {
|
||||
console.error("Failed to refresh node status:", error);
|
||||
} finally {
|
||||
setIsRefreshing(false);
|
||||
}
|
||||
};
|
||||
|
||||
const isValid = (url: string) => url === "" || isValidUrl(url, ["http"]);
|
||||
const nodeStatus = moneroNodeUrl
|
||||
? nodeStatuses[Blockchain.Monero][moneroNodeUrl]
|
||||
: null;
|
||||
|
||||
return (
|
||||
<TableRow>
|
||||
<TableCell>
|
||||
<SettingLabel
|
||||
label="Custom Monero Node URL"
|
||||
tooltip="This is the URL of the Monero node that the GUI will connect to. Ensure the node is listening for RPC connections over HTTP. If you leave this field empty, the GUI will choose from a list of known nodes at random."
|
||||
tooltip={
|
||||
useMoneroRpcPool
|
||||
? "This setting is disabled because Monero RPC pool is enabled. Disable the RPC pool to configure a custom node."
|
||||
: "This is the URL of the Monero node that the GUI will connect to. It is used to sync Monero transactions. If you leave this field empty, the GUI will choose from a list of known servers at random."
|
||||
}
|
||||
disabled={useMoneroRpcPool}
|
||||
/>
|
||||
</TableCell>
|
||||
<TableCell>
|
||||
<IconButton onClick={() => setTableVisible(!tableVisible)} size="large">
|
||||
<Edit />
|
||||
</IconButton>
|
||||
{tableVisible ? (
|
||||
<NodeTableModal
|
||||
open={tableVisible}
|
||||
onClose={() => setTableVisible(false)}
|
||||
network={network}
|
||||
blockchain={Blockchain.Monero}
|
||||
isValid={isValid}
|
||||
<Box sx={{ display: "flex", alignItems: "center", gap: 1 }}>
|
||||
<ValidatedTextField
|
||||
value={moneroNodeUrl}
|
||||
onValidatedChange={handleNodeUrlChange}
|
||||
placeholder={PLACEHOLDER_MONERO_NODE_URL}
|
||||
disabled={useMoneroRpcPool}
|
||||
fullWidth
|
||||
isValid={isValid}
|
||||
variant="outlined"
|
||||
noErrorWhenEmpty
|
||||
/>
|
||||
) : (
|
||||
<></>
|
||||
)}
|
||||
<>
|
||||
<Tooltip
|
||||
title={
|
||||
useMoneroRpcPool
|
||||
? "Node status checking is disabled when using the pool"
|
||||
: !moneroNodeUrl
|
||||
? "Enter a node URL to check status"
|
||||
: "Node status"
|
||||
}
|
||||
>
|
||||
<Box sx={{ display: "flex", alignItems: "center" }}>
|
||||
<Circle
|
||||
color={
|
||||
useMoneroRpcPool || !moneroNodeUrl
|
||||
? "gray"
|
||||
: nodeStatus
|
||||
? "green"
|
||||
: "red"
|
||||
}
|
||||
/>
|
||||
</Box>
|
||||
</Tooltip>
|
||||
<Tooltip
|
||||
title={
|
||||
useMoneroRpcPool
|
||||
? "Node status refresh is disabled when using the pool"
|
||||
: !moneroNodeUrl
|
||||
? "Enter a node URL to refresh status"
|
||||
: "Refresh node status"
|
||||
}
|
||||
>
|
||||
<IconButton
|
||||
onClick={handleRefreshStatus}
|
||||
disabled={isRefreshing || useMoneroRpcPool || !moneroNodeUrl}
|
||||
size="small"
|
||||
>
|
||||
{isRefreshing ? <HourglassEmpty /> : <Refresh />}
|
||||
</IconButton>
|
||||
</Tooltip>
|
||||
</>
|
||||
</Box>
|
||||
</TableCell>
|
||||
</TableRow>
|
||||
);
|
||||
|
@ -380,7 +535,7 @@ function NodeTableModal({
|
|||
When the daemon is started, it will attempt to connect to the first
|
||||
available {blockchain} node in this list. If you leave this field
|
||||
empty or all nodes are unavailable, it will choose from a list of
|
||||
known nodes at random. Requires a restart to take effect.
|
||||
known nodes at random.
|
||||
</Typography>
|
||||
<NodeTable
|
||||
network={network}
|
||||
|
@ -413,38 +568,6 @@ function Circle({ color, radius = 6 }: { color: string; radius?: number }) {
|
|||
);
|
||||
}
|
||||
|
||||
/**
|
||||
* Displays a status indicator for a node
|
||||
*/
|
||||
function NodeStatus({ status }: { status: boolean | undefined }) {
|
||||
const theme = useTheme();
|
||||
|
||||
switch (status) {
|
||||
case true:
|
||||
return (
|
||||
<Tooltip
|
||||
title={"This node is available and responding to RPC requests"}
|
||||
>
|
||||
<Circle color={theme.palette.success.dark} />
|
||||
</Tooltip>
|
||||
);
|
||||
case false:
|
||||
return (
|
||||
<Tooltip
|
||||
title={"This node is not available or not responding to RPC requests"}
|
||||
>
|
||||
<Circle color={theme.palette.error.dark} />
|
||||
</Tooltip>
|
||||
);
|
||||
default:
|
||||
return (
|
||||
<Tooltip title={"The status of this node is currently unknown"}>
|
||||
<HourglassEmpty />
|
||||
</Tooltip>
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* A table that displays the available nodes for a given network and blockchain.
|
||||
* It allows you to add, remove, and move nodes up the list.
|
||||
|
@ -515,7 +638,9 @@ function NodeTable({
|
|||
</TableCell>
|
||||
{/* Node status icon */}
|
||||
<TableCell align="center">
|
||||
<NodeStatus status={nodeStatuses[blockchain][node]} />
|
||||
<Circle
|
||||
color={nodeStatuses[blockchain][node] ? "green" : "red"}
|
||||
/>
|
||||
</TableCell>
|
||||
{/* Remove and move buttons */}
|
||||
<TableCell>
|
||||
|
@ -582,7 +707,7 @@ export function TorSettings() {
|
|||
<TableCell>
|
||||
<SettingLabel
|
||||
label="Use Tor"
|
||||
tooltip="Tor (The Onion Router) is a decentralized network allowing for anonymous browsing. If enabled, the app will use its internal Tor client to hide your IP address from the maker. Requires a restart to take effect."
|
||||
tooltip="Route network traffic through Tor to hide your IP address from the maker."
|
||||
/>
|
||||
</TableCell>
|
||||
|
||||
|
|
|
@ -5,6 +5,7 @@ import DaemonControlBox from "./DaemonControlBox";
|
|||
import SettingsBox from "./SettingsBox";
|
||||
import ExportDataBox from "./ExportDataBox";
|
||||
import DiscoveryBox from "./DiscoveryBox";
|
||||
import MoneroPoolHealthBox from "./MoneroPoolHealthBox";
|
||||
import { useLocation } from "react-router-dom";
|
||||
import { useEffect } from "react";
|
||||
|
||||
|
@ -29,6 +30,7 @@ export default function SettingsPage() {
|
|||
>
|
||||
<SettingsBox />
|
||||
<DiscoveryBox />
|
||||
<MoneroPoolHealthBox />
|
||||
<ExportDataBox />
|
||||
<DaemonControlBox />
|
||||
<DonateInfoBox />
|
||||
|
|
|
@ -223,36 +223,29 @@ export async function initializeContext() {
|
|||
const bitcoinNodes =
|
||||
store.getState().settings.nodes[network][Blockchain.Bitcoin];
|
||||
|
||||
// For Monero nodes, check availability and use the first working one
|
||||
const moneroNodes =
|
||||
store.getState().settings.nodes[network][Blockchain.Monero];
|
||||
let moneroNode = null;
|
||||
// For Monero nodes, determine whether to use pool or custom node
|
||||
const useMoneroRpcPool = store.getState().settings.useMoneroRpcPool;
|
||||
|
||||
if (moneroNodes.length > 0) {
|
||||
try {
|
||||
moneroNode = await Promise.any(
|
||||
moneroNodes.map(async (node) => {
|
||||
const isAvailable = await getNodeStatus(
|
||||
node,
|
||||
Blockchain.Monero,
|
||||
network,
|
||||
);
|
||||
if (isAvailable) {
|
||||
return node;
|
||||
}
|
||||
throw new Error(`Monero node ${node} is not available`);
|
||||
}),
|
||||
);
|
||||
} catch {
|
||||
// If no Monero node is available, use null
|
||||
moneroNode = null;
|
||||
}
|
||||
}
|
||||
const moneroNodeUrl =
|
||||
store.getState().settings.nodes[network][Blockchain.Monero][0] ?? null;
|
||||
|
||||
// Check the state of the Monero node
|
||||
const isMoneroNodeOnline = await getMoneroNodeStatus(moneroNodeUrl, network);
|
||||
|
||||
const moneroNodeConfig =
|
||||
useMoneroRpcPool || moneroNodeUrl == null || !isMoneroNodeOnline
|
||||
? { type: "Pool" as const }
|
||||
: {
|
||||
type: "SingleNode" as const,
|
||||
content: {
|
||||
url: moneroNodeUrl,
|
||||
},
|
||||
};
|
||||
|
||||
// Initialize Tauri settings
|
||||
const tauriSettings: TauriSettings = {
|
||||
electrum_rpc_urls: bitcoinNodes,
|
||||
monero_node_url: moneroNode,
|
||||
monero_node_config: moneroNodeConfig,
|
||||
use_tor: useTor,
|
||||
};
|
||||
|
||||
|
@ -325,13 +318,15 @@ export async function updateAllNodeStatuses() {
|
|||
const network = getNetwork();
|
||||
const settings = store.getState().settings;
|
||||
|
||||
// Only check Monero nodes, skip Bitcoin nodes since we pass all electrum servers
|
||||
// to the backend without checking them (ElectrumBalancer handles failover)
|
||||
await Promise.all(
|
||||
settings.nodes[network][Blockchain.Monero].map((node) =>
|
||||
updateNodeStatus(node, Blockchain.Monero, network),
|
||||
),
|
||||
);
|
||||
// Only check Monero nodes if we're using custom nodes (not RPC pool)
|
||||
// Skip Bitcoin nodes since we pass all electrum servers to the backend without checking them (ElectrumBalancer handles failover)
|
||||
if (!settings.useMoneroRpcPool) {
|
||||
await Promise.all(
|
||||
settings.nodes[network][Blockchain.Monero].map((node) =>
|
||||
updateNodeStatus(node, Blockchain.Monero, network),
|
||||
),
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
export async function getMoneroAddresses(): Promise<GetMoneroAddressesResponse> {
|
||||
|
@ -361,3 +356,9 @@ export async function saveLogFiles(
|
|||
): Promise<void> {
|
||||
await invokeUnsafe<void>("save_txt_files", { zipFileName, content });
|
||||
}
|
||||
|
||||
export async function saveFilesInDialog(files: Record<string, string>) {
|
||||
await invokeUnsafe<void>("save_txt_files", {
|
||||
files,
|
||||
});
|
||||
}
|
||||
|
|
|
@ -7,6 +7,7 @@ import torSlice from "./features/torSlice";
|
|||
import settingsSlice from "./features/settingsSlice";
|
||||
import nodesSlice from "./features/nodesSlice";
|
||||
import conversationsSlice from "./features/conversationsSlice";
|
||||
import poolSlice from "./features/poolSlice";
|
||||
|
||||
export const reducers = {
|
||||
swap: swapReducer,
|
||||
|
@ -18,4 +19,5 @@ export const reducers = {
|
|||
settings: settingsSlice,
|
||||
nodes: nodesSlice,
|
||||
conversations: conversationsSlice,
|
||||
pool: poolSlice,
|
||||
};
|
||||
|
|
31
src-gui/src/store/features/poolSlice.ts
Normal file
31
src-gui/src/store/features/poolSlice.ts
Normal file
|
@ -0,0 +1,31 @@
|
|||
import { createSlice, PayloadAction } from "@reduxjs/toolkit";
|
||||
import { PoolStatus } from "models/tauriModel";
|
||||
|
||||
interface PoolSlice {
|
||||
status: PoolStatus | null;
|
||||
isLoading: boolean;
|
||||
}
|
||||
|
||||
const initialState: PoolSlice = {
|
||||
status: null,
|
||||
isLoading: true,
|
||||
};
|
||||
|
||||
export const poolSlice = createSlice({
|
||||
name: "pool",
|
||||
initialState,
|
||||
reducers: {
|
||||
poolStatusReceived(slice, action: PayloadAction<PoolStatus>) {
|
||||
slice.status = action.payload;
|
||||
slice.isLoading = false;
|
||||
},
|
||||
poolStatusReset(slice) {
|
||||
slice.status = null;
|
||||
slice.isLoading = true;
|
||||
},
|
||||
},
|
||||
});
|
||||
|
||||
export const { poolStatusReceived, poolStatusReset } = poolSlice.actions;
|
||||
|
||||
export default poolSlice.reducer;
|
|
@ -17,6 +17,8 @@ export interface SettingsState {
|
|||
fiatCurrency: FiatCurrency;
|
||||
/// Whether to enable Tor for p2p connections
|
||||
enableTor: boolean;
|
||||
/// Whether to use the Monero RPC pool for load balancing (true) or custom nodes (false)
|
||||
useMoneroRpcPool: boolean;
|
||||
userHasSeenIntroduction: boolean;
|
||||
/// List of rendezvous points
|
||||
rendezvousPoints: string[];
|
||||
|
@ -119,6 +121,7 @@ const initialState: SettingsState = {
|
|||
fetchFiatPrices: false,
|
||||
fiatCurrency: FiatCurrency.Usd,
|
||||
enableTor: true,
|
||||
useMoneroRpcPool: true, // Default to using RPC pool
|
||||
userHasSeenIntroduction: false,
|
||||
rendezvousPoints: DEFAULT_RENDEZVOUS_POINTS,
|
||||
};
|
||||
|
@ -206,6 +209,9 @@ const alertsSlice = createSlice({
|
|||
setTorEnabled(slice, action: PayloadAction<boolean>) {
|
||||
slice.enableTor = action.payload;
|
||||
},
|
||||
setUseMoneroRpcPool(slice, action: PayloadAction<boolean>) {
|
||||
slice.useMoneroRpcPool = action.payload;
|
||||
},
|
||||
},
|
||||
});
|
||||
|
||||
|
@ -218,6 +224,7 @@ export const {
|
|||
setFetchFiatPrices,
|
||||
setFiatCurrency,
|
||||
setTorEnabled,
|
||||
setUseMoneroRpcPool,
|
||||
setUserHasSeenIntroduction,
|
||||
addRendezvousPoint,
|
||||
removeRendezvousPoint,
|
||||
|
|
|
@ -83,3 +83,24 @@ export function currencySymbol(currency: FiatCurrency): string | null {
|
|||
return null;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Formats confirmation count, displaying "?" when the transaction state is unknown
|
||||
* @param confirmations - The number of confirmations, or undefined if unknown
|
||||
* @param maxConfirmations - Optional maximum confirmations to show as "X/Y" format
|
||||
* @returns Formatted string showing confirmations or "?" if unknown
|
||||
*/
|
||||
export function formatConfirmations(
|
||||
confirmations: number | undefined | null,
|
||||
maxConfirmations?: number,
|
||||
): string {
|
||||
if (confirmations === undefined || confirmations === null) {
|
||||
return maxConfirmations !== undefined ? `?/${maxConfirmations}` : "?";
|
||||
}
|
||||
|
||||
if (maxConfirmations !== undefined) {
|
||||
return `${confirmations}/${maxConfirmations}`;
|
||||
}
|
||||
|
||||
return confirmations.toString();
|
||||
}
|
||||
|
|
|
@ -1,6 +1,6 @@
|
|||
[package]
|
||||
name = "unstoppableswap-gui-rs"
|
||||
version = "2.2.0-beta"
|
||||
version = "2.3.0-beta.1"
|
||||
authors = [ "binarybaron", "einliterflasche", "unstoppableswap" ]
|
||||
edition = "2021"
|
||||
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
|
||||
|
@ -16,6 +16,7 @@ tauri-build = { version = "^2.0.0", features = [ "config-json5" ] }
|
|||
|
||||
[dependencies]
|
||||
anyhow = "1"
|
||||
monero-rpc-pool = { path = "../monero-rpc-pool" }
|
||||
rustls = { version = "0.23.26", default-features = false, features = ["ring"] }
|
||||
serde = { version = "1", features = [ "derive" ] }
|
||||
serde_json = "1"
|
||||
|
|
|
@ -17,7 +17,7 @@ use swap::cli::{
|
|||
tauri_bindings::{TauriContextStatusEvent, TauriEmitter, TauriHandle, TauriSettings},
|
||||
Context, ContextBuilder,
|
||||
},
|
||||
command::{Bitcoin, Monero},
|
||||
command::Bitcoin,
|
||||
};
|
||||
use tauri::{async_runtime::RwLock, Manager, RunEvent};
|
||||
use tauri_plugin_dialog::DialogExt;
|
||||
|
@ -141,7 +141,8 @@ fn setup(app: &mut tauri::App) -> Result<(), Box<dyn std::error::Error>> {
|
|||
|
||||
// We need to set a value for the Tauri state right at the start
|
||||
// If we don't do this, Tauri commands will panic at runtime if no value is present
|
||||
app_handle.manage::<RwLock<State>>(RwLock::new(State::new()));
|
||||
let state = RwLock::new(State::new());
|
||||
app_handle.manage::<RwLock<State>>(state);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
@ -192,7 +193,7 @@ pub fn run() {
|
|||
get_data_dir,
|
||||
resolve_approval_request,
|
||||
redact,
|
||||
save_txt_files
|
||||
save_txt_files,
|
||||
])
|
||||
.setup(setup)
|
||||
.build(tauri::generate_context!())
|
||||
|
@ -377,9 +378,7 @@ async fn initialize_context(
|
|||
bitcoin_electrum_rpc_urls: settings.electrum_rpc_urls.clone(),
|
||||
bitcoin_target_block: None,
|
||||
})
|
||||
.with_monero(Monero {
|
||||
monero_node_address: settings.monero_node_url.clone(),
|
||||
})
|
||||
.with_monero(settings.monero_node_config)
|
||||
.with_json(false)
|
||||
.with_debug(true)
|
||||
.with_tor(settings.use_tor)
|
||||
|
|
|
@ -1,6 +1,6 @@
|
|||
{
|
||||
"productName": "UnstoppableSwap",
|
||||
"version": "2.2.0-beta",
|
||||
"version": "2.3.0-beta.1",
|
||||
"identifier": "net.unstoppableswap.gui",
|
||||
"build": {
|
||||
"devUrl": "http://localhost:1420",
|
||||
|
|
|
@ -1,6 +1,6 @@
|
|||
[package]
|
||||
name = "swap"
|
||||
version = "2.2.0-beta"
|
||||
version = "2.3.0-beta.1"
|
||||
authors = ["The COMIT guys <hello@comit.network>"]
|
||||
edition = "2021"
|
||||
description = "XMR/BTC trustless atomic swaps."
|
||||
|
@ -13,7 +13,7 @@ tauri = ["dep:tauri"]
|
|||
|
||||
[dependencies]
|
||||
anyhow = "1"
|
||||
arti-client = { version = "0.25.0", features = ["static-sqlite", "tokio", "rustls"], default-features = false }
|
||||
arti-client = { version = "0.25.0", features = ["static-sqlite", "tokio", "rustls", "onion-service-service"], default-features = false }
|
||||
async-compression = { version = "0.3", features = ["bzip2", "tokio"] }
|
||||
async-trait = "0.1"
|
||||
asynchronous-codec = "0.7.0"
|
||||
|
@ -40,11 +40,13 @@ ed25519-dalek = "1"
|
|||
futures = { version = "0.3", default-features = false, features = ["std"] }
|
||||
hex = "0.4"
|
||||
libp2p = { version = "0.53.2", features = ["tcp", "yamux", "dns", "noise", "request-response", "ping", "rendezvous", "identify", "macros", "cbor", "json", "tokio", "serde", "rsa"] }
|
||||
libp2p-community-tor = { git = "https://github.com/umgefahren/libp2p-tor", branch = "main", features = ["listen-onion-service"] }
|
||||
libp2p-community-tor = { git = "https://github.com/umgefahren/libp2p-tor", rev = "e6b913e0f1ac1fc90b3ee4dd31b5511140c4a9af", features = ["listen-onion-service"] }
|
||||
moka = { version = "0.12", features = ["sync", "future"] }
|
||||
monero = { version = "0.12", features = ["serde_support"] }
|
||||
monero-rpc = { path = "../monero-rpc" }
|
||||
monero-rpc-pool = { path = "../monero-rpc-pool" }
|
||||
monero-sys = { path = "../monero-sys" }
|
||||
electrum-pool = { path = "../electrum-pool" }
|
||||
once_cell = "1.19"
|
||||
pem = "3.0"
|
||||
proptest = "1"
|
||||
|
|
|
@ -242,6 +242,12 @@ pub struct Monero {
|
|||
pub finality_confirmations: Option<u64>,
|
||||
#[serde(with = "crate::monero::network")]
|
||||
pub network: monero::Network,
|
||||
#[serde(default = "default_monero_node_pool")]
|
||||
pub monero_node_pool: bool,
|
||||
}
|
||||
|
||||
fn default_monero_node_pool() -> bool {
|
||||
false
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug, Deserialize, PartialEq, Eq, Serialize)]
|
||||
|
@ -461,6 +467,7 @@ pub fn query_user_for_initial_config(testnet: bool) -> Result<Config> {
|
|||
daemon_url: monero_daemon_url,
|
||||
finality_confirmations: None,
|
||||
network: monero_network,
|
||||
monero_node_pool: false,
|
||||
},
|
||||
tor: TorConf {
|
||||
register_hidden_service,
|
||||
|
@ -511,6 +518,7 @@ mod tests {
|
|||
daemon_url: defaults.monero_daemon_address,
|
||||
finality_confirmations: None,
|
||||
network: monero::Network::Stagenet,
|
||||
monero_node_pool: false,
|
||||
},
|
||||
tor: Default::default(),
|
||||
maker: Maker {
|
||||
|
@ -556,6 +564,7 @@ mod tests {
|
|||
daemon_url: defaults.monero_daemon_address,
|
||||
finality_confirmations: None,
|
||||
network: monero::Network::Mainnet,
|
||||
monero_node_pool: false,
|
||||
},
|
||||
tor: Default::default(),
|
||||
maker: Maker {
|
||||
|
@ -611,6 +620,7 @@ mod tests {
|
|||
daemon_url: defaults.monero_daemon_address,
|
||||
finality_confirmations: None,
|
||||
network: monero::Network::Mainnet,
|
||||
monero_node_pool: false,
|
||||
},
|
||||
tor: Default::default(),
|
||||
maker: Maker {
|
||||
|
|
|
@ -44,6 +44,28 @@ use uuid::Uuid;
|
|||
|
||||
const DEFAULT_WALLET_NAME: &str = "asb-wallet";
|
||||
|
||||
trait IntoDaemon {
|
||||
fn into_daemon(self) -> Result<Daemon>;
|
||||
}
|
||||
|
||||
impl IntoDaemon for url::Url {
|
||||
fn into_daemon(self) -> Result<Daemon> {
|
||||
let address = self.to_string();
|
||||
let ssl = self.scheme() == "https";
|
||||
|
||||
Ok(Daemon { address, ssl })
|
||||
}
|
||||
}
|
||||
|
||||
impl IntoDaemon for monero_rpc_pool::ServerInfo {
|
||||
fn into_daemon(self) -> Result<Daemon> {
|
||||
let address = format!("http://{}:{}", self.host, self.port);
|
||||
let ssl = false; // Pool server always uses HTTP locally
|
||||
|
||||
Ok(Daemon { address, ssl })
|
||||
}
|
||||
}
|
||||
|
||||
#[tokio::main]
|
||||
pub async fn main() -> Result<()> {
|
||||
rustls::crypto::ring::default_provider()
|
||||
|
@ -457,9 +479,39 @@ async fn init_monero_wallet(
|
|||
) -> Result<Arc<monero::Wallets>> {
|
||||
tracing::debug!("Initializing Monero wallets");
|
||||
|
||||
let daemon = Daemon {
|
||||
address: config.monero.daemon_url.to_string(),
|
||||
ssl: config.monero.daemon_url.as_str().contains("https"),
|
||||
let daemon = if config.monero.monero_node_pool {
|
||||
// Start the monero-rpc-pool and use it
|
||||
tracing::info!("Starting Monero RPC Pool for ASB");
|
||||
|
||||
let (server_info, _status_receiver, _pool_handle) =
|
||||
monero_rpc_pool::start_server_with_random_port(
|
||||
monero_rpc_pool::config::Config::new_random_port(
|
||||
"127.0.0.1".to_string(),
|
||||
config.data.dir.join("monero-rpc-pool"),
|
||||
),
|
||||
env_config.monero_network,
|
||||
)
|
||||
.await
|
||||
.context("Failed to start Monero RPC Pool for ASB")?;
|
||||
|
||||
let pool_url = format!("http://{}:{}", server_info.host, server_info.port);
|
||||
tracing::info!("Monero RPC Pool started for ASB on {}", pool_url);
|
||||
|
||||
server_info
|
||||
.into_daemon()
|
||||
.context("Failed to convert ServerInfo to Daemon")?
|
||||
} else {
|
||||
tracing::info!(
|
||||
"Using direct Monero daemon connection: {}",
|
||||
config.monero.daemon_url
|
||||
);
|
||||
|
||||
config
|
||||
.monero
|
||||
.daemon_url
|
||||
.clone()
|
||||
.into_daemon()
|
||||
.context("Failed to convert daemon URL to Daemon")?
|
||||
};
|
||||
|
||||
let manager = monero::Wallets::new(
|
||||
|
|
|
@ -1,4 +1,3 @@
|
|||
pub mod electrum_balancer;
|
||||
pub mod wallet;
|
||||
|
||||
mod cancel;
|
||||
|
@ -458,7 +457,7 @@ impl From<RpcErrorCode> for i64 {
|
|||
|
||||
pub fn parse_rpc_error_code(error: &anyhow::Error) -> anyhow::Result<i64> {
|
||||
// First try to extract an Electrum error from a MultiError if present
|
||||
if let Some(multi_error) = error.downcast_ref::<crate::bitcoin::electrum_balancer::MultiError>()
|
||||
if let Some(multi_error) = error.downcast_ref::<electrum_pool::MultiError>()
|
||||
{
|
||||
// Try to find the first Electrum error in the MultiError
|
||||
for single_error in multi_error.iter() {
|
||||
|
|
|
@ -41,8 +41,9 @@ use tracing::{debug_span, Instrument};
|
|||
|
||||
use super::bitcoin_address::revalidate_network;
|
||||
use super::BlockHeight;
|
||||
use crate::bitcoin::electrum_balancer::ElectrumBalancer;
|
||||
use electrum_pool::ElectrumBalancer;
|
||||
use derive_builder::Builder;
|
||||
use moka;
|
||||
|
||||
/// We allow transaction fees of up to 20% of the transferred amount to ensure
|
||||
/// that lock transactions can always be published, even when fees are high.
|
||||
|
@ -66,8 +67,10 @@ pub struct Wallet<Persister = Connection, C = Client> {
|
|||
persister: Arc<TokioMutex<Persister>>,
|
||||
/// The electrum client.
|
||||
electrum_client: Arc<TokioMutex<C>>,
|
||||
/// The mempool client.
|
||||
mempool_client: Arc<Option<mempool_client::MempoolClient>>,
|
||||
/// The cached fee estimator for the electrum client.
|
||||
cached_electrum_fee_estimator: Arc<CachedFeeEstimator<C>>,
|
||||
/// The cached fee estimator for the mempool client.
|
||||
cached_mempool_fee_estimator: Arc<Option<CachedFeeEstimator<mempool_client::MempoolClient>>>,
|
||||
/// The network this wallet is on.
|
||||
network: Network,
|
||||
/// The number of confirmations (blocks) we require for a transaction
|
||||
|
@ -83,6 +86,7 @@ pub struct Wallet<Persister = Connection, C = Client> {
|
|||
}
|
||||
|
||||
/// This is our wrapper around a bdk electrum client.
|
||||
#[derive(Clone)]
|
||||
pub struct Client {
|
||||
/// The underlying electrum balancer for load balancing across multiple servers.
|
||||
inner: Arc<ElectrumBalancer>,
|
||||
|
@ -130,7 +134,7 @@ impl WalletBuilder {
|
|||
/// Asynchronously builds the `Wallet<Connection>` using the configured parameters.
|
||||
/// This method contains the core logic for wallet initialization, including
|
||||
/// database setup, key derivation, and potential migration from older wallet formats.
|
||||
pub async fn build(self) -> Result<Wallet<Connection>> {
|
||||
pub async fn build(self) -> Result<Wallet<Connection, Client>> {
|
||||
let config = self
|
||||
.validate_config()
|
||||
.map_err(|e| anyhow!("Builder validation failed: {e}"))?;
|
||||
|
@ -293,6 +297,83 @@ pub trait EstimateFeeRate {
|
|||
fn min_relay_fee(&self) -> impl std::future::Future<Output = Result<FeeRate>> + Send;
|
||||
}
|
||||
|
||||
/// A caching wrapper around EstimateFeeRate implementations.
|
||||
///
|
||||
/// Uses Moka cache with TTL (Time To Live) expiration for both fee rate estimates
|
||||
/// and minimum relay fees to reduce the frequency of network calls to Electrum and mempool.space APIs.
|
||||
#[derive(Clone)]
|
||||
pub struct CachedFeeEstimator<T> {
|
||||
inner: T,
|
||||
fee_cache: Arc<moka::future::Cache<u32, FeeRate>>,
|
||||
min_relay_cache: Arc<moka::future::Cache<(), FeeRate>>,
|
||||
}
|
||||
|
||||
impl<T> CachedFeeEstimator<T> {
|
||||
/// Cache duration for fee estimates (2 minutes)
|
||||
const CACHE_DURATION: Duration = Duration::from_secs(120);
|
||||
/// Maximum number of cached fee rate entries (different target blocks)
|
||||
const MAX_CACHE_SIZE: u64 = 10;
|
||||
|
||||
/// Create a new caching wrapper around an EstimateFeeRate implementation.
|
||||
pub fn new(inner: T) -> Self {
|
||||
Self {
|
||||
inner,
|
||||
fee_cache: Arc::new(
|
||||
moka::future::Cache::builder()
|
||||
.max_capacity(Self::MAX_CACHE_SIZE)
|
||||
.time_to_live(Self::CACHE_DURATION)
|
||||
.build(),
|
||||
),
|
||||
min_relay_cache: Arc::new(
|
||||
moka::future::Cache::builder()
|
||||
.max_capacity(1) // Only one min relay fee value
|
||||
.time_to_live(Self::CACHE_DURATION)
|
||||
.build(),
|
||||
),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: EstimateFeeRate + Send + Sync> EstimateFeeRate for CachedFeeEstimator<T> {
|
||||
async fn estimate_feerate(&self, target_block: u32) -> Result<FeeRate> {
|
||||
// Check cache first
|
||||
if let Some(cached_rate) = self.fee_cache.get(&target_block).await {
|
||||
return Ok(cached_rate);
|
||||
}
|
||||
|
||||
// If not in cache, fetch from underlying estimator
|
||||
let fee_rate = self.inner.estimate_feerate(target_block).await?;
|
||||
|
||||
// Store in cache
|
||||
self.fee_cache.insert(target_block, fee_rate).await;
|
||||
|
||||
Ok(fee_rate)
|
||||
}
|
||||
|
||||
async fn min_relay_fee(&self) -> Result<FeeRate> {
|
||||
// Check cache first
|
||||
if let Some(cached_rate) = self.min_relay_cache.get(&()).await {
|
||||
return Ok(cached_rate);
|
||||
}
|
||||
|
||||
// If not in cache, fetch from underlying estimator
|
||||
let min_relay_fee = self.inner.min_relay_fee().await?;
|
||||
|
||||
// Store in cache
|
||||
self.min_relay_cache.insert((), min_relay_fee).await;
|
||||
|
||||
Ok(min_relay_fee)
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> std::ops::Deref for CachedFeeEstimator<T> {
|
||||
type Target = T;
|
||||
|
||||
fn deref(&self) -> &Self::Target {
|
||||
&self.inner
|
||||
}
|
||||
}
|
||||
|
||||
impl Wallet {
|
||||
/// If this many consequent addresses are unused, we stop the full scan.
|
||||
/// On old wallets we used to generate a ton of unused addresses
|
||||
|
@ -362,7 +443,7 @@ impl Wallet {
|
|||
sync_interval: Duration,
|
||||
env_config: crate::env::Config,
|
||||
tauri_handle: Option<TauriHandle>,
|
||||
) -> Result<Wallet<bdk_wallet::rusqlite::Connection>> {
|
||||
) -> Result<Wallet<bdk_wallet::rusqlite::Connection, Client>> {
|
||||
// Construct the private key, directory and wallet file for the new (>= 1.0.0) bdk wallet
|
||||
let xprivkey = seed.derive_extended_private_key(env_config.bitcoin_network)?;
|
||||
let wallet_dir = data_dir
|
||||
|
@ -425,7 +506,7 @@ impl Wallet {
|
|||
target_block: u32,
|
||||
sync_interval: Duration,
|
||||
tauri_handle: Option<TauriHandle>,
|
||||
) -> Result<Wallet<bdk_wallet::rusqlite::Connection>> {
|
||||
) -> Result<Wallet<bdk_wallet::rusqlite::Connection, Client>> {
|
||||
Self::create_new(
|
||||
seed.derive_extended_private_key(network)?,
|
||||
network,
|
||||
|
@ -458,7 +539,7 @@ impl Wallet {
|
|||
old_wallet: Option<pre_1_0_0_bdk::Export>,
|
||||
tauri_handle: Option<TauriHandle>,
|
||||
use_mempool_space_fee_estimation: bool,
|
||||
) -> Result<Wallet<Persister>>
|
||||
) -> Result<Wallet<Persister, Client>>
|
||||
where
|
||||
Persister: WalletPersister + Sized,
|
||||
<Persister as WalletPersister>::Error: std::error::Error + Send + Sync + 'static,
|
||||
|
@ -550,10 +631,16 @@ impl Wallet {
|
|||
None
|
||||
};
|
||||
|
||||
// Create cached fee estimators
|
||||
let cached_electrum_fee_estimator = Arc::new(CachedFeeEstimator::new(client.clone()));
|
||||
let cached_mempool_fee_estimator =
|
||||
Arc::new(mempool_client.clone().map(CachedFeeEstimator::new));
|
||||
|
||||
Ok(Wallet {
|
||||
wallet: wallet.into_arc_mutex_async(),
|
||||
electrum_client: client.into_arc_mutex_async(),
|
||||
mempool_client: Arc::new(mempool_client),
|
||||
cached_electrum_fee_estimator,
|
||||
cached_mempool_fee_estimator,
|
||||
persister: persister.into_arc_mutex_async(),
|
||||
tauri_handle,
|
||||
network,
|
||||
|
@ -573,7 +660,7 @@ impl Wallet {
|
|||
target_block: u32,
|
||||
tauri_handle: Option<TauriHandle>,
|
||||
use_mempool_space_fee_estimation: bool,
|
||||
) -> Result<Wallet<Persister>>
|
||||
) -> Result<Wallet<Persister, Client>>
|
||||
where
|
||||
Persister: WalletPersister + Sized,
|
||||
<Persister as WalletPersister>::Error: std::error::Error + Send + Sync + 'static,
|
||||
|
@ -596,19 +683,23 @@ impl Wallet {
|
|||
.context("Failed to open database")?
|
||||
.context("No wallet found in database")?;
|
||||
|
||||
// Create the mempool client
|
||||
let mempool_client = if use_mempool_space_fee_estimation {
|
||||
// Create the mempool client with caching
|
||||
let cached_mempool_fee_estimator = if use_mempool_space_fee_estimation {
|
||||
mempool_client::MempoolClient::new(network).inspect_err(|e| {
|
||||
tracing::warn!("Failed to create mempool client: {:?}. We will only use the Electrum server for fee estimation.", e);
|
||||
}).ok()
|
||||
}).ok().map(CachedFeeEstimator::new)
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
// Wrap the electrum client with caching
|
||||
let cached_electrum_fee_estimator = Arc::new(CachedFeeEstimator::new(client.clone()));
|
||||
|
||||
let wallet = Wallet {
|
||||
wallet: wallet.into_arc_mutex_async(),
|
||||
electrum_client: client.into_arc_mutex_async(),
|
||||
mempool_client: Arc::new(mempool_client),
|
||||
cached_electrum_fee_estimator,
|
||||
cached_mempool_fee_estimator: Arc::new(cached_mempool_fee_estimator),
|
||||
persister: persister.into_arc_mutex_async(),
|
||||
tauri_handle,
|
||||
network,
|
||||
|
@ -663,7 +754,7 @@ impl Wallet {
|
|||
kind, txid, total_count
|
||||
);
|
||||
|
||||
let multi_error = crate::bitcoin::electrum_balancer::MultiError::new(errors, context);
|
||||
let multi_error = electrum_pool::MultiError::new(errors, context);
|
||||
return Err(anyhow::Error::from(multi_error));
|
||||
}
|
||||
|
||||
|
@ -1095,10 +1186,11 @@ where
|
|||
/// If either of the clients fail but the other is successful, we use the successful one.
|
||||
/// If both clients fail, we return an error
|
||||
async fn combined_fee_rate(&self) -> Result<FeeRate> {
|
||||
let electrum_client = self.electrum_client.lock().await;
|
||||
let electrum_future = electrum_client.estimate_feerate(self.target_block);
|
||||
let electrum_future = self
|
||||
.cached_electrum_fee_estimator
|
||||
.estimate_feerate(self.target_block);
|
||||
let mempool_future = async {
|
||||
match self.mempool_client.as_ref() {
|
||||
match self.cached_mempool_fee_estimator.as_ref() {
|
||||
Some(mempool_client) => mempool_client
|
||||
.estimate_feerate(self.target_block)
|
||||
.await
|
||||
|
@ -1174,10 +1266,9 @@ where
|
|||
///
|
||||
/// Only fails if both sources fail. Always chooses the higher value.
|
||||
async fn combined_min_relay_fee(&self) -> Result<FeeRate> {
|
||||
let electrum_client = self.electrum_client.lock().await;
|
||||
let electrum_future = electrum_client.min_relay_fee();
|
||||
let electrum_future = self.cached_electrum_fee_estimator.min_relay_fee();
|
||||
let mempool_future = async {
|
||||
match self.mempool_client.as_ref() {
|
||||
match self.cached_mempool_fee_estimator.as_ref() {
|
||||
Some(mempool_client) => mempool_client.min_relay_fee().await.map(Some),
|
||||
None => Ok(None),
|
||||
}
|
||||
|
@ -2455,6 +2546,7 @@ mod mempool_client {
|
|||
/// A client for the mempool.space API.
|
||||
///
|
||||
/// This client is used to estimate the fee rate for a transaction.
|
||||
#[derive(Clone)]
|
||||
pub struct MempoolClient {
|
||||
client: reqwest::Client,
|
||||
base_url: String,
|
||||
|
@ -2751,6 +2843,7 @@ impl<T> IntoArcMutex<T> for T {
|
|||
}
|
||||
|
||||
#[cfg(test)]
|
||||
#[derive(Clone)]
|
||||
pub struct StaticFeeRate {
|
||||
fee_rate: FeeRate,
|
||||
min_relay_fee: bitcoin::Amount,
|
||||
|
@ -2856,10 +2949,13 @@ impl TestWalletBuilder {
|
|||
bitcoin::Amount::from_sat(self.min_relay_sats_per_vb),
|
||||
);
|
||||
|
||||
let cached_electrum_fee_estimator = Arc::new(CachedFeeEstimator::new(client.clone()));
|
||||
|
||||
let wallet = Wallet {
|
||||
wallet: bdk_core_wallet.into_arc_mutex_async(),
|
||||
electrum_client: client.into_arc_mutex_async(),
|
||||
mempool_client: Arc::new(None), // We don't use mempool client in tests
|
||||
cached_electrum_fee_estimator,
|
||||
cached_mempool_fee_estimator: Arc::new(None), // We don't use mempool client in tests
|
||||
persister: persister.into_arc_mutex_async(),
|
||||
tauri_handle: None,
|
||||
network: Network::Regtest,
|
||||
|
@ -3309,6 +3405,264 @@ TRACE swap::bitcoin::wallet: Bitcoin transaction status changed txid=00000000000
|
|||
});
|
||||
}
|
||||
}
|
||||
|
||||
mod cached_fee_estimator_tests {
|
||||
use super::*;
|
||||
use std::sync::atomic::{AtomicU32, Ordering};
|
||||
use std::sync::Arc;
|
||||
use tokio::time::{sleep, Duration};
|
||||
|
||||
/// Mock fee estimator that tracks how many times methods are called
|
||||
#[derive(Clone)]
|
||||
struct MockFeeEstimator {
|
||||
estimate_calls: Arc<AtomicU32>,
|
||||
min_relay_calls: Arc<AtomicU32>,
|
||||
fee_rate: FeeRate,
|
||||
min_relay_fee: FeeRate,
|
||||
delay: Duration,
|
||||
}
|
||||
|
||||
impl MockFeeEstimator {
|
||||
fn new(fee_rate: FeeRate, min_relay_fee: FeeRate) -> Self {
|
||||
Self {
|
||||
estimate_calls: Arc::new(AtomicU32::new(0)),
|
||||
min_relay_calls: Arc::new(AtomicU32::new(0)),
|
||||
fee_rate,
|
||||
min_relay_fee,
|
||||
delay: Duration::from_millis(0),
|
||||
}
|
||||
}
|
||||
|
||||
fn with_delay(mut self, delay: Duration) -> Self {
|
||||
self.delay = delay;
|
||||
self
|
||||
}
|
||||
|
||||
fn estimate_call_count(&self) -> u32 {
|
||||
self.estimate_calls.load(Ordering::SeqCst)
|
||||
}
|
||||
|
||||
fn min_relay_call_count(&self) -> u32 {
|
||||
self.min_relay_calls.load(Ordering::SeqCst)
|
||||
}
|
||||
}
|
||||
|
||||
impl EstimateFeeRate for MockFeeEstimator {
|
||||
async fn estimate_feerate(&self, _target_block: u32) -> Result<FeeRate> {
|
||||
self.estimate_calls.fetch_add(1, Ordering::SeqCst);
|
||||
if !self.delay.is_zero() {
|
||||
sleep(self.delay).await;
|
||||
}
|
||||
Ok(self.fee_rate)
|
||||
}
|
||||
|
||||
async fn min_relay_fee(&self) -> Result<FeeRate> {
|
||||
self.min_relay_calls.fetch_add(1, Ordering::SeqCst);
|
||||
if !self.delay.is_zero() {
|
||||
sleep(self.delay).await;
|
||||
}
|
||||
Ok(self.min_relay_fee)
|
||||
}
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn caches_fee_rate_estimates() {
|
||||
let mock = MockFeeEstimator::new(
|
||||
FeeRate::from_sat_per_vb(50).unwrap(),
|
||||
FeeRate::from_sat_per_vb(1).unwrap(),
|
||||
);
|
||||
let cached = CachedFeeEstimator::new(mock.clone());
|
||||
|
||||
// First call should hit the underlying estimator
|
||||
let fee1 = cached.estimate_feerate(6).await.unwrap();
|
||||
assert_eq!(fee1, FeeRate::from_sat_per_vb(50).unwrap());
|
||||
assert_eq!(mock.estimate_call_count(), 1);
|
||||
|
||||
// Second call with same target should use cache
|
||||
let fee2 = cached.estimate_feerate(6).await.unwrap();
|
||||
assert_eq!(fee2, FeeRate::from_sat_per_vb(50).unwrap());
|
||||
assert_eq!(mock.estimate_call_count(), 1); // Still 1, not 2
|
||||
|
||||
// Different target should hit the underlying estimator again
|
||||
let fee3 = cached.estimate_feerate(12).await.unwrap();
|
||||
assert_eq!(fee3, FeeRate::from_sat_per_vb(50).unwrap());
|
||||
assert_eq!(mock.estimate_call_count(), 2);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn caches_min_relay_fee() {
|
||||
let mock = MockFeeEstimator::new(
|
||||
FeeRate::from_sat_per_vb(50).unwrap(),
|
||||
FeeRate::from_sat_per_vb(1).unwrap(),
|
||||
);
|
||||
let cached = CachedFeeEstimator::new(mock.clone());
|
||||
|
||||
// First call should hit the underlying estimator
|
||||
let fee1 = cached.min_relay_fee().await.unwrap();
|
||||
assert_eq!(fee1, FeeRate::from_sat_per_vb(1).unwrap());
|
||||
assert_eq!(mock.min_relay_call_count(), 1);
|
||||
|
||||
// Second call should use cache
|
||||
let fee2 = cached.min_relay_fee().await.unwrap();
|
||||
assert_eq!(fee2, FeeRate::from_sat_per_vb(1).unwrap());
|
||||
assert_eq!(mock.min_relay_call_count(), 1); // Still 1, not 2
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn concurrent_requests_dont_duplicate_calls() {
|
||||
let mock = MockFeeEstimator::new(
|
||||
FeeRate::from_sat_per_vb(25).unwrap(),
|
||||
FeeRate::from_sat_per_vb(1).unwrap(),
|
||||
)
|
||||
.with_delay(Duration::from_millis(50)); // Add delay to simulate network call
|
||||
|
||||
let cached = CachedFeeEstimator::new(mock.clone());
|
||||
|
||||
// First, make one call to populate the cache
|
||||
let _initial = cached.estimate_feerate(6).await.unwrap();
|
||||
assert_eq!(mock.estimate_call_count(), 1);
|
||||
|
||||
// Now make multiple concurrent requests for the same target
|
||||
// These should all hit the cache
|
||||
let handles: Vec<_> = (0..5)
|
||||
.map(|_| {
|
||||
let cached = cached.clone();
|
||||
tokio::spawn(async move { cached.estimate_feerate(6).await })
|
||||
})
|
||||
.collect();
|
||||
|
||||
// Wait for all requests to complete
|
||||
let results: Vec<_> = futures::future::join_all(handles).await;
|
||||
|
||||
// All should succeed with the same value
|
||||
for result in results {
|
||||
let fee = result.unwrap().unwrap();
|
||||
assert_eq!(fee, FeeRate::from_sat_per_vb(25).unwrap());
|
||||
}
|
||||
|
||||
// The underlying estimator should still only have been called once
|
||||
// since all subsequent requests should hit the cache
|
||||
assert_eq!(
|
||||
mock.estimate_call_count(),
|
||||
1,
|
||||
"Expected exactly 1 call, got {}",
|
||||
mock.estimate_call_count()
|
||||
);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn different_target_blocks_cached_separately() {
|
||||
let mock = MockFeeEstimator::new(
|
||||
FeeRate::from_sat_per_vb(30).unwrap(),
|
||||
FeeRate::from_sat_per_vb(1).unwrap(),
|
||||
);
|
||||
let cached = CachedFeeEstimator::new(mock.clone());
|
||||
|
||||
// Request different target blocks
|
||||
let _fee1 = cached.estimate_feerate(1).await.unwrap();
|
||||
let _fee2 = cached.estimate_feerate(6).await.unwrap();
|
||||
let _fee3 = cached.estimate_feerate(12).await.unwrap();
|
||||
|
||||
assert_eq!(mock.estimate_call_count(), 3);
|
||||
|
||||
// Request same targets again - should use cache
|
||||
let _fee1_cached = cached.estimate_feerate(1).await.unwrap();
|
||||
let _fee2_cached = cached.estimate_feerate(6).await.unwrap();
|
||||
let _fee3_cached = cached.estimate_feerate(12).await.unwrap();
|
||||
|
||||
assert_eq!(mock.estimate_call_count(), 3); // Still 3, no additional calls
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn cache_respects_ttl() {
|
||||
let mock = MockFeeEstimator::new(
|
||||
FeeRate::from_sat_per_vb(40).unwrap(),
|
||||
FeeRate::from_sat_per_vb(1).unwrap(),
|
||||
);
|
||||
let cached = CachedFeeEstimator::new(mock.clone());
|
||||
|
||||
// First call
|
||||
let _fee1 = cached.estimate_feerate(6).await.unwrap();
|
||||
assert_eq!(mock.estimate_call_count(), 1);
|
||||
|
||||
// Wait for cache to expire (2 minutes + small buffer)
|
||||
// Note: In a real test environment, you might want to use a shorter TTL
|
||||
// or mock the time. For now, we'll just verify the cache works within TTL.
|
||||
|
||||
// Immediate second call should use cache
|
||||
let _fee2 = cached.estimate_feerate(6).await.unwrap();
|
||||
assert_eq!(mock.estimate_call_count(), 1);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn error_propagation() {
|
||||
#[derive(Clone)]
|
||||
struct FailingEstimator;
|
||||
|
||||
impl EstimateFeeRate for FailingEstimator {
|
||||
async fn estimate_feerate(&self, _target_block: u32) -> Result<FeeRate> {
|
||||
Err(anyhow::anyhow!("Network error"))
|
||||
}
|
||||
|
||||
async fn min_relay_fee(&self) -> Result<FeeRate> {
|
||||
Err(anyhow::anyhow!("Network error"))
|
||||
}
|
||||
}
|
||||
|
||||
let cached = CachedFeeEstimator::new(FailingEstimator);
|
||||
|
||||
// Errors should be propagated, not cached
|
||||
let result1 = cached.estimate_feerate(6).await;
|
||||
assert!(result1.is_err());
|
||||
assert!(result1.unwrap_err().to_string().contains("Network error"));
|
||||
|
||||
let result2 = cached.min_relay_fee().await;
|
||||
assert!(result2.is_err());
|
||||
assert!(result2.unwrap_err().to_string().contains("Network error"));
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn cache_capacity_limits() {
|
||||
let mock = MockFeeEstimator::new(
|
||||
FeeRate::from_sat_per_vb(35).unwrap(),
|
||||
FeeRate::from_sat_per_vb(1).unwrap(),
|
||||
);
|
||||
let cached = CachedFeeEstimator::new(mock.clone());
|
||||
|
||||
// Fill cache beyond capacity (MAX_CACHE_SIZE = 10)
|
||||
for target in 1..=15 {
|
||||
let _fee = cached.estimate_feerate(target).await.unwrap();
|
||||
}
|
||||
|
||||
assert_eq!(mock.estimate_call_count(), 15);
|
||||
|
||||
// Request some of the earlier targets - some might have been evicted
|
||||
// Due to LRU eviction, the earliest entries might be gone
|
||||
let _fee = cached.estimate_feerate(1).await.unwrap();
|
||||
|
||||
// The exact behavior depends on Moka's eviction policy,
|
||||
// but we should see that the cache is working within its limits
|
||||
assert!(mock.estimate_call_count() >= 15);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn clone_shares_cache() {
|
||||
let mock = MockFeeEstimator::new(
|
||||
FeeRate::from_sat_per_vb(45).unwrap(),
|
||||
FeeRate::from_sat_per_vb(1).unwrap(),
|
||||
);
|
||||
let cached1 = CachedFeeEstimator::new(mock.clone());
|
||||
let cached2 = cached1.clone();
|
||||
|
||||
// First estimator makes a call
|
||||
let _fee1 = cached1.estimate_feerate(6).await.unwrap();
|
||||
assert_eq!(mock.estimate_call_count(), 1);
|
||||
|
||||
// Second estimator should use the shared cache
|
||||
let _fee2 = cached2.estimate_feerate(6).await.unwrap();
|
||||
assert_eq!(mock.estimate_call_count(), 1); // Still 1, cache was shared
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone)]
|
||||
|
|
|
@ -20,7 +20,9 @@ use std::fmt;
|
|||
use std::future::Future;
|
||||
use std::path::{Path, PathBuf};
|
||||
use std::sync::{Arc, Once};
|
||||
use tauri_bindings::{TauriBackgroundProgress, TauriContextStatusEvent, TauriEmitter, TauriHandle};
|
||||
use tauri_bindings::{
|
||||
MoneroNodeConfig, TauriBackgroundProgress, TauriContextStatusEvent, TauriEmitter, TauriHandle,
|
||||
};
|
||||
use tokio::sync::{broadcast, broadcast::Sender, Mutex as TokioMutex, RwLock};
|
||||
use tokio::task::JoinHandle;
|
||||
use tor_rtcompat::tokio::TokioRustlsRuntime;
|
||||
|
@ -188,12 +190,13 @@ pub struct Context {
|
|||
bitcoin_wallet: Option<Arc<bitcoin::Wallet>>,
|
||||
monero_manager: Option<Arc<monero::Wallets>>,
|
||||
tor_client: Option<Arc<TorClient<TokioRustlsRuntime>>>,
|
||||
monero_rpc_pool_handle: Option<Arc<monero_rpc_pool::PoolHandle>>,
|
||||
}
|
||||
|
||||
/// A conveniant builder struct for [`Context`].
|
||||
#[must_use = "ContextBuilder must be built to be useful"]
|
||||
pub struct ContextBuilder {
|
||||
monero: Option<Monero>,
|
||||
monero_config: Option<MoneroNodeConfig>,
|
||||
bitcoin: Option<Bitcoin>,
|
||||
data: Option<PathBuf>,
|
||||
is_testnet: bool,
|
||||
|
@ -216,7 +219,7 @@ impl ContextBuilder {
|
|||
/// Basic builder with default options for mainnet
|
||||
pub fn mainnet() -> Self {
|
||||
ContextBuilder {
|
||||
monero: None,
|
||||
monero_config: None,
|
||||
bitcoin: None,
|
||||
data: None,
|
||||
is_testnet: false,
|
||||
|
@ -235,8 +238,8 @@ impl ContextBuilder {
|
|||
}
|
||||
|
||||
/// Configures the Context to initialize a Monero wallet with the given configuration.
|
||||
pub fn with_monero(mut self, monero: impl Into<Option<Monero>>) -> Self {
|
||||
self.monero = monero.into();
|
||||
pub fn with_monero(mut self, monero_config: impl Into<Option<MoneroNodeConfig>>) -> Self {
|
||||
self.monero_config = monero_config.into();
|
||||
self
|
||||
}
|
||||
|
||||
|
@ -247,8 +250,8 @@ impl ContextBuilder {
|
|||
}
|
||||
|
||||
/// Attach a handle to Tauri to the Context for emitting events etc.
|
||||
pub fn with_tauri(mut self, tauri: impl Into<Option<TauriHandle>>) -> Self {
|
||||
self.tauri_handle = tauri.into();
|
||||
pub fn with_tauri(mut self, tauri_handle: impl Into<Option<TauriHandle>>) -> Self {
|
||||
self.tauri_handle = tauri_handle.into();
|
||||
self
|
||||
}
|
||||
|
||||
|
@ -364,17 +367,61 @@ impl ContextBuilder {
|
|||
};
|
||||
|
||||
let initialize_monero_wallet = async {
|
||||
match self.monero {
|
||||
Some(monero) => {
|
||||
match self.monero_config {
|
||||
Some(monero_config) => {
|
||||
let monero_progress_handle = tauri_handle
|
||||
.new_background_process_with_initial_progress(
|
||||
TauriBackgroundProgress::OpeningMoneroWallet,
|
||||
(),
|
||||
);
|
||||
|
||||
// Handle the different monero configurations
|
||||
let (monero_node_address, rpc_pool_handle) = match monero_config {
|
||||
MoneroNodeConfig::Pool => {
|
||||
// Start RPC pool and use it
|
||||
match monero_rpc_pool::start_server_with_random_port(
|
||||
monero_rpc_pool::config::Config::new_random_port(
|
||||
"127.0.0.1".to_string(),
|
||||
data_dir.join("monero-rpc-pool"),
|
||||
),
|
||||
match self.is_testnet {
|
||||
true => crate::monero::Network::Stagenet,
|
||||
false => crate::monero::Network::Mainnet,
|
||||
},
|
||||
)
|
||||
.await
|
||||
{
|
||||
Ok((server_info, mut status_receiver, pool_handle)) => {
|
||||
let rpc_url =
|
||||
format!("http://{}:{}", server_info.host, server_info.port);
|
||||
tracing::info!("Monero RPC Pool started on {}", rpc_url);
|
||||
|
||||
// Start listening for pool status updates and forward them to frontend
|
||||
if let Some(ref handle) = self.tauri_handle {
|
||||
let pool_tauri_handle = handle.clone();
|
||||
tokio::spawn(async move {
|
||||
while let Ok(status) = status_receiver.recv().await {
|
||||
pool_tauri_handle.emit_pool_status_update(status);
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
(Some(rpc_url), Some(Arc::new(pool_handle)))
|
||||
}
|
||||
Err(e) => {
|
||||
tracing::error!("Failed to start Monero RPC Pool: {}", e);
|
||||
(None, None)
|
||||
}
|
||||
}
|
||||
}
|
||||
MoneroNodeConfig::SingleNode { url } => {
|
||||
(if url.is_empty() { None } else { Some(url) }, None)
|
||||
}
|
||||
};
|
||||
|
||||
let wallets = init_monero_wallet(
|
||||
data_dir.as_path(),
|
||||
monero.monero_node_address.map(|url| url.to_string()),
|
||||
monero_node_address,
|
||||
env_config,
|
||||
tauri_handle.clone(),
|
||||
)
|
||||
|
@ -382,9 +429,9 @@ impl ContextBuilder {
|
|||
|
||||
monero_progress_handle.finish();
|
||||
|
||||
Ok(Some(wallets))
|
||||
Ok((Some(wallets), rpc_pool_handle))
|
||||
}
|
||||
None => Ok(None),
|
||||
None => Ok((None, None)),
|
||||
}
|
||||
};
|
||||
|
||||
|
@ -405,7 +452,7 @@ impl ContextBuilder {
|
|||
Ok(maybe_tor_client)
|
||||
};
|
||||
|
||||
let (bitcoin_wallet, monero_manager, tor) = tokio::try_join!(
|
||||
let (bitcoin_wallet, (monero_manager, monero_rpc_pool_handle), tor) = tokio::try_join!(
|
||||
initialize_bitcoin_wallet,
|
||||
initialize_monero_wallet,
|
||||
initialize_tor_client,
|
||||
|
@ -443,6 +490,7 @@ impl ContextBuilder {
|
|||
tasks,
|
||||
tauri_handle: self.tauri_handle,
|
||||
tor_client: tor,
|
||||
monero_rpc_pool_handle,
|
||||
};
|
||||
|
||||
Ok(context)
|
||||
|
@ -476,6 +524,7 @@ impl Context {
|
|||
tasks: PendingTaskList::default().into(),
|
||||
tauri_handle: None,
|
||||
tor_client: None,
|
||||
monero_rpc_pool_handle: None,
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -507,7 +556,7 @@ async fn init_bitcoin_wallet(
|
|||
env_config: EnvConfig,
|
||||
bitcoin_target_block: u16,
|
||||
tauri_handle_option: Option<TauriHandle>,
|
||||
) -> Result<bitcoin::Wallet> {
|
||||
) -> Result<bitcoin::Wallet<bdk_wallet::rusqlite::Connection, bitcoin::wallet::Client>> {
|
||||
let mut builder = bitcoin::wallet::WalletBuilder::default()
|
||||
.seed(seed.clone())
|
||||
.network(env_config.bitcoin_network)
|
||||
|
@ -637,6 +686,23 @@ impl Config {
|
|||
}
|
||||
}
|
||||
|
||||
impl From<Monero> for MoneroNodeConfig {
|
||||
fn from(monero: Monero) -> Self {
|
||||
match monero.monero_node_address {
|
||||
Some(url) => MoneroNodeConfig::SingleNode {
|
||||
url: url.to_string(),
|
||||
},
|
||||
None => MoneroNodeConfig::Pool,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl From<Monero> for Option<MoneroNodeConfig> {
|
||||
fn from(monero: Monero) -> Self {
|
||||
Some(MoneroNodeConfig::from(monero))
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
pub mod api_test {
|
||||
use super::*;
|
||||
|
|
|
@ -1195,10 +1195,27 @@ pub async fn monero_recovery(
|
|||
#[tracing::instrument(fields(method = "get_current_swap"), skip(context))]
|
||||
pub async fn get_current_swap(context: Arc<Context>) -> Result<serde_json::Value> {
|
||||
Ok(json!({
|
||||
"swap_id": context.swap_lock.get_current_swap_id().await
|
||||
"swap_id": context.swap_lock.get_current_swap_id().await,
|
||||
}))
|
||||
}
|
||||
|
||||
pub async fn resolve_approval_request(
|
||||
resolve_approval: ResolveApprovalArgs,
|
||||
ctx: Arc<Context>,
|
||||
) -> Result<ResolveApprovalResponse> {
|
||||
let request_id = Uuid::parse_str(&resolve_approval.request_id).context("Invalid request ID")?;
|
||||
|
||||
if let Some(handle) = ctx.tauri_handle.clone() {
|
||||
handle
|
||||
.resolve_approval(request_id, resolve_approval.accept)
|
||||
.await?;
|
||||
} else {
|
||||
bail!("Cannot resolve approval without a Tauri handle");
|
||||
}
|
||||
|
||||
Ok(ResolveApprovalResponse { success: true })
|
||||
}
|
||||
|
||||
fn qr_code(value: &impl ToString) -> Result<String> {
|
||||
let code = QrCode::new(value.to_string())?;
|
||||
let qr_code = code
|
||||
|
@ -1353,6 +1370,9 @@ struct UnknownMoneroNetwork(String);
|
|||
|
||||
impl CheckMoneroNodeArgs {
|
||||
pub async fn request(self) -> Result<CheckMoneroNodeResponse> {
|
||||
let url = self.url.clone();
|
||||
let network_str = self.network.clone();
|
||||
|
||||
let network = match self.network.to_lowercase().as_str() {
|
||||
// When the GUI says testnet, it means monero stagenet
|
||||
"mainnet" => Network::Mainnet,
|
||||
|
@ -1373,11 +1393,20 @@ impl CheckMoneroNodeArgs {
|
|||
return Ok(CheckMoneroNodeResponse { available: false });
|
||||
};
|
||||
|
||||
let Ok(available) = monero_daemon.is_available(&CLIENT).await else {
|
||||
return Ok(CheckMoneroNodeResponse { available: false });
|
||||
};
|
||||
match monero_daemon.is_available(&CLIENT).await {
|
||||
Ok(available) => Ok(CheckMoneroNodeResponse { available }),
|
||||
Err(e) => {
|
||||
tracing::error!(
|
||||
url = %url,
|
||||
network = %network_str,
|
||||
error = ?e,
|
||||
error_chain = %format!("{:#}", e),
|
||||
"Failed to check monero node availability"
|
||||
);
|
||||
|
||||
Ok(CheckMoneroNodeResponse { available })
|
||||
Ok(CheckMoneroNodeResponse { available: false })
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -1410,14 +1439,14 @@ impl CheckElectrumNodeArgs {
|
|||
}
|
||||
|
||||
#[typeshare]
|
||||
#[derive(Deserialize, Serialize)]
|
||||
#[derive(Debug, Eq, PartialEq, Serialize, Deserialize)]
|
||||
pub struct ResolveApprovalArgs {
|
||||
pub request_id: String,
|
||||
pub accept: bool,
|
||||
}
|
||||
|
||||
#[typeshare]
|
||||
#[derive(Deserialize, Serialize)]
|
||||
#[derive(Serialize, Deserialize, Debug)]
|
||||
pub struct ResolveApprovalResponse {
|
||||
pub success: bool,
|
||||
}
|
||||
|
@ -1426,14 +1455,6 @@ impl Request for ResolveApprovalArgs {
|
|||
type Response = ResolveApprovalResponse;
|
||||
|
||||
async fn request(self, ctx: Arc<Context>) -> Result<Self::Response> {
|
||||
let request_id = Uuid::parse_str(&self.request_id).context("Invalid request ID")?;
|
||||
|
||||
if let Some(handle) = ctx.tauri_handle.clone() {
|
||||
handle.resolve_approval(request_id, self.accept).await?;
|
||||
} else {
|
||||
bail!("Cannot resolve approval without a Tauri handle");
|
||||
}
|
||||
|
||||
Ok(ResolveApprovalResponse { success: true })
|
||||
resolve_approval_request(self, ctx).await
|
||||
}
|
||||
}
|
||||
|
|
|
@ -3,6 +3,7 @@ use crate::bitcoin;
|
|||
use crate::{bitcoin::ExpiredTimelocks, monero, network::quote::BidQuote};
|
||||
use anyhow::{anyhow, Context, Result};
|
||||
use bitcoin::Txid;
|
||||
use monero_rpc_pool::pool::PoolStatus;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::collections::HashMap;
|
||||
use std::future::Future;
|
||||
|
@ -12,7 +13,6 @@ use std::time::{Duration, SystemTime, UNIX_EPOCH};
|
|||
use strum::Display;
|
||||
use tokio::sync::{oneshot, Mutex as TokioMutex};
|
||||
use typeshare::typeshare;
|
||||
use url::Url;
|
||||
use uuid::Uuid;
|
||||
|
||||
#[typeshare]
|
||||
|
@ -27,6 +27,7 @@ pub enum TauriEvent {
|
|||
TimelockChange(TauriTimelockChangeEvent),
|
||||
Approval(ApprovalRequest),
|
||||
BackgroundProgress(TauriBackgroundProgressWrapper),
|
||||
PoolStatusUpdate(PoolStatus),
|
||||
}
|
||||
|
||||
const TAURI_UNIFIED_EVENT_NAME: &str = "tauri-unified-event";
|
||||
|
@ -297,6 +298,10 @@ pub trait TauriEmitter {
|
|||
));
|
||||
}
|
||||
|
||||
fn emit_pool_status_update(&self, status: PoolStatus) {
|
||||
self.emit_unified_event(TauriEvent::PoolStatusUpdate(status));
|
||||
}
|
||||
|
||||
/// Create a new background progress handle for tracking a specific type of progress
|
||||
fn new_background_process<T: Clone>(
|
||||
&self,
|
||||
|
@ -609,14 +614,14 @@ pub enum TauriSwapProgressEvent {
|
|||
BtcLockTxInMempool {
|
||||
#[typeshare(serialized_as = "string")]
|
||||
btc_lock_txid: bitcoin::Txid,
|
||||
#[typeshare(serialized_as = "number")]
|
||||
btc_lock_confirmations: u64,
|
||||
#[typeshare(serialized_as = "Option<number>")]
|
||||
btc_lock_confirmations: Option<u64>,
|
||||
},
|
||||
XmrLockTxInMempool {
|
||||
#[typeshare(serialized_as = "string")]
|
||||
xmr_lock_txid: monero::TxHash,
|
||||
#[typeshare(serialized_as = "number")]
|
||||
xmr_lock_tx_confirmations: u64,
|
||||
#[typeshare(serialized_as = "Option<number>")]
|
||||
xmr_lock_tx_confirmations: Option<u64>,
|
||||
},
|
||||
XmrLocked,
|
||||
EncryptedSignatureSent,
|
||||
|
@ -697,13 +702,20 @@ pub enum BackgroundRefundState {
|
|||
Completed,
|
||||
}
|
||||
|
||||
#[typeshare]
|
||||
#[derive(Debug, Serialize, Deserialize, Clone)]
|
||||
#[serde(tag = "type", content = "content")]
|
||||
pub enum MoneroNodeConfig {
|
||||
Pool,
|
||||
SingleNode { url: String },
|
||||
}
|
||||
|
||||
/// This struct contains the settings for the Context
|
||||
#[typeshare]
|
||||
#[derive(Debug, Serialize, Deserialize, Clone)]
|
||||
pub struct TauriSettings {
|
||||
/// The URL of the Monero node e.g `http://xmr.node:18081`
|
||||
#[typeshare(serialized_as = "Option<string>")]
|
||||
pub monero_node_url: Option<Url>,
|
||||
/// Configuration for Monero node connection
|
||||
pub monero_node_config: MoneroNodeConfig,
|
||||
/// The URLs of the Electrum RPC servers e.g `["ssl://bitcoin.com:50001", "ssl://backup.com:50001"]`
|
||||
pub electrum_rpc_urls: Vec<String>,
|
||||
/// Whether to initialize and use a tor client.
|
||||
|
|
|
@ -67,7 +67,16 @@ pub fn init(
|
|||
"libp2p_dcutr",
|
||||
"monero_cpp",
|
||||
];
|
||||
let OUR_CRATES: Vec<&str> = vec!["swap", "asb", "monero_sys", "unstoppableswap-gui-rs"];
|
||||
let OUR_CRATES: Vec<&str> = vec![
|
||||
"swap",
|
||||
"asb",
|
||||
"monero_sys",
|
||||
"unstoppableswap-gui-rs",
|
||||
];
|
||||
|
||||
let INFO_LEVEL_CRATES: Vec<&str> = vec![
|
||||
"monero_rpc_pool",
|
||||
];
|
||||
|
||||
// General log file for non-verbose logs
|
||||
let file_appender: RollingFileAppender = tracing_appender::rolling::never(&dir, "swap-all.log");
|
||||
|
@ -89,8 +98,10 @@ pub fn init(
|
|||
.with_ansi(false)
|
||||
.with_timer(UtcTime::rfc_3339())
|
||||
.with_target(false)
|
||||
.with_file(true)
|
||||
.with_line_number(true)
|
||||
.json()
|
||||
.with_filter(env_filter(level_filter, OUR_CRATES.clone())?);
|
||||
.with_filter(env_filter_with_info_crates(level_filter, OUR_CRATES.clone(), INFO_LEVEL_CRATES.clone())?);
|
||||
|
||||
// Layer for writing to the verbose log file
|
||||
// Crates: All crates with different levels (libp2p at INFO+, others at TRACE)
|
||||
|
@ -100,12 +111,15 @@ pub fn init(
|
|||
.with_ansi(false)
|
||||
.with_timer(UtcTime::rfc_3339())
|
||||
.with_target(false)
|
||||
.with_file(true)
|
||||
.with_line_number(true)
|
||||
.json()
|
||||
.with_filter(env_filter_with_libp2p_info(
|
||||
.with_filter(env_filter_with_all_crates(
|
||||
LevelFilter::TRACE,
|
||||
OUR_CRATES.clone(),
|
||||
LIBP2P_CRATES.clone(),
|
||||
TOR_CRATES.clone(),
|
||||
INFO_LEVEL_CRATES.clone(),
|
||||
)?);
|
||||
|
||||
// Layer for writing to the terminal
|
||||
|
@ -116,7 +130,9 @@ pub fn init(
|
|||
.with_writer(std::io::stderr)
|
||||
.with_ansi(is_terminal)
|
||||
.with_timer(UtcTime::rfc_3339())
|
||||
.with_target(true);
|
||||
.with_target(true)
|
||||
.with_file(true)
|
||||
.with_line_number(true);
|
||||
|
||||
// Layer for writing to the Tauri guest. This will be displayed in the GUI.
|
||||
// Crates: All crates with libp2p at INFO+ level
|
||||
|
@ -126,24 +142,28 @@ pub fn init(
|
|||
.with_ansi(false)
|
||||
.with_timer(UtcTime::rfc_3339())
|
||||
.with_target(true)
|
||||
.with_file(true)
|
||||
.with_line_number(true)
|
||||
.json()
|
||||
.with_filter(env_filter_with_libp2p_info(
|
||||
.with_filter(env_filter_with_all_crates(
|
||||
level_filter,
|
||||
OUR_CRATES.clone(),
|
||||
LIBP2P_CRATES.clone(),
|
||||
TOR_CRATES.clone(),
|
||||
INFO_LEVEL_CRATES.clone(),
|
||||
)?);
|
||||
|
||||
// If trace_stdout is true, we log all messages to the terminal
|
||||
// Otherwise, we only log the bare minimum
|
||||
let terminal_layer_env_filter = match trace_stdout {
|
||||
true => env_filter_with_libp2p_info(
|
||||
true => env_filter_with_all_crates(
|
||||
LevelFilter::TRACE,
|
||||
OUR_CRATES.clone(),
|
||||
LIBP2P_CRATES.clone(),
|
||||
TOR_CRATES.clone(),
|
||||
INFO_LEVEL_CRATES.clone(),
|
||||
)?,
|
||||
false => env_filter(level_filter, OUR_CRATES.clone())?,
|
||||
false => env_filter_with_info_crates(level_filter, OUR_CRATES.clone(), INFO_LEVEL_CRATES.clone())?,
|
||||
};
|
||||
|
||||
let final_terminal_layer = match format {
|
||||
|
@ -185,6 +205,29 @@ fn env_filter(level_filter: LevelFilter, crates: Vec<&str>) -> Result<EnvFilter>
|
|||
Ok(filter)
|
||||
}
|
||||
|
||||
/// This function controls which crate's logs actually get logged and from which level, with info-level crates at INFO level or higher.
|
||||
fn env_filter_with_info_crates(
|
||||
level_filter: LevelFilter,
|
||||
our_crates: Vec<&str>,
|
||||
info_level_crates: Vec<&str>,
|
||||
) -> Result<EnvFilter> {
|
||||
let mut filter = EnvFilter::from_default_env();
|
||||
|
||||
// Add directives for each crate in the provided list
|
||||
for crate_name in our_crates {
|
||||
filter = filter.add_directive(Directive::from_str(&format!(
|
||||
"{}={}",
|
||||
crate_name, &level_filter
|
||||
))?);
|
||||
}
|
||||
|
||||
for crate_name in info_level_crates {
|
||||
filter = filter.add_directive(Directive::from_str(&format!("{}=INFO", crate_name))?);
|
||||
}
|
||||
|
||||
Ok(filter)
|
||||
}
|
||||
|
||||
/// This function controls which crate's logs actually get logged and from which level, with libp2p crates at INFO level or higher.
|
||||
fn env_filter_with_libp2p_info(
|
||||
level_filter: LevelFilter,
|
||||
|
@ -216,6 +259,42 @@ fn env_filter_with_libp2p_info(
|
|||
Ok(filter)
|
||||
}
|
||||
|
||||
/// This function controls which crate's logs actually get logged and from which level, including all crate categories.
|
||||
fn env_filter_with_all_crates(
|
||||
level_filter: LevelFilter,
|
||||
our_crates: Vec<&str>,
|
||||
libp2p_crates: Vec<&str>,
|
||||
tor_crates: Vec<&str>,
|
||||
info_level_crates: Vec<&str>,
|
||||
) -> Result<EnvFilter> {
|
||||
let mut filter = EnvFilter::from_default_env();
|
||||
|
||||
// Add directives for each crate in the provided list
|
||||
for crate_name in our_crates {
|
||||
filter = filter.add_directive(Directive::from_str(&format!(
|
||||
"{}={}",
|
||||
crate_name, &level_filter
|
||||
))?);
|
||||
}
|
||||
|
||||
for crate_name in libp2p_crates {
|
||||
filter = filter.add_directive(Directive::from_str(&format!("{}=INFO", crate_name))?);
|
||||
}
|
||||
|
||||
for crate_name in tor_crates {
|
||||
filter = filter.add_directive(Directive::from_str(&format!(
|
||||
"{}={}",
|
||||
crate_name, &level_filter
|
||||
))?);
|
||||
}
|
||||
|
||||
for crate_name in info_level_crates {
|
||||
filter = filter.add_directive(Directive::from_str(&format!("{}=INFO", crate_name))?);
|
||||
}
|
||||
|
||||
Ok(filter)
|
||||
}
|
||||
|
||||
/// A writer that forwards tracing log messages to the tauri guest.
|
||||
#[derive(Clone)]
|
||||
pub struct TauriWriter {
|
||||
|
|
|
@ -146,23 +146,9 @@ async fn next_state(
|
|||
BobState::SwapSetupCompleted(state2)
|
||||
}
|
||||
BobState::SwapSetupCompleted(state2) => {
|
||||
// Record the current monero wallet block height so we don't have to scan from
|
||||
// block 0 once we create the redeem wallet.
|
||||
// This has to be done **before** the Bitcoin is locked in order to ensure that
|
||||
// if Bob goes offline the recorded wallet-height is correct.
|
||||
// If we only record this later, it can happen that Bob publishes the Bitcoin
|
||||
// transaction, goes offline, while offline Alice publishes Monero.
|
||||
// If the Monero transaction gets confirmed before Bob comes online again then
|
||||
// Bob would record a wallet-height that is past the lock transaction height,
|
||||
// which can lead to the wallet not detect the transaction.
|
||||
let monero_wallet_restore_blockheight = monero_wallet
|
||||
.blockchain_height()
|
||||
.await
|
||||
.context("Failed to fetch current Monero blockheight")?;
|
||||
|
||||
// Alice and Bob have exchanged all necessary signatures
|
||||
let xmr_receive_amount = state2.xmr;
|
||||
|
||||
// Alice and Bob have exchanged info
|
||||
// Sign the Bitcoin lock transaction
|
||||
let (state3, tx_lock) = state2.lock_btc().await?;
|
||||
let signed_tx = bitcoin_wallet
|
||||
|
@ -184,8 +170,9 @@ async fn next_state(
|
|||
swap_id,
|
||||
});
|
||||
|
||||
// We request approval before publishing the Bitcoin lock transaction, as the exchange rate determined at this step might be different from the
|
||||
// we previously displayed to the user.
|
||||
// We request approval before publishing the Bitcoin lock transaction,
|
||||
// as the exchange rate determined at this step might be different
|
||||
// from the one we previously displayed to the user.
|
||||
let approval_result = event_emitter
|
||||
.request_approval(request, PRE_BTC_LOCK_APPROVAL_TIMEOUT_SECS)
|
||||
.await;
|
||||
|
@ -194,6 +181,20 @@ async fn next_state(
|
|||
Ok(true) => {
|
||||
tracing::debug!("User approved swap offer");
|
||||
|
||||
// Record the current monero wallet block height so we don't have to scan from
|
||||
// block 0 once we create the redeem wallet.
|
||||
// This has to be done **before** the Bitcoin is locked in order to ensure that
|
||||
// if Bob goes offline the recorded wallet-height is correct.
|
||||
// If we only record this later, it can happen that Bob publishes the Bitcoin
|
||||
// transaction, goes offline, while offline Alice publishes Monero.
|
||||
// If the Monero transaction gets confirmed before Bob comes online again then
|
||||
// Bob would record a wallet-height that is past the lock transaction height,
|
||||
// which can lead to the wallet not detect the transaction.
|
||||
let monero_wallet_restore_blockheight = monero_wallet
|
||||
.blockchain_height()
|
||||
.await
|
||||
.context("Failed to fetch current Monero blockheight")?;
|
||||
|
||||
// Publish the signed Bitcoin lock transaction
|
||||
let (..) = bitcoin_wallet.broadcast(signed_tx, "lock").await?;
|
||||
|
||||
|
@ -224,7 +225,7 @@ async fn next_state(
|
|||
swap_id,
|
||||
TauriSwapProgressEvent::BtcLockTxInMempool {
|
||||
btc_lock_txid: state3.tx_lock_id(),
|
||||
btc_lock_confirmations: 0,
|
||||
btc_lock_confirmations: None,
|
||||
},
|
||||
);
|
||||
|
||||
|
@ -289,7 +290,7 @@ async fn next_state(
|
|||
swap_id,
|
||||
TauriSwapProgressEvent::BtcLockTxInMempool {
|
||||
btc_lock_txid: state3.tx_lock_id(),
|
||||
btc_lock_confirmations: u64::from(confirmed.confirmations()),
|
||||
btc_lock_confirmations: Some(u64::from(confirmed.confirmations())),
|
||||
},
|
||||
);
|
||||
}
|
||||
|
@ -334,7 +335,7 @@ async fn next_state(
|
|||
swap_id,
|
||||
TauriSwapProgressEvent::XmrLockTxInMempool {
|
||||
xmr_lock_txid: lock_transfer_proof.tx_hash(),
|
||||
xmr_lock_tx_confirmations: 0,
|
||||
xmr_lock_tx_confirmations: None,
|
||||
},
|
||||
);
|
||||
|
||||
|
@ -369,7 +370,7 @@ async fn next_state(
|
|||
swap_id,
|
||||
TauriSwapProgressEvent::XmrLockTxInMempool {
|
||||
xmr_lock_txid: lock_transfer_proof.clone().tx_hash(),
|
||||
xmr_lock_tx_confirmations: confirmations,
|
||||
xmr_lock_tx_confirmations: Some(confirmations),
|
||||
},
|
||||
);
|
||||
}),
|
||||
|
|
|
@ -18,6 +18,8 @@ pub fn capture_logs(min_level: LevelFilter) -> MakeCapturingWriter {
|
|||
tracing_subscriber::fmt()
|
||||
.with_ansi(false)
|
||||
.without_time()
|
||||
.with_file(true)
|
||||
.with_line_number(true)
|
||||
.with_writer(make_writer.clone())
|
||||
.with_env_filter(format!("{}", min_level))
|
||||
.finish(),
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue